{
"source": "joeranbosma/Hamlyn2021",
"score": 3
}
#### File: hamlyn2021/data_reader/data_reader.py
```python
import cv2
import numpy as np
import os
import matplotlib.pyplot as plt
from typing import Tuple
def read_input_image(location: str, normalise: bool = True) -> np.ndarray:
"""
Read input image, stored as png image at the specified location.
:param location: str, path to input image.
Returns:
- input image, shape: (height, width, channels)
"""
assert os.path.exists(location), f"Input image not found at {location}!"
img = cv2.imread(location)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if normalise:
# normalise image
img = img / 256.
return img
def read_depth_map(location: str, normalise=True) -> np.ndarray:
"""
    Read depth map, stored as an exr (HDR) image at the specified location.
    :param location: str, path to depth map.
    Returns:
    - depth map, shape: (height, width)
"""
assert os.path.exists(location), f"Depth map not found at {location}!"
# IMREAD_ANYDEPTH is needed because even though the data is stored in 8-bit channels
# when it's read into memory it's represented at a higher bit depth
lbl = cv2.imread(location, flags=cv2.IMREAD_ANYDEPTH)
if normalise:
# normalise depth map
lbl = lbl / 300 # <- this scales the vast majority of pixels to [0, 1]
lbl = np.clip(lbl, a_min=0, a_max=1) # <- clip the remaining pixels to [0, 1]
return lbl
def read_input_sequence(location: str, shape=(256, 512, 3), normalise=True) -> np.ndarray:
"""
Read input images, stored as png images in the specified folder.
:param location: str, path to input images.
:param shape: tuple of ints, dimensions of each input image.
Returns:
- input data, shape: (num. timepoints, height, width, channels)
"""
files = sorted(os.listdir(location))
files = [fn for fn in files
if ".png" in fn and "._" not in fn]
# initialise sequence data
num = len(files)
data = np.zeros(shape=(num, *shape))
for i, fn in enumerate(files):
img_path = os.path.join(location, fn)
data[i] = read_input_image(img_path, normalise=normalise)
return data
def read_depth_sequence(location: str, shape=(256, 512), normalise=True) -> np.ndarray:
"""
Read depth maps, stored as exr (HDR) images in the specified folder.
:param location: str, path to input images.
:param shape: tuple of ints, dimensions of each depth map.
Returns:
- depth data, shape: (num. timepoints, height, width)
"""
files = sorted(os.listdir(location))
files = [fn for fn in files
if ".exr" in fn and "._" not in fn]
# initialise sequence data
num = len(files)
data = np.zeros(shape=(num, *shape))
for i, fn in enumerate(files):
hdr_path = os.path.join(location, fn)
data[i] = read_depth_map(hdr_path, normalise=normalise)
return data
def read_sequence(input_dir: str,
depth_dir: str,
input_shape=(256, 512, 3),
depth_shape=(256, 512)) -> Tuple[np.ndarray, np.ndarray]:
"""
Read input images and depth maps, stored as png/exr images in the specified folders.
:param input_dir: str, path to input images.
:param depth_dir: str, path to depth maps.
:param input_shape: tuple of ints, dimensions of each input image.
:param depth_shape: tuple of ints, dimensions of each depth map.
Returns:
- input data, shape: (num. timepoints, height, width, channels)
- depth data, shape: (num. timepoints, height, width)
"""
images = read_input_sequence(location=input_dir, shape=input_shape)
labels = read_depth_sequence(location=depth_dir, shape=depth_shape)
return images, labels
if __name__ == "__main__":
# show example sequence
data_dir = "/Users/joeranbosma/Hamlyn2021/data/"
input_dir = os.path.join(data_dir, "translation_sequences/sequences/scene_1/translation")
depth_dir = os.path.join(data_dir, "depth_sequences/sequences/scene_1/depth")
images, labels = read_sequence(
input_dir=input_dir,
depth_dir=depth_dir,
)
print(f"Shape of input images: {images.shape}, shape of depth maps: {labels.shape}")
for img, lbl in zip(images, labels):
f, axes = plt.subplots(1, 2, figsize=(18, 8))
ax = axes[0]
ax.imshow(img)
ax = axes[1]
ax.imshow(lbl)
plt.show()
break # prevent showing a popup for all timesteps
# show example random pair
input_path = os.path.join(data_dir, "input_random/3Dircadb1.1/inputs/img00000.png")
depth_path = os.path.join(data_dir, "depth_random/3Dircadb1.1/depths/depth00000.exr")
img = read_input_image(input_path)
lbl = read_depth_map(depth_path)
f, axes = plt.subplots(1, 2, figsize=(18, 8))
ax = axes[0]
ax.imshow(img)
ax = axes[1]
ax.imshow(lbl)
plt.show()
```
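For a quick check outside the `__main__` demo above, the two readers can also be used on their own. The following is a minimal sketch with placeholder paths; the factor 300 simply undoes the normalisation applied in `read_depth_map`, up to the clipping.

```python
# Minimal sketch: read one input/depth pair and undo the depth normalisation.
# The paths below are placeholders; adjust them to your local data layout.
from hamlyn2021.data_reader import read_input_image, read_depth_map

img = read_input_image("/path/to/inputs/img00000.png")   # float array in [0, 1), shape (H, W, 3)
lbl = read_depth_map("/path/to/depths/depth00000.exr")   # float array clipped to [0, 1]

depth = lbl * 300.0  # approximate inverse of the normalisation (values beyond 300 were clipped)
print(img.shape, lbl.shape, float(depth.max()))
```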
#### File: hamlyn2021/data_reader/pytorch_data_reader.py
```python
from abc import abstractmethod
import os
import numpy as np
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
from typing import Tuple, List, Optional
import argparse
from tqdm import tqdm
from hamlyn2021.data_reader import read_input_image, read_depth_map
class CustomDatasetLoader(Dataset):
def __init__(self, input_dir: str, depth_dir: str,
input_files: List[str], depth_files: List[str],
input_shape=(256, 512, 3), depth_shape=(256, 512)):
self.input_dir = input_dir
self.depth_dir = depth_dir
self.input_files = input_files
self.depth_files = depth_files
self.input_shape = input_shape
self.depth_shape = depth_shape
def __len__(self) -> int:
return len(self.input_files)
@abstractmethod
def __getitem__(self, idx) -> Tuple[np.ndarray, np.ndarray]:
"""Generate a single pair of input images and target depth map"""
raise NotImplementedError()
class CustomDatasetLoaderRandom(CustomDatasetLoader):
def __getitem__(self, idx) -> Tuple[np.ndarray, np.ndarray]:
"""Generate a single pair of input image and target depth map"""
input_fn = self.input_files[idx]
depth_fn = self.depth_files[idx]
if "[random_style]" in input_fn:
# randomly choose one of the five styles
num = np.random.randint(0, 4+1)
input_fn = input_fn.replace("[random_style]", str(num))
input_path = os.path.join(self.input_dir, input_fn)
depth_path = os.path.join(self.depth_dir, depth_fn)
img = read_input_image(input_path)
lbl = read_depth_map(depth_path)
return img, lbl
def setup_dataloader(input_dir, depth_dir, folders, cases=None, batch_size=32, shuffle=True) -> DataLoader:
"""Setup DataLoader for specified folders and cases"""
    # collect filenames for input images and output depth maps
if cases is None:
cases = [f"{i:04d}" for i in range(3000)]
input_files = [
f"{folder}/translation/translation{case}.png"
for case in cases
for folder in folders
]
depth_files = [
f"{folder}/depth/depth{case}.exr"
for case in cases
for folder in folders
]
# set up dataloader
data_generator = CustomDatasetLoaderRandom(
input_dir=input_dir,
depth_dir=depth_dir,
input_files=input_files,
depth_files=depth_files
)
dataloader = DataLoader(
data_generator,
batch_size=batch_size,
shuffle=shuffle,
num_workers=min(batch_size, 8)
)
return dataloader
def get_dataloaders(
input_dir: str,
depth_dir: str,
train_folders: Optional[List[str]] = None,
valid_folders: Optional[List[str]] = None,
train_cases: Optional[List[str]] = None,
valid_cases: Optional[List[str]] = None,
batch_size: int = 32
) -> Tuple[DataLoader, DataLoader]:
"""
Setup DataLoader for training and validation
Inputs:
    :param input_dir: path to directory containing (transformed) input images, e.g. /path/to/data/stylernd
    :param depth_dir: path to directory containing depth maps, e.g. /path/to/data/depth_random
    :param train_folders: list of folders to include for training, default: ["scene_1", "scene_2",
                          "scene_3", "scene_4"]
    :param valid_folders: list of folders to include for validation, default: ["scene_5", "scene_6"]
    :param train_cases: list of case names to include for training, default: ['0000', ..., '2999']
    :param valid_cases: list of case names to include for validation, default: ['0000', ..., '2999']
:param batch_size: number of samples per batch, default: 32
Returns:
- PyTorch dataloader with training samples
- PyTorch dataloader with validation samples
"""
    # collect filenames for input images and output depth maps
if train_folders is None:
train_folders = [
f"scene_{i}"
for i in (1, 2, 3, 4)
]
if valid_folders is None:
valid_folders = [
f"scene_{i}"
for i in (5, 6)
]
train_dataloader = setup_dataloader(
input_dir=input_dir,
depth_dir=depth_dir,
folders=train_folders,
cases=train_cases,
batch_size=batch_size,
shuffle=True
)
valid_dataloader = setup_dataloader(
input_dir=input_dir,
depth_dir=depth_dir,
folders=valid_folders,
cases=valid_cases,
batch_size=batch_size,
shuffle=False
)
return train_dataloader, valid_dataloader
def test_get_dataloaders():
# parse command line arguments
parser = argparse.ArgumentParser(description='Command line options')
parser.add_argument('--data_dir', type=str, required=True)
args = parser.parse_args()
# example setup of PyTorch dataloader for random data
input_dir = os.path.join(args.data_dir, "translation_random_views/random_views")
depth_dir = os.path.join(args.data_dir, "depth_random_views/random_views")
train_dataloader, valid_dataloader = get_dataloaders(
input_dir=input_dir,
depth_dir=depth_dir,
)
for images, labels in valid_dataloader:
# visualise first sample of the batch
img, lbl = images[0].numpy(), labels[0].numpy()
f, axes = plt.subplots(1, 2, figsize=(18, 8))
ax = axes[0]
ax.imshow(img)
ax = axes[1]
ax.imshow(lbl)
plt.show()
break
for images, labels in tqdm(train_dataloader):
pass
for images, labels in tqdm(valid_dataloader):
pass
if __name__ == "__main__":
test_get_dataloaders()
```
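A minimal sketch of how the dataloaders above could be wired into a training loop. The directory names follow `test_get_dataloaders()`; the channels-first conversion is an assumption about the downstream model, since the readers return channels-last arrays.

```python
# Minimal sketch: plug the dataloaders into a training loop (paths are placeholders).
import os
from hamlyn2021.data_reader.pytorch_data_reader import get_dataloaders

data_dir = "/path/to/data"
train_dl, valid_dl = get_dataloaders(
    input_dir=os.path.join(data_dir, "translation_random_views/random_views"),
    depth_dir=os.path.join(data_dir, "depth_random_views/random_views"),
    batch_size=8,
)

for images, labels in train_dl:
    # images: (B, H, W, 3) float64, labels: (B, H, W) float64, as returned by the readers;
    # most models expect channels-first float32 tensors.
    images = images.permute(0, 3, 1, 2).float()
    labels = labels.unsqueeze(1).float()
    ...  # forward pass / loss / optimiser step would go here
    break
```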
{
"source": "joeranbosma/ModelsGenesis",
"score": 2
}
#### File: ModelsGenesis/competition/config.py
```python
import os
import shutil
class models_genesis_config:
DATA_DIR = "/mnt/dataset/shared/zongwei/LUNA16/Self_Learning_Cubes"
nb_epoch = 1000
patience = 20
lr = 1e-1
train_fold=[0,1,2,3,4]
valid_fold=[5,6]
test_fold=[7,8,9]
hu_max = 1000.0
hu_min = -1000.0
def __init__(self,
note="",
data_augmentation=True,
input_rows=64,
input_cols=64,
input_deps=32,
batch_size=64,
weights=None,
nb_class=2,
nonlinear_rate=0.9,
paint_rate=0.9,
outpaint_rate=0.8,
rotation_rate=0.0,
flip_rate=0.4,
local_rate=0.5,
verbose=1,
scale=64,
):
self.exp_name = "genesis_nnunet_luna16_006"
self.data_augmentation = data_augmentation
self.input_rows, self.input_cols = input_rows, input_cols
self.input_deps = input_deps
self.batch_size = batch_size
self.verbose = verbose
self.nonlinear_rate = nonlinear_rate
self.paint_rate = paint_rate
self.outpaint_rate = outpaint_rate
self.inpaint_rate = 1.0 - self.outpaint_rate
self.rotation_rate = rotation_rate
self.flip_rate = flip_rate
self.local_rate = local_rate
self.nb_class = nb_class
self.scale = scale
self.weights = weights
def display(self):
"""Display Configuration values."""
print("\nConfigurations:")
for a in dir(self):
if not a.startswith("__") and not callable(getattr(self, a)):
print("{:30} {}".format(a, getattr(self, a)))
print("\n")
```
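A minimal usage sketch, assuming the file is importable as `config` from the `competition/` directory (the import path is an assumption, not part of the file above).

```python
# Minimal sketch: instantiate the config and inspect its values.
from config import models_genesis_config  # import path assumed

conf = models_genesis_config(batch_size=32, nb_class=2)
conf.display()          # prints all non-callable, non-dunder attributes
print(conf.exp_name)    # "genesis_nnunet_luna16_006"
```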
#### File: downstream_tasks/BraTS/DataSet.py
```python
import os
import pandas as pd
import numpy as np
import nibabel as nib
from tqdm import tqdm
from BraTS.Patient import *
from BraTS.structure import *
from BraTS.modalities import *
from BraTS.load_utils import *
survival_df_cache = {} # Prevents loading CSVs more than once
class DataSubSet:
def __init__(self, directory_map, survival_csv, data_set_type=None):
self.directory_map = directory_map
self._patient_ids = sorted(list(directory_map.keys()))
self._survival_csv = survival_csv
self._num_patients = len(self._patient_ids)
self.type = data_set_type
# Data caches
self._mris = None
self._segs = None
self._patients = {}
self._survival_df_cached = None
self._patients_fully_loaded = False
self._id_indexer = {patient_id: i for i, patient_id in enumerate(self._patient_ids)}
def subset(self, patient_ids):
"""
        Split this data subset into a smaller subset by patient ID
        :param patient_ids: the patient IDs to include in the new subset
        :return: A new DataSubSet containing only the specified patients
"""
dir_map = {id: self.directory_map[id] for id in patient_ids}
return DataSubSet(dir_map, self._survival_csv)
@property
def ids(self):
"""
List of all patient IDs in this dataset
        The IDs are returned as a copy, so the list can be modified freely
:return: Copy of the patient IDs
"""
return list(self._patient_ids)
@property
def mris(self):
if self._mris is not None:
return self._mris
self._load_images()
return self._mris
@property
def segs(self):
if self._segs is None:
self._load_images()
return self._segs
def _load_images(self):
mris_shape = (self._num_patients,) + mri_shape
segs_shape = (self._num_patients,) + image_shape
self._mris = np.empty(shape=mris_shape)
self._segs = np.empty(shape=segs_shape)
if self._patients_fully_loaded:
# All the patients were already loaded
for i, patient in enumerate(tqdm(self._patients.values())):
self._mris[i] = patient.mri_data
self._segs[i] = patient.seg
else:
# Load it from scratch
for i, patient_id in enumerate(self._patient_ids):
patient_dir = self.directory_map[patient_id]
load_patient_data_inplace(patient_dir, self._mris, self._segs, i)
@property
def patients(self):
"""
        Lazily loads every patient from disk into Patient objects
        :return: Generator yielding all Patient objects in this subset
"""
for patient_id in self.ids:
yield self.patient(patient_id)
self._patients_fully_loaded = True
def patient(self, patient_id):
"""
Loads only a single patient from disk
:param patient_id: The patient ID
:return: A Patient object loaded from disk
"""
if patient_id not in self._patient_ids:
raise ValueError("Patient id \"%s\" not present." % patient_id)
# Return cached value if present
if patient_id in self._patients:
return self._patients[patient_id]
# Load patient data into memory
patient = Patient(patient_id)
patient_dir = self.directory_map[patient_id]
df = self._survival_df
if patient_id in df.id.values:
patient.age = float(df.loc[df.id == patient_id].age)
patient.survival = int(df.loc[df.id == patient_id].survival)
if self._mris is not None and self._segs is not None:
# Load from _mris and _segs if possible
index = self._id_indexer[patient_id]
patient.mri = self._mris[index]
patient.seg = self._segs[index]
else:
# Load the mri and segmentation data from disk
patient.mri, patient.seg = load_patient_data(patient_dir)
self._patients[patient_id] = patient # cache the value for later
return patient
def drop_cache(self):
self._patients.clear()
self._mris = None
self._segs = None
@property
def _survival_df(self):
if self._survival_csv in survival_df_cache:
return survival_df_cache[self._survival_csv]
df = load_survival(self._survival_csv)
survival_df_cache[self._survival_csv] = df
return df
class DataSet(object):
def __init__(self, data_set_dir=None, brats_root=None, year=None):
if data_set_dir is not None:
# The data-set directory was specified explicitly
assert isinstance(data_set_dir, str)
self._data_set_dir = data_set_dir
elif brats_root is not None and isinstance(year, int):
# Find the directory by specifying the year
assert isinstance(brats_root, str)
year_dir = find_file_containing(brats_root, str(year % 100))
self._data_set_dir = os.path.join(brats_root, year_dir)
self._brats_root = brats_root
self._year = year
else:
            # BraTS data-set location was not properly specified
raise Exception("Specify BraTS location with \"data_set_dir\" or with \"brats_root\" and \"year\"")
self._validation = None
self._train = None
self._hgg = None
self._lgg = None
self._dir_map_cache = None
self._val_dir = None
self._train_dir_cached = None
self._hgg_dir = os.path.join(self._train_dir, "HGG")
self._lgg_dir = os.path.join(self._train_dir, "LGG")
self._train_survival_csv_cached = None
self._validation_survival_csv_cached = None
self._train_ids = None
self._hgg_ids_cached = None
self._lgg_ids_cached = None
self._train_dir_map_cache = None
self._validation_dir_map_cache = None
self._hgg_dir_map_cache = None
self._lgg_dir_map_cache = None
def set(self, data_set_type):
"""
Get a data subset by type
:param data_set_type: The DataSubsetType to get
:return: The data sub-set of interest
"""
assert isinstance(data_set_type, DataSubsetType)
if data_set_type == DataSubsetType.train:
return self.train
if data_set_type == DataSubsetType.hgg:
return self.hgg
if data_set_type == DataSubsetType.lgg:
return self.lgg
if data_set_type == DataSubsetType.validation:
return self.validation
@property
def train(self):
"""
Training data
Loads the training data from disk, utilizing caching
        :return: A DataSubSet containing the training data
"""
if self._train is None:
try:
self._train = DataSubSet(self._train_dir_map,
self._train_survival_csv,
data_set_type=DataSubsetType.train)
except FileNotFoundError:
return None
return self._train
@property
def validation(self):
"""
Validation data
:return: Validation data
"""
if self._validation is None:
try:
self._validation = DataSubSet(self._validation_dir_map,
self._validation_survival_csv,
data_set_type=DataSubsetType.validation)
except FileNotFoundError:
return None
return self._validation
@property
def hgg(self):
if self._hgg is None:
try:
self._hgg = DataSubSet(self._hgg_dir_map,
self._train_survival_csv,
data_set_type=DataSubsetType.hgg)
except FileNotFoundError:
return None
return self._hgg
@property
def lgg(self):
if self._lgg is None:
try:
self._lgg = DataSubSet(self._lgg_dir_map,
self._train_survival_csv,
data_set_type=DataSubsetType.lgg)
except FileNotFoundError:
return None
return self._lgg
def drop_cache(self):
"""
Drops the cached values in the object
:return: None
"""
self._validation = None
self._train = None
self._hgg = None
self._lgg = None
self._dir_map_cache = None
self._val_dir = None
self._train_dir_cached = None
self._train_survival_csv_cached = None
self._validation_survival_csv_cached = None
self._train_ids = None
self._hgg_ids_cached = None
self._lgg_ids_cached = None
self._train_dir_map_cache = None
self._validation_dir_map_cache = None
self._hgg_dir_map_cache = None
self._lgg_dir_map_cache = None
@property
def _train_survival_csv(self):
if self._train_survival_csv_cached is None:
self._train_survival_csv_cached = find_file_containing(self._train_dir, "survival")
if self._train_survival_csv_cached is None:
raise FileNotFoundError("Could not find survival CSV in %s" % self._train_dir)
return self._train_survival_csv_cached
@property
def _validation_survival_csv(self):
if self._validation_survival_csv_cached is None:
self._validation_survival_csv_cached = find_file_containing(self._validation_dir, "survival")
if self._validation_survival_csv_cached is None:
raise FileNotFoundError("Could not find survival CSV in %s" % self._validation_dir)
return self._validation_survival_csv_cached
@property
def _train_dir(self):
if self._train_dir_cached is not None:
return self._train_dir_cached
self._train_dir_cached = find_file_containing(self._data_set_dir, "training")
if self._train_dir_cached is None:
raise FileNotFoundError("Could not find training directory in %s" % self._data_set_dir)
return self._train_dir_cached
@property
def _validation_dir(self):
if self._val_dir is not None:
return self._val_dir
self._val_dir = find_file_containing(self._data_set_dir, "validation")
if self._val_dir is None:
raise FileNotFoundError("Could not find validation directory in %s" % self._data_set_dir)
return self._val_dir
@property
def _train_dir_map(self):
if self._train_dir_map_cache is None:
self._train_dir_map_cache = dict(self._hgg_dir_map)
self._train_dir_map_cache.update(self._lgg_dir_map)
return self._train_dir_map_cache
@property
def _validation_dir_map(self):
if self._validation_dir_map_cache is None:
self._validation_dir_map_cache = self._directory_map(self._validation_dir)
return self._validation_dir_map_cache
@property
def _hgg_dir_map(self):
if self._hgg_dir_map_cache is None:
self._hgg_dir_map_cache = self._directory_map(self._hgg_dir)
return self._hgg_dir_map_cache
@property
def _lgg_dir_map(self):
if self._lgg_dir_map_cache is None:
self._lgg_dir_map_cache = self._directory_map(self._lgg_dir)
return self._lgg_dir_map_cache
@property
def _hgg_ids(self):
if self._hgg_ids_cached is None:
self._hgg_ids_cached = os.listdir(self._hgg_dir)
return self._hgg_ids_cached
@property
def _lgg_ids(self):
if self._lgg_ids_cached is None:
self._lgg_ids_cached = os.listdir(self._lgg_dir)
return self._lgg_ids_cached
@classmethod
def _directory_map(cls, dir):
return {file: os.path.join(dir, file)
for file in os.listdir(dir)
if os.path.isdir(os.path.join(dir, file))}
```
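A minimal sketch of the `DataSet` API defined above. It assumes the package is importable as `BraTS` and that `brats_root` contains one sub-directory per year whose name includes the two-digit year (e.g. `BraTS17`); both are assumptions about the surrounding repository layout.

```python
# Minimal sketch: load the training subset and access a single patient.
from BraTS.DataSet import DataSet  # module path assumed from the file location

brats = DataSet(brats_root="/path/to/BraTS", year=2017)
train = brats.train                      # DataSubSet, or None if the training dir is missing
print(len(train.ids), "training patients")

patient = train.patient(train.ids[0])    # loads one patient from disk and caches it
print(patient.mri.shape, patient.seg.shape)

# train.mris / train.segs load *all* patients into two large arrays (memory heavy).
```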
#### File: downstream_tasks/BraTS/structure.py
```python
import os
from enum import Enum
from BraTS.load_utils import find_file_containing
class DataSubsetType(Enum):
hgg = 0
lgg = 1
train = 2
validation = 3
def get_brats_subset_directory(brats_dataset_dir, data_set_type):
if data_set_type == DataSubsetType.train:
# Training data
try:
found_train = find_file_containing(brats_dataset_dir, "train", case_sensitive=False)
except FileNotFoundError:
found_train = None
if found_train is not None:
return found_train
return os.path.join(brats_dataset_dir, "training")
if data_set_type == DataSubsetType.hgg:
train_dir = get_brats_subset_directory(brats_dataset_dir, DataSubsetType.train)
return os.path.join(train_dir, "HGG")
if data_set_type == DataSubsetType.lgg:
train_dir = get_brats_subset_directory(brats_dataset_dir, DataSubsetType.train)
return os.path.join(train_dir, "LGG")
if data_set_type == DataSubsetType.validation:
# Validation
try:
found_validation = find_file_containing(brats_dataset_dir, "validation", case_sensitive=False)
except FileNotFoundError:
found_validation = None
if found_validation is not None:
return found_validation
return os.path.join(brats_dataset_dir, "validation")
``` |
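A minimal sketch of resolving a subset directory with the helper above; the dataset path is a placeholder.

```python
# Minimal sketch: resolve the on-disk location of a BraTS subset from its type.
from BraTS.structure import DataSubsetType, get_brats_subset_directory

hgg_dir = get_brats_subset_directory("/path/to/BraTS17", DataSubsetType.hgg)
print(hgg_dir)  # e.g. "/path/to/BraTS17/<training dir>/HGG", falling back to ".../training/HGG"
```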
{
"source": "joeranbosma/nnDetection",
"score": 2
}
#### File: nndet/arch/conv.py
```python
import torch
import torch.nn as nn
from typing import Union, Callable, Any, Optional, Tuple, Sequence, Type
from nndet.arch.initializer import InitWeights_He
from nndet.arch.layers.norm import GroupNorm
NdParam = Union[int, Tuple[int, int], Tuple[int, int, int]]
class Generator:
def __init__(self, conv_cls, dim: int):
"""
Factory helper which saves the conv class and dimension to generate objects
Args:
conv_cls (callable): class of convolution
dim (int): number of spatial dimensions (in general 2 or 3)
"""
self.dim = dim
self.conv_cls = conv_cls
def __call__(self, *args, **kwargs) -> Any:
"""
Create object
Args:
*args: passed to object
**kwargs: passed to object
Returns:
Any
"""
return self.conv_cls(self.dim, *args, **kwargs)
class BaseConvNormAct(torch.nn.Sequential):
def __init__(self,
dim: int,
in_channels: int,
out_channels: int,
norm: Optional[Union[Callable[..., Type[nn.Module]], str]],
act: Optional[Union[Callable[..., Type[nn.Module]], str]],
kernel_size: Union[int, tuple],
stride: Union[int, tuple] = 1,
padding: Union[int, tuple] = 0,
dilation: Union[int, tuple] = 1,
groups: int = 1,
bias: bool = None,
transposed: bool = False,
norm_kwargs: Optional[dict] = None,
act_inplace: Optional[bool] = None,
act_kwargs: Optional[dict] = None,
initializer: Callable[[nn.Module], None] = None,
):
"""
Baseclass for default ordering:
conv -> norm -> activation
Args
dim: number of dimensions the convolution should be chosen for
in_channels: input channels
out_channels: output_channels
norm: type of normalization. If None, no normalization will be applied
kernel_size: size of convolution kernel
            act: class of non-linearity; if None, no activation is used.
            stride: convolution stride
            padding: padding value
                (interpreted as input or output padding depending on whether
                the convolution is transposed or not)
            dilation: convolution dilation
            groups: number of convolution groups
            bias: whether to include bias or not.
                If None, the bias is determined dynamically: False
                if a normalization follows, otherwise True
            transposed: whether the convolution should be transposed or not
            norm_kwargs: keyword arguments for normalization layer
            act_inplace: whether to perform the activation inplace or not.
                If None, inplace is determined dynamically: True
                if a normalization follows, otherwise False
            act_kwargs: keyword arguments for non-linearity layer
            initializer: callable to initialize weights
"""
super().__init__()
# process optional arguments
norm_kwargs = {} if norm_kwargs is None else norm_kwargs
act_kwargs = {} if act_kwargs is None else act_kwargs
if "inplace" in act_kwargs:
raise ValueError("Use keyword argument to en-/disable inplace activations")
if act_inplace is None:
act_inplace = bool(norm is not None)
act_kwargs["inplace"] = act_inplace
# process dynamic values
bias = bool(norm is None) if bias is None else bias
conv = nd_conv(dim=dim,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
transposed=transposed
)
self.add_module("conv", conv)
if norm is not None:
if isinstance(norm, str):
_norm = nd_norm(norm, dim, out_channels, **norm_kwargs)
else:
_norm = norm(dim, out_channels, **norm_kwargs)
self.add_module("norm", _norm)
if act is not None:
if isinstance(act, str):
_act = nd_act(act, dim, **act_kwargs)
else:
_act = act(**act_kwargs)
self.add_module("act", _act)
if initializer is not None:
self.apply(initializer)
class ConvInstanceRelu(BaseConvNormAct):
def __init__(self,
dim: int,
in_channels: int,
out_channels: int,
kernel_size: Union[int, tuple],
stride: Union[int, tuple] = 1,
padding: Union[int, tuple] = 0,
dilation: Union[int, tuple] = 1,
groups: int = 1,
bias: bool = None,
transposed: bool = False,
add_norm: bool = True,
add_act: bool = True,
act_inplace: Optional[bool] = None,
norm_eps: float = 1e-5,
norm_affine: bool = True,
initializer: Callable[[nn.Module], None] = None,
):
"""
Baseclass for default ordering:
conv -> norm -> activation
Args
dim: number of dimensions the convolution should be chosen for
in_channels: input channels
out_channels: output_channels
norm: type of normalization. If None, no normalization will be applied
kernel_size: size of convolution kernel
            act: class of non-linearity; if None, no activation is used.
            stride: convolution stride
            padding: padding value
                (interpreted as input or output padding depending on whether
                the convolution is transposed or not)
            dilation: convolution dilation
            groups: number of convolution groups
            bias: whether to include bias or not.
                If None, the bias is determined dynamically: False
                if a normalization follows, otherwise True
            transposed: whether the convolution should be transposed or not
            add_norm: add normalisation layer to conv block
            add_act: add activation layer to conv block
            act_inplace: whether to perform the activation inplace or not.
                If None, inplace is determined dynamically: True
                if a normalization follows, otherwise False
            norm_eps: instance norm eps (see pytorch for more info)
            norm_affine: instance norm affine parameter (see pytorch for more info)
            initializer: callable to initialize weights
"""
norm = "Instance" if add_norm else None
act = "ReLU" if add_act else None
super().__init__(
dim=dim,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
transposed=transposed,
norm=norm,
act=act,
norm_kwargs={
"eps": norm_eps,
"affine": norm_affine,
},
act_inplace=act_inplace,
initializer=initializer,
)
class ConvGroupRelu(BaseConvNormAct):
def __init__(self,
dim: int,
in_channels: int,
out_channels: int,
kernel_size: Union[int, tuple],
stride: Union[int, tuple] = 1,
padding: Union[int, tuple] = 0,
dilation: Union[int, tuple] = 1,
groups: int = 1,
bias: bool = None,
transposed: bool = False,
add_norm: bool = True,
add_act: bool = True,
act_inplace: Optional[bool] = None,
norm_eps: float = 1e-5,
norm_affine: bool = True,
norm_channels_per_group: int = 16,
initializer: Callable[[nn.Module], None] = None,
):
"""
Baseclass for default ordering:
conv -> norm -> activation
Args
dim: number of dimensions the convolution should be chosen for
in_channels: input channels
out_channels: output_channels
norm: type of normalization. If None, no normalization will be applied
kernel_size: size of convolution kernel
            act: class of non-linearity; if None, no activation is used.
            stride: convolution stride
            padding: padding value
                (interpreted as input or output padding depending on whether
                the convolution is transposed or not)
            dilation: convolution dilation
            groups: number of convolution groups
            bias: whether to include bias or not.
                If None, the bias is determined dynamically: False
                if a normalization follows, otherwise True
            transposed: whether the convolution should be transposed or not
            add_norm: add normalisation layer to conv block
            add_act: add activation layer to conv block
            act_inplace: whether to perform the activation inplace or not.
                If None, inplace is determined dynamically: True
                if a normalization follows, otherwise False
            norm_eps: group norm eps (see pytorch for more info)
            norm_affine: group norm affine parameter (see pytorch for more info)
            norm_channels_per_group: channels per group for group norm
            initializer: callable to initialize weights
"""
norm = "Group" if add_norm else None
act = "ReLU" if add_act else None
super().__init__(
dim=dim,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
transposed=transposed,
norm=norm,
act=act,
norm_kwargs={
"eps": norm_eps,
"affine": norm_affine,
"channels_per_group": norm_channels_per_group,
},
act_inplace=act_inplace,
initializer=initializer,
)
def nd_conv(dim: int,
in_channels: int,
out_channels: int,
kernel_size: Union[int, tuple],
stride: Union[int, tuple] = 1,
padding: Union[int, tuple] = 0,
dilation: Union[int, tuple] = 1,
groups: int = 1,
bias: bool = True,
transposed: bool = False,
**kwargs,
) -> torch.nn.Module:
"""
    Convolution wrapper to switch across dimensions and transposed
    convolutions with a single argument
    Args
        dim (int): number of dimensions the convolution should be chosen for
        in_channels (int): input channels
        out_channels (int): output channels
        kernel_size (int or Iterable): size of convolution kernel
        stride (int or Iterable): convolution stride
        padding (int or Iterable): padding value
            (interpreted as input or output padding depending on whether
            the convolution is transposed or not)
dilation (int or Iterable): convolution dilation
groups (int): number of convolution groups
bias (bool): whether to include bias or not
transposed (bool): whether the convolution should be transposed or not
Returns:
torch.nn.Module: generated module
See Also
Torch Convolutions:
* :class:`torch.nn.Conv1d`
* :class:`torch.nn.Conv2d`
* :class:`torch.nn.Conv3d`
* :class:`torch.nn.ConvTranspose1d`
* :class:`torch.nn.ConvTranspose2d`
* :class:`torch.nn.ConvTranspose3d`
"""
if transposed:
transposed_str = "Transpose"
else:
transposed_str = ""
conv_cls = getattr(torch.nn, f"Conv{transposed_str}{dim}d")
return conv_cls(in_channels=in_channels, out_channels=out_channels,
kernel_size=kernel_size, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias, **kwargs)
def nd_pool(pooling_type: str, dim: int, *args, **kwargs) -> torch.nn.Module:
"""
    Wrapper to switch between different pooling types and dimensions by a single argument
Args
pooling_type (str): Type of Pooling, case sensitive.
Supported values are
* ``Max``
* ``Avg``
* ``AdaptiveAvg``
* ``AdaptiveMax``
        dim (int): number of dimensions
*args : positional arguments of the chosen pooling class
**kwargs : keyword arguments of the chosen pooling class
Returns:
torch.nn.Module: generated module
See Also
Torch Pooling Classes:
* :class:`torch.nn.MaxPool1d`
* :class:`torch.nn.MaxPool2d`
* :class:`torch.nn.MaxPool3d`
* :class:`torch.nn.AvgPool1d`
* :class:`torch.nn.AvgPool2d`
* :class:`torch.nn.AvgPool3d`
* :class:`torch.nn.AdaptiveMaxPool1d`
* :class:`torch.nn.AdaptiveMaxPool2d`
* :class:`torch.nn.AdaptiveMaxPool3d`
* :class:`torch.nn.AdaptiveAvgPool1d`
* :class:`torch.nn.AdaptiveAvgPool2d`
* :class:`torch.nn.AdaptiveAvgPool3d`
"""
pool_cls = getattr(torch.nn, f"{pooling_type}Pool{dim}d")
return pool_cls(*args, **kwargs)
def nd_norm(norm_type: str, dim: int, *args, **kwargs) -> torch.nn.Module:
"""
Wrapper to switch between different types of normalization and
dimensions by a single argument
Args
norm_type (str): type of normalization, case sensitive.
Supported types are:
* ``Batch``
* ``Instance``
* ``LocalResponse``
* ``Group``
* ``Layer``
        dim (int, None): dimension of normalization input; can be None if normalization
            is dimension-agnostic (e.g. LayerNorm)
*args : positional arguments of chosen normalization class
**kwargs : keyword arguments of chosen normalization class
Returns
torch.nn.Module: generated module
See Also
Torch Normalizations:
* :class:`torch.nn.BatchNorm1d`
* :class:`torch.nn.BatchNorm2d`
* :class:`torch.nn.BatchNorm3d`
* :class:`torch.nn.InstanceNorm1d`
* :class:`torch.nn.InstanceNorm2d`
* :class:`torch.nn.InstanceNorm3d`
* :class:`torch.nn.LocalResponseNorm`
* :class:`nndet.arch.layers.norm.GroupNorm`
"""
if dim is None:
dim_str = ""
else:
dim_str = str(dim)
if norm_type.lower() == "group":
norm_cls = GroupNorm
else:
norm_cls = getattr(torch.nn, f"{norm_type}Norm{dim_str}d")
return norm_cls(*args, **kwargs)
def nd_act(act_type: str, dim: int, *args, **kwargs) -> torch.nn.Module:
"""
Helper to search for activations by string
The dim parameter is ignored.
    Searches in torch.nn for the activation class.
Args:
act_type: name of activation layer to look up.
dim: ignored
Returns:
torch.nn.Module: activation module
"""
act_cls = getattr(torch.nn, f"{act_type}")
return act_cls(*args, **kwargs)
def nd_dropout(dim: int, p: float = 0.5, inplace: bool = False, **kwargs) -> torch.nn.Module:
"""
Generate 1,2,3 dimensional dropout
Args:
dim (int): number of dimensions
        p (float): dropout probability
inplace (bool): apply operation inplace
**kwargs: passed to dropout
Returns:
torch.nn.Module: generated module
"""
dropout_cls = getattr(torch.nn, "Dropout%dd" % dim)
return dropout_cls(p=p, inplace=inplace, **kwargs)
def compute_padding_for_kernel(kernel_size: Union[int, Sequence[int]]) -> \
Union[int, Tuple[int, int], Tuple[int, int, int]]:
"""
Compute padding such that feature maps keep their size with stride 1
Args:
kernel_size: kernel size to compute padding for
Returns:
Union[int, Tuple[int, int], Tuple[int, int, int]]: computed padding
"""
if isinstance(kernel_size, Sequence):
padding = tuple([(i - 1) // 2 for i in kernel_size])
else:
padding = (kernel_size - 1) // 2
return padding
def conv_kwargs_helper(norm: bool, activation: bool):
"""
Helper to force disable normalization and activation in layers
which have those by default
Args:
norm: en-/disable normalization layer
activation: en-/disable activation layer
Returns:
dict: keyword arguments to pass to conv generator
"""
kwargs = {
"add_norm": norm,
"add_act": activation,
}
return kwargs
```
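A minimal sketch of the factory pattern above: `Generator` binds the spatial dimension, and `ConvInstanceRelu` builds a conv -> InstanceNorm -> ReLU block.

```python
# Minimal sketch: build a 3D conv -> InstanceNorm -> ReLU block via the Generator factory.
import torch
from nndet.arch.conv import Generator, ConvInstanceRelu, compute_padding_for_kernel

conv = Generator(ConvInstanceRelu, dim=3)      # partially binds the spatial dimension
block = conv(
    in_channels=1,
    out_channels=32,
    kernel_size=3,
    stride=1,
    padding=compute_padding_for_kernel(3),     # keeps spatial size for stride 1
)
out = block(torch.randn(2, 1, 32, 64, 64))
print(out.shape)                               # torch.Size([2, 32, 32, 64, 64])
```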
#### File: arch/heads/regressor.py
```python
import torch
import torch.nn as nn
from typing import Optional, Tuple, Callable, TypeVar
from abc import abstractmethod
from loguru import logger
from nndet.core.boxes import box_iou
from nndet.arch.layers.scale import Scale
from torch import Tensor
from nndet.losses import SmoothL1Loss, GIoULoss
CONV_TYPES = (nn.Conv2d, nn.Conv3d)
class Regressor(nn.Module):
@abstractmethod
def compute_loss(self, pred_deltas: Tensor, target_deltas: Tensor, **kwargs) -> Tensor:
"""
Compute regression loss (l1 loss)
Args:
pred_deltas (Tensor): predicted bounding box deltas [N, dim * 2]
target_deltas (Tensor): target bounding box deltas [N, dim * 2]
Returns:
Tensor: loss
"""
raise NotImplementedError
class BaseRegressor(Regressor):
def __init__(self,
conv,
in_channels: int,
internal_channels: int,
anchors_per_pos: int,
num_levels: int,
num_convs: int = 3,
add_norm: bool = True,
learn_scale: bool = False,
**kwargs,
):
"""
Base class to build regressor heads with typical conv structure
conv(in, internal) -> num_convs x conv(internal, internal) ->
conv(internal, out)
Args:
            conv: Convolution module which handles a single layer
in_channels: number of input channels
internal_channels: number of channels internally used
anchors_per_pos: number of anchors per position
num_levels: number of decoder levels which are passed through the
regressor
num_convs: number of convolutions
in conv -> num convs -> final conv
add_norm: en-/disable normalization layers in internal layers
learn_scale: learn additional single scalar values per feature
pyramid level
kwargs: keyword arguments passed to first and internal convolutions
"""
super().__init__()
self.dim = conv.dim
self.num_levels = num_levels
self.num_convs = num_convs
self.learn_scale = learn_scale
self.anchors_per_pos = anchors_per_pos
self.in_channels = in_channels
self.internal_channels = internal_channels
self.conv_internal = self.build_conv_internal(conv, add_norm=add_norm, **kwargs)
self.conv_out = self.build_conv_out(conv)
if self.learn_scale:
self.scales = self.build_scales()
self.loss: Optional[nn.Module] = None
self.init_weights()
def build_conv_internal(self, conv, **kwargs):
"""
Build internal convolutions
"""
_conv_internal = nn.Sequential()
_conv_internal.add_module(
name="c_in",
module=conv(
self.in_channels,
self.internal_channels,
kernel_size=3,
stride=1,
padding=1,
**kwargs,
))
for i in range(self.num_convs):
_conv_internal.add_module(
name=f"c_internal{i}",
module=conv(
self.internal_channels,
self.internal_channels,
kernel_size=3,
stride=1,
padding=1,
**kwargs,
))
return _conv_internal
def build_conv_out(self, conv):
"""
Build final convolutions
"""
out_channels = self.anchors_per_pos * self.dim * 2
return conv(
self.internal_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
add_norm=False,
add_act=False,
bias=True,
)
def build_scales(self) -> nn.ModuleList:
"""
        Build additional scalar values per level
"""
logger.info("Learning level specific scalar in regressor")
return nn.ModuleList([Scale() for _ in range(self.num_levels)])
def forward(self, x: torch.Tensor, level: int, **kwargs) -> torch.Tensor:
"""
Forward input
Args:
x: input feature map of size [N x C x Y x X x Z]
Returns:
            torch.Tensor: bounding box deltas for each anchor
[N, n_anchors, dim*2]
"""
bb_logits = self.conv_out(self.conv_internal(x))
if self.learn_scale:
bb_logits = self.scales[level](bb_logits)
axes = (0, 2, 3, 1) if self.dim == 2 else (0, 2, 3, 4, 1)
bb_logits = bb_logits.permute(*axes)
bb_logits = bb_logits.contiguous()
bb_logits = bb_logits.view(x.size()[0], -1, self.dim * 2)
return bb_logits
def compute_loss(self,
pred_deltas: Tensor,
target_deltas: Tensor,
**kwargs,
) -> Tensor:
"""
Compute regression loss (l1 loss)
Args:
pred_deltas: predicted bounding box deltas [N, dim * 2]
target_deltas: target bounding box deltas [N, dim * 2]
Returns:
Tensor: loss
"""
return self.loss(pred_deltas, target_deltas, **kwargs)
def init_weights(self) -> None:
"""
Init weights with normal distribution (mean=0, std=0.01)
"""
logger.info("Overwriting regressor conv weight init")
for layer in self.modules():
if isinstance(layer, CONV_TYPES):
torch.nn.init.normal_(layer.weight, mean=0, std=0.01)
if layer.bias is not None:
torch.nn.init.constant_(layer.bias, 0)
class L1Regressor(BaseRegressor):
def __init__(self,
conv,
in_channels: int,
internal_channels: int,
anchors_per_pos: int,
num_levels: int,
num_convs: int = 3,
add_norm: bool = True,
beta: float = 1.,
reduction: Optional[str] = "sum",
loss_weight: float = 1.,
learn_scale: bool = False,
**kwargs,
):
"""
Build regressor heads with typical conv structure and smooth L1 loss
conv(in, internal) -> num_convs x conv(internal, internal) ->
conv(internal, out)
Args:
            conv: Convolution module which handles a single layer
in_channels: number of input channels
internal_channels: number of channels internally used
anchors_per_pos: number of anchors per position
num_levels: number of decoder levels which are passed through the
regressor
num_convs: number of convolutions
in conv -> num convs -> final conv
add_norm: en-/disable normalization layers in internal layers
beta: L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
reduction: reduction to apply to loss. 'sum' | 'mean' | 'none'
loss_weight: scalar to balance multiple losses
learn_scale: learn additional single scalar values per feature
pyramid level
kwargs: keyword arguments passed to first and internal convolutions
"""
super().__init__(
conv=conv,
in_channels=in_channels,
internal_channels=internal_channels,
anchors_per_pos=anchors_per_pos,
num_levels=num_levels,
num_convs=num_convs,
add_norm=add_norm,
learn_scale=learn_scale,
**kwargs
)
self.loss = SmoothL1Loss(
beta=beta,
reduction=reduction,
loss_weight=loss_weight,
)
class GIoURegressor(BaseRegressor):
def __init__(self,
conv,
in_channels: int,
internal_channels: int,
anchors_per_pos: int,
num_levels: int,
num_convs: int = 3,
add_norm: bool = True,
reduction: Optional[str] = "sum",
loss_weight: float = 1.,
learn_scale: bool = False,
**kwargs,
):
"""
Build regressor heads with typical conv structure and generalized
IoU loss
conv(in, internal) -> num_convs x conv(internal, internal) ->
conv(internal, out)
Args:
            conv: Convolution module which handles a single layer
in_channels: number of input channels
internal_channels: number of channels internally used
anchors_per_pos: number of anchors per position
num_levels: number of decoder levels which are passed through the
regressor
num_convs: number of convolutions
in conv -> num convs -> final conv
add_norm: en-/disable normalization layers in internal layers
reduction: reduction to apply to loss. 'sum' | 'mean' | 'none'
loss_weight: scalar to balance multiple losses
learn_scale: learn additional single scalar values per feature
pyramid level
kwargs: keyword arguments passed to first and internal convolutions
"""
super().__init__(
conv=conv,
in_channels=in_channels,
internal_channels=internal_channels,
anchors_per_pos=anchors_per_pos,
num_levels=num_levels,
num_convs=num_convs,
add_norm=add_norm,
learn_scale=learn_scale,
**kwargs
)
self.loss = GIoULoss(
reduction=reduction,
loss_weight=loss_weight,
)
RegressorType = TypeVar('RegressorType', bound=Regressor)
```
#### File: arch/layers/interpolation.py
```python
import torch
import torch.nn.functional as F
from typing import Union, Tuple, List
from torch import Tensor
__all__ = ["InterpolateToShapes", "InterpolateToShape", "Interpolate"]
class InterpolateToShapes(torch.nn.Module):
def __init__(self, mode: str = "nearest", align_corners: bool = None):
"""
Downsample target tensor to size of prediction feature maps
Args:
mode: algorithm used for upsampling: nearest, linear, bilinear,
bicubic, trilinear, area. Defaults to "nearest".
align_corners: Align corners points for interpolation. (see pytorch
for more info) Defaults to None.
See Also:
:func:`torch.nn.functional.interpolate`
Warnings:
Use nearest for segmentation, everything else will result in
wrong values.
"""
super().__init__()
self.mode = mode
self.align_corners = align_corners
def forward(self, preds: List[Tensor], target: Tensor) -> List[Tensor]:
"""
Interpolate target to match shape with predictions
Args:
preds: predictions to extract shape of
target: target to interpolate
Returns:
List[Tensor]: interpolated targets
"""
shapes = [tuple(pred.shape)[2:] for pred in preds]
squeeze_result = False
if target.ndim == preds[0].ndim - 1:
target = target.unsqueeze(dim=1)
squeeze_result = True
new_targets = [F.interpolate(
target, size=shape, mode=self.mode, align_corners=self.align_corners)
for shape in shapes]
if squeeze_result:
new_targets = [nt.squeeze(dim=1) for nt in new_targets]
return new_targets
class MaxPoolToShapes(torch.nn.Module):
def forward(self, preds: List[Tensor], target: Tensor) -> List[Tensor]:
"""
Pool target to match shape with predictions
Args:
preds: predictions to extract shape of
target: target to pool
Returns:
List[Tensor]: pooled targets
"""
dim = preds[0].ndim - 2
target_shape = list(target.shape)[-dim:]
pool = []
for pred in preds:
pred_shape = list(pred.shape)[-dim:]
pool.append(tuple([int(t / p) for t, p in zip(target_shape, pred_shape)]))
squeeze_result = False
if target.ndim == preds[0].ndim - 1:
target = target.unsqueeze(dim=1)
squeeze_result = True
fn = getattr(F, f"max_pool{dim}d")
new_targets = [fn(target, kernel_size=p, stride=p) for p in pool]
if squeeze_result:
new_targets = [nt.squeeze(dim=1) for nt in new_targets]
return new_targets
class InterpolateToShape(InterpolateToShapes):
"""
Interpolate predictions to target size
"""
def forward(self, preds: List[Tensor], target: Tensor) -> List[Tensor]:
"""
Interpolate predictions to match target
Args:
            preds: predictions to interpolate
            target: target providing the reference shape
        Returns:
            List[Tensor]: interpolated predictions
"""
shape = tuple(target.shape)[2:]
squeeze_result = False
if target.ndim == preds[0].ndim - 1:
target = target.unsqueeze(dim=1)
squeeze_result = True
new_targets = [F.interpolate(
pred, size=shape, mode=self.mode, align_corners=self.align_corners)
for pred in preds]
if squeeze_result:
new_targets = [nt.squeeze(dim=1) for nt in new_targets]
return new_targets
class Interpolate(torch.nn.Module):
def __init__(self, size: Union[int, Tuple[int]] = None,
scale_factor: Union[float, Tuple[float]] = None,
mode: str = "nearest", align_corners: bool = None):
"""
nn.Module for interpolation based on functional interpolation from
pytorch
Args:
size: output spatial size. Defaults to None.
scale_factor: multiplier for spatial size. Has to match input size
if it is a tuple. Defaults to None.
mode: algorithm used for upsampling: nearest, linear, bilinear,
                bicubic, trilinear, area. Defaults to "nearest".
align_corners: Align corners points for interpolation. (see pytorch
for more info) Defaults to None.
See Also:
:func:`torch.nn.functional.interpolate`
"""
super().__init__()
self.size = size
self.scale_factor = scale_factor
self.mode = mode
self.align_corners = align_corners
def forward(self, x: Tensor) -> Tensor:
"""
Interpolate input batch
Args:
x: input tensor to interpolate
Returns:
Tensor: interpolated tensor
"""
return F.interpolate(
x, size=self.size, scale_factor=self.scale_factor,
mode=self.mode, align_corners=self.align_corners)
```
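A minimal sketch of `InterpolateToShapes` with dummy tensors; the module path is assumed from the file location. Nearest interpolation is used so the segmentation labels stay intact, as the warning in the class docstring suggests.

```python
# Minimal sketch: downsample a segmentation target to the sizes of multi-scale predictions.
import torch
from nndet.arch.layers.interpolation import InterpolateToShapes  # module path assumed

preds = [torch.randn(2, 4, 64, 64), torch.randn(2, 4, 32, 32)]   # two 2D prediction maps
target = torch.randint(0, 4, (2, 64, 64)).float()                # segmentation without channel dim

to_shapes = InterpolateToShapes(mode="nearest")
targets = to_shapes(preds, target)
print([tuple(t.shape) for t in targets])                         # [(2, 64, 64), (2, 32, 32)]
```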
#### File: arch/layers/norm.py
```python
import torch.nn as nn
from typing import Optional
"""
Note: register new normalization layers in
nndet.training.optimizer.NORM_TYPES to exclude them from weight decay
"""
class GroupNorm(nn.GroupNorm):
def __init__(self, num_channels: int,
num_groups: Optional[int] = None,
channels_per_group: Optional[int] = None,
eps: float = 1e-05, affine: bool = True, **kwargs) -> None:
"""
PyTorch Group Norm (changed interface, num_channels at first position)
Args:
num_channels: number of input channels
num_groups: number of groups to separate channels. Mutually
exclusive with `channels_per_group`
channels_per_group: number of channels per group. Mutually exclusive
with `num_groups`
            eps: value added to the denominator for numerical stability. Defaults to 1e-05.
affine: Enable learnable per channel affine params. Defaults to True.
"""
if channels_per_group is not None:
if num_groups is not None:
raise ValueError("Can only use `channels_per_group` OR `num_groups` in GroupNorm")
num_groups = num_channels // channels_per_group
super().__init__(num_channels=num_channels,
num_groups=num_groups,
eps=eps, affine=affine, **kwargs)
```
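A minimal sketch of the two mutually exclusive ways to configure this `GroupNorm` wrapper (the import path matches the one used in `nndet/arch/conv.py`).

```python
# Minimal sketch: configure GroupNorm either by number of groups or by channels per group.
import torch
from nndet.arch.layers.norm import GroupNorm

gn_groups = GroupNorm(num_channels=32, num_groups=8)
gn_per_group = GroupNorm(num_channels=32, channels_per_group=16)  # -> 32 // 16 = 2 groups

x = torch.randn(2, 32, 16, 16)
print(gn_groups(x).shape, gn_per_group.num_groups)  # torch.Size([2, 32, 16, 16]) 2
```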
#### File: core/boxes/sampler.py
```python
import torch
from loguru import logger
from abc import ABC
from typing import List
from torch import Tensor
from torchvision.models.detection._utils import BalancedPositiveNegativeSampler
class AbstractSampler(ABC):
def __call__(self, target_labels: List[Tensor], fg_probs: Tensor):
"""
Select positive and negative anchors
Args:
target_labels (List[Tensor]): labels for each anchor per image, List[[A]],
where A is the number of anchors in one image
fg_probs (Tensor): maximum foreground probability per anchor, [R]
where R is the sum of all anchors inside one batch
Returns:
List[Tensor]: binary mask for positive anchors, List[[A]]
List[Tensor]: binary mask for negative anchors, List[[A]]
"""
raise NotImplementedError
class NegativeSampler(BalancedPositiveNegativeSampler, AbstractSampler):
def __call__(self, target_labels: List[Tensor], fg_probs: Tensor):
"""
Randomly sample negatives and positives until batch_size_per_img
is reached
If not enough positive samples are found, it will be padded with
negative samples
"""
return super(NegativeSampler, self).__call__(target_labels)
class HardNegativeSamplerMixin(ABC):
def __init__(self, pool_size: float = 10):
"""
Create a pool from the highest scoring false positives and sample
        a defined number of negatives from it
Args:
pool_size (float): hard negatives are sampled from a pool of size:
batch_size_per_image * (1 - positive_fraction) * pool_size
"""
self.pool_size = pool_size
def select_negatives(self, negative: Tensor, num_neg: int,
img_labels: Tensor, img_fg_probs: Tensor):
"""
Select negative anchors
Args:
negative (Tensor): indices of negative anchors [P],
where P is the number of negative anchors
num_neg (int): number of negative anchors to sample
img_labels (Tensor): labels for all anchors in a image [A],
where A is the number of anchors in one image
img_fg_probs (Tensor): maximum foreground probability per anchor [A],
                where A is the number of anchors in one image
Returns:
Tensor: binary mask of negative anchors to choose [A],
                where A is the number of anchors in one image
"""
pool = int(num_neg * self.pool_size)
pool = min(negative.numel(), pool) # protect against not enough negatives
# select pool of highest scoring false positives
_, negative_idx_pool = img_fg_probs[negative].topk(pool, sorted=True)
negative = negative[negative_idx_pool]
# select negatives from pool
perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg]
neg_idx_per_image = negative[perm2]
neg_idx_per_image_mask = torch.zeros_like(img_labels, dtype=torch.uint8)
neg_idx_per_image_mask[neg_idx_per_image] = 1
return neg_idx_per_image_mask
class HardNegativeSampler(HardNegativeSamplerMixin):
def __init__(self, batch_size_per_image: int, positive_fraction: float,
min_neg: int = 0, pool_size: float = 10):
"""
        Create a pool from the highest scoring false positives and sample
        a defined number of negatives from it
Args:
batch_size_per_image (int): number of elements to be selected per image
positive_fraction (float): percentage of positive elements per batch
pool_size (float): hard negatives are sampled from a pool of size:
batch_size_per_image * (1 - positive_fraction) * pool_size
"""
super().__init__(pool_size=pool_size)
self.min_neg = min_neg
self.batch_size_per_image = batch_size_per_image
self.positive_fraction = positive_fraction
def __call__(self, target_labels: List[Tensor], fg_probs: Tensor):
"""
Select hard negatives from list anchors per image
Args:
target_labels (List[Tensor]): labels for each anchor per image, List[[A]],
where A is the number of anchors in one image
fg_probs (Tensor): maximum foreground probability per anchor, [R]
where R is the sum of all anchors inside one batch
Returns:
List[Tensor]: binary mask for positive anchors, List[[A]]
List[Tensor]: binary mask for negative anchors, List[[A]]
"""
anchors_per_image = [anchors_in_image.shape[0] for anchors_in_image in target_labels]
fg_probs = fg_probs.split(anchors_per_image, 0)
pos_idx = []
neg_idx = []
for img_labels, img_fg_probs in zip(target_labels, fg_probs):
positive = torch.where(img_labels >= 1)[0]
negative = torch.where(img_labels == 0)[0]
num_pos = self.get_num_pos(positive)
pos_idx_per_image_mask = self.select_positives(
positive, num_pos, img_labels, img_fg_probs)
pos_idx.append(pos_idx_per_image_mask)
num_neg = self.get_num_neg(negative, num_pos)
neg_idx_per_image_mask = self.select_negatives(
negative, num_neg, img_labels, img_fg_probs)
neg_idx.append(neg_idx_per_image_mask)
return pos_idx, neg_idx
def get_num_pos(self, positive: torch.Tensor) -> int:
"""
Number of positive samples to draw
Args:
positive: indices of positive anchors
Returns:
            int: number of positive samples
"""
# positive anchor sampling
num_pos = int(self.batch_size_per_image * self.positive_fraction)
# protect against not enough positive examples
num_pos = min(positive.numel(), num_pos)
return num_pos
def get_num_neg(self, negative: torch.Tensor, num_pos: int) -> int:
"""
Sample enough negatives to fill up :param:`self.batch_size_per_image`
Args:
            negative: indices of negative anchors
num_pos: number of positive samples to draw
Returns:
int: number of negative samples
"""
# always assume at least one pos anchor was sampled
num_neg = int(max(1, num_pos) * abs(1 - 1. / float(self.positive_fraction)))
# protect against not enough negative examples and sample at least one neg if possible
num_neg = min(negative.numel(), max(num_neg, self.min_neg))
return num_neg
def select_positives(self, positive: Tensor, num_pos: int,
img_labels: Tensor, img_fg_probs: Tensor):
"""
Select positive anchors
Args:
positive (Tensor): indices of positive anchors [P],
where P is the number of positive anchors
num_pos (int): number of positive anchors to sample
img_labels (Tensor): labels for all anchors in a image [A],
where A is the number of anchors in one image
img_fg_probs (Tensor): maximum foreground probability per anchor [A],
                where A is the number of anchors in one image
Returns:
Tensor: binary mask of positive anchors to choose [A],
                where A is the number of anchors in one image
"""
perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]
pos_idx_per_image = positive[perm1]
pos_idx_per_image_mask = torch.zeros_like(img_labels, dtype=torch.uint8)
pos_idx_per_image_mask[pos_idx_per_image] = 1
return pos_idx_per_image_mask
class HardNegativeSamplerBatched(HardNegativeSampler):
"""
Samples negatives and positives on a per batch basis
(default sampler only does this on a per image basis)
Note:
:attr:`batch_size_per_image` is manipulated to sample the correct
number of samples per batch, use :attr:`_batch_size_per_image`
to get the number of anchors per image
"""
def __init__(self, batch_size_per_image: int, positive_fraction: float,
min_neg: int = 0, pool_size: float = 10):
"""
Args:
batch_size_per_image (int): number of elements to be selected per image
positive_fraction (float): percentage of positive elements per batch
pool_size (float): hard negatives are sampled from a pool of size:
batch_size_per_image * (1 - positive_fraction) * pool_size
"""
super().__init__(min_neg=min_neg, batch_size_per_image=batch_size_per_image,
positive_fraction=positive_fraction, pool_size=pool_size)
self._batch_size_per_image = batch_size_per_image
logger.info("Sampling hard negatives on a per batch basis")
def __call__(self, target_labels: List[Tensor], fg_probs: Tensor):
"""
Select hard negatives from list anchors per image
Args:
target_labels (List[Tensor]): labels for each anchor per image, List[[A]],
where A is the number of anchors in one image
fg_probs (Tensor): maximum foreground probability per anchor, [R]
where R is the sum of all anchors inside one batch
Returns:
List[Tensor]: binary mask for positive anchors, List[[A]]
List[Tensor]: binary mask for negative anchors, List[[A]]
"""
batch_size = len(target_labels)
self.batch_size_per_image = self._batch_size_per_image * batch_size
target_labels_batch = torch.cat(target_labels, dim=0)
positive = torch.where(target_labels_batch >= 1)[0]
negative = torch.where(target_labels_batch == 0)[0]
num_pos = self.get_num_pos(positive)
pos_idx = self.select_positives(
positive, num_pos, target_labels_batch, fg_probs)
num_neg = self.get_num_neg(negative, num_pos)
neg_idx = self.select_negatives(
negative, num_neg, target_labels_batch, fg_probs)
# Comb Head with sampling concatenates masks after sampling so do not split them here
# anchors_per_image = [anchors_in_image.shape[0] for anchors_in_image in target_labels]
# return pos_idx.split(anchors_per_image, 0), neg_idx.split(anchors_per_image, 0)
return [pos_idx], [neg_idx]
class BalancedHardNegativeSampler(HardNegativeSampler):
def get_num_neg(self, negative: torch.Tensor, num_pos: int) -> int:
"""
Sample same number of negatives as positives but at least one
Args:
            negative: indices of negative anchors
num_pos: number of positive samples to draw
Returns:
int: number of negative samples
"""
# protect against not enough negative examples and sample at least one neg if possible
num_neg = min(negative.numel(), max(num_pos, 1))
return num_neg
class HardNegativeSamplerFgAll(HardNegativeSamplerMixin):
def __init__(self, negative_ratio: float = 1, pool_size: float = 10):
"""
Use all positive anchors for loss and sample corresponding number
of hard negatives
Args:
negative_ratio (float): ratio of negative to positive sample;
(samples negative_ratio * positive_anchors examples)
pool_size (float): hard negatives are sampled from a pool of size:
batch_size_per_image * (1 - positive_fraction) * pool_size
"""
super().__init__(pool_size=pool_size)
self.negative_ratio = negative_ratio
def __call__(self, target_labels: List[Tensor], fg_probs: Tensor):
"""
Select hard negatives from list anchors per image
Args:
target_labels (List[Tensor]): labels for each anchor per image, List[[A]],
where A is the number of anchors in one image
fg_probs (Tensor): maximum foreground probability per anchor, [R]
where R is the sum of all anchors inside one batch
Returns:
List[Tensor]: binary mask for positive anchors, List[[A]]
List[Tensor]: binary mask for negative anchors, List[[A]]
"""
anchors_per_image = [anchors_in_image.shape[0] for anchors_in_image in target_labels]
fg_probs = fg_probs.split(anchors_per_image, 0)
pos_idx = []
neg_idx = []
for img_labels, img_fg_probs in zip(target_labels, fg_probs):
negative = torch.where(img_labels == 0)[0]
# positive anchor sampling
pos_idx_per_image_mask = (img_labels >= 1).to(dtype=torch.uint8)
pos_idx.append(pos_idx_per_image_mask)
num_neg = int(self.negative_ratio * pos_idx_per_image_mask.sum())
# protect against not enough negative examples and sample at least one neg if possible
num_neg = min(negative.numel(), max(num_neg, 1))
neg_idx_per_image_mask = self.select_negatives(
negative, num_neg, img_labels, img_fg_probs)
neg_idx.append(neg_idx_per_image_mask)
return pos_idx, neg_idx
```
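A minimal usage sketch for the batched hard-negative sampler above. The label values, anchor counts, and probabilities are made-up assumptions, and the sketch assumes the parent class helpers (`get_num_pos`, `select_positives`, `select_negatives`) behave as their docstrings describe.
```python
# Hypothetical usage of HardNegativeSamplerBatched (all values are illustrative).
import torch

# two images with 6 anchors each: label 0 = background, >=1 = foreground class
target_labels = [torch.tensor([0, 0, 1, 0, 2, 0]), torch.tensor([0, 1, 0, 0, 0, 0])]
# maximum foreground probability per anchor, concatenated over the whole batch
fg_probs = torch.rand(12)

sampler = HardNegativeSamplerBatched(
    batch_size_per_image=4, positive_fraction=0.5, min_neg=1, pool_size=10)
pos_masks, neg_masks = sampler(target_labels, fg_probs)
# pos_masks / neg_masks each hold a single binary mask over all 12 anchors,
# because the batched sampler does not split the result back per image.
```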
#### File: nndet/io/patching.py
```python
import typing
import itertools
import numpy as np
from loguru import logger
from skimage.measure import regionprops
import SimpleITK as sitk
def center_crop_object_mask(mask: np.ndarray, cshape: typing.Union[tuple, int],
) -> typing.List[tuple]:
"""
Creates indices to crop patches around individual objects in mask
Args
        mask: mask where objects have different numbers. Objects need to be numbered
            consecutively from one to n, with 0 as background.
        cshape: size of individual crops. Needs to be divisible by two.
            Otherwise crops do not have the expected size.
            If cshape is an int, crops will have the same size in every dimension.
Returns
list[tuple]: each crop generates one tuple with indices
Raises
TypeError: raised if mask and patches define different dimensionalities
TypeError: raised if `cshape` is larger than mask
See Also
:func:`save_get_crop`
Warnings
        The returned crops are not checked for image boundaries. Slices
        with negative indices and indices which extend over the mask boundaries
        are possible! To correct for this, use `save_get_crop` which handles
        these cases.
"""
if isinstance(cshape, int):
cshape = tuple([cshape] * mask.ndim)
if mask.ndim != len(cshape):
raise TypeError("Size of crops needs to be defined for "
"every dimension")
if any(np.subtract(mask.shape, cshape) < 0):
raise TypeError("Patches must be smaller than data.")
if mask.max() == 0:
# no objects in mask
return []
all_centroids = [i['centroid'] for i in regionprops(mask.astype(np.int32))]
crops = []
for centroid in all_centroids:
crops.append(tuple(slice(int(c) - (s // 2), int(c) + (s // 2))
for c, s in zip(centroid, cshape)))
return crops
def center_crop_object_seg(seg: np.ndarray, cshape: typing.Union[tuple, int],
**kwargs) -> typing.List[tuple]:
"""
Creates indices to crop patches around individual objects in segmentation.
Objects are determined by region growing with connected threshold.
Args
seg: semantic segmentation of objects.
        cshape: size of individual crops. Needs to be divisible by two.
            Otherwise crops do not have the expected size.
            If cshape is an int, crops will have the same size in every dimension.
kwargs: additional keyword arguments passed to `center_crop_objects_mask`
Returns
list[tuple]: each crop generates one tuple with indices
See Also
:func:`save_get_crop`
Warnings
        The returned crops are not checked for image boundaries. Slices
        with negative indices and indices which extend over the mask boundaries
        are possible! To correct for this, use `save_get_crop` which handles
        these cases.
"""
_mask, _ = create_mask_from_seg(seg)
return center_crop_object_mask(_mask, cshape=cshape, **kwargs)
def create_mask_from_seg(seg: np.ndarray) -> typing.Tuple[np.ndarray, list]:
"""
Create a mask where objects are enumerated from 1, ..., n.
Objects are determined by region growing with connected threshold.
Args
seg: semantic segmentation array
Returns
np.ndarray: mask with objects
list: classes to objects (ascending order)
"""
_seg = np.copy(seg).astype(np.int32)
_seg_sitk = sitk.GetImageFromArray(_seg)
_mask = np.zeros_like(seg).astype(np.int32)
_obj_cls = []
_obj = 1
while _seg.max() > 0:
# choose one seed in segmentation
seed = np.transpose(np.nonzero(_seg))[0]
# invert coordinates for sitk
seed_sitk = tuple(seed[:: -1].tolist())
seed = tuple(seed)
# region growing
seg_con = sitk.ConnectedThreshold(_seg_sitk,
seedList=[seed_sitk],
lower=int(_seg[seed]),
upper=int(_seg[seed]))
seg_con = sitk.GetArrayFromImage(seg_con).astype(bool)
# add object to mask
_mask[seg_con] = _obj
_obj_cls.append(_seg[seed])
# remove object from segmentation
_seg[seg_con] = 0
_obj += 1
# objects should never overlap
assert _mask.max() < _obj
return _mask, _obj_cls
def create_grid(cshape: typing.Union[typing.Sequence[int], int],
dshape: typing.Sequence[int],
overlap: typing.Union[typing.Sequence[int], int] = 0,
mode='fixed',
center_boarder: bool = False,
**kwargs,
) -> typing.List[typing.Tuple[slice]]:
"""
Create indices for a grid
Args
cshape: size of individual patches
dshape: shape of data
        overlap: overlap between patches. If `overlap` is an integer it is applied
            to all dimensions.
        mode: defines how borders should be handled, by default 'fixed'.
            `fixed` creates patches without special handling of borders, thus
            the last patch might exceed `dshape`
            `symmetric` moves patches such that the first and last patch
            overlap `dshape` equally (when combined with padding, the last and
            first patch would have the same amount of padding)
        center_boarder: adds additional crops at the borders which have the
            border as their center
Returns
typing.List[slice]: slices to extract patches
Raises
TypeError: raised if `cshape` and `dshape` do not have the same length
TypeError: raised if `overlap` and `dshape` do not have the same length
TypeError: raised if `cshape` is larger than `dshape`
TypeError: raised if `overlap` is larger than `cshape`
Warnings
        The returned crops can exceed the image boundaries. Slices
        with negative indices and indices which extend over the image
        boundary are possible. To correct for this, use `save_get_crop`
        which handles these cases at the borders.
"""
_mode_fn = {
"fixed": _fixed_slices,
"symmetric": _symmetric_slices,
}
if len(dshape) == 3 and len(cshape) == 2:
logger.info("Creating 2d grid.")
slices_3d = dshape[0]
dshape = dshape[1:]
else:
slices_3d = None
# create tuples from shapes
if isinstance(cshape, int):
cshape = tuple([cshape] * len(dshape))
if isinstance(overlap, int):
overlap = tuple([overlap] * len(dshape))
# check shapes
if len(cshape) != len(dshape):
raise TypeError(
"cshape and dshape must be defined for same dimensionality.")
if len(overlap) != len(dshape):
raise TypeError(
"overlap and dshape must be defined for same dimensionality.")
if any(np.subtract(dshape, cshape) < 0):
axes = np.nonzero(np.subtract(dshape, cshape) < 0)
logger.warning(f"Found patch size which is bigger than data: data {dshape} patch {cshape}")
if any(np.subtract(cshape, overlap) < 0):
raise TypeError("Overlap must be smaller than size of patches.")
grid_slices = [_mode_fn[mode](psize, dlim, ov, **kwargs)
for psize, dlim, ov in zip(cshape, dshape, overlap)]
if center_boarder:
for idx, (psize, dlim, ov) in enumerate(zip(cshape, dshape, overlap)):
lower_bound_start = int(-0.5 * psize)
upper_bound_start = dlim - int(0.5 * psize)
grid_slices[idx] = tuple([
slice(lower_bound_start, lower_bound_start + psize),
*grid_slices[idx],
slice(upper_bound_start, upper_bound_start + psize),
])
if slices_3d is not None:
grid_slices = [tuple([slice(i, i + 1) for i in range(slices_3d)])] + grid_slices
grid = list(itertools.product(*grid_slices))
return grid
def _fixed_slices(psize: int, dlim: int, overlap: int, start: int = 0) -> typing.Tuple[slice]:
"""
    Creates fixed slicing of a single axis. Only the last patch may exceed dlim.
Args
psize: size of patch
dlim: size of data
overlap: overlap between patches
start: where to start patches, by default 0
Returns
typing.List[slice]: ordered slices for a single axis
"""
upper_limit = 0
lower_limit = start
idx = 0
crops = []
while upper_limit < dlim:
if idx != 0:
lower_limit = lower_limit - overlap
upper_limit = lower_limit + psize
crops.append(slice(lower_limit, upper_limit))
lower_limit = upper_limit
idx += 1
return tuple(crops)
def _symmetric_slices(psize: int, dlim: int, overlap: int) -> typing.Tuple[slice]:
"""
    Creates symmetric slicing of a single axis. The first and last patch
    may exceed the data borders.
    Args
        psize: size of patch
        dlim: size of data
        overlap: overlap between patches
Returns
typing.List[slice]: ordered slices for a single axis
"""
if psize >= dlim:
return _fixed_slices(psize, dlim, overlap, start=-(psize - dlim) // 2)
pmod = dlim % (psize - overlap)
start = (pmod - psize) // 2
return _fixed_slices(psize, dlim, overlap, start=start)
def save_get_crop(data: np.ndarray,
crop: typing.Sequence[slice],
mode: str = "shift",
**kwargs,
) -> typing.Tuple[np.ndarray,
typing.Tuple[int],
typing.Tuple[slice]]:
"""
Safely extract crops from data
Args
        data: array which patches are extracted from
        crop: contains the coordinates of a single crop as slices
        mode: Handling of borders when crops are outside of data, by default "shift".
            Following modes are supported: "shift" crops are shifted inside the
            data | other modes are identical to `np.pad`
        kwargs: additional keyword arguments passed to `np.pad`
    Returns
        np.ndarray: crop extracted from data
Tuple[int]: origin offset of crop with regard to data origin (can be
used to offset bounding boxes)
Tuple[slice]: crop from data used to extract information
See Also
:func:`center_crop_objects_mask`, :func:`center_crop_objects_seg`
Warnings
        This function only supports positive indexing. Negative indices are
        interpreted as if they were outside the lower boundary!
"""
if len(crop) > data.ndim:
raise TypeError(
"crop must have smaller or same dimensionality as data.")
if mode == 'shift':
# move slices if necessary
return _shifted_crop(data, crop)
else:
# use np.pad if necessary
return _padded_crop(data, crop, mode, **kwargs)
def _shifted_crop(data: np.ndarray,
crop: typing.Sequence[slice],
) -> typing.Tuple[np.ndarray,
typing.Tuple[int],
typing.Tuple[slice]]:
"""
    Create shifted crops to handle borders
Args
data: crop is extracted from data
crop: defines boundaries of crops
Returns
        np.ndarray: shifted crop extracted from data
Tuple[int]: origin offset of crop with regard to data origin (can be
used to offset bounding boxes)
Tuple[slice]: crop from data used to extract information
    Raises
        RuntimeError: raised if the patch is bigger than the data
Warnings
        This function only supports positive indexing. Negative indices are
        interpreted as if they were outside the lower boundary!
"""
shifted_crop = []
dshape = tuple(data.shape)
# index from back, so batch and channel dimensions must not be defined
axis = data.ndim - len(crop)
for idx, crop_dim in enumerate(crop):
if crop_dim.start < 0:
# start is negative, thus it is subtracted from stop
new_slice = slice(0, crop_dim.stop - crop_dim.start, crop_dim.step)
if new_slice.stop > dshape[axis + idx]:
raise RuntimeError(
"Patch is bigger than entire data. shift "
"is not supported in this case.")
shifted_crop.append(new_slice)
elif crop_dim.stop > dshape[axis + idx]:
new_slice = \
slice(crop_dim.start - (crop_dim.stop - dshape[axis + idx]),
dshape[axis + idx], crop_dim.step)
if new_slice.start < 0:
raise RuntimeError(
"Patch is bigger than entire data. shift "
"is not supported in this case.")
shifted_crop.append(new_slice)
else:
shifted_crop.append(crop_dim)
origin = [int(x.start) for x in shifted_crop]
return data[tuple([..., *shifted_crop])], origin, shifted_crop
def _padded_crop(data: np.ndarray,
crop: typing.Sequence[slice],
mode: str,
**kwargs,
) -> typing.Tuple[np.ndarray,
typing.Tuple[int],
typing.Tuple[slice]]:
"""
Extract patch from data and pad accordingly
Args
data: crop is extracted from data
crop: defines boundaries of crops
mode: mode for padding. See `np.pad` for more details
kwargs: additional keyword arguments passed to :func:`np.pad`
Returns
        np.ndarray: padded crop extracted from data
Tuple[int]: origin offset of crop with regard to data origin (can be
used to offset bounding boxes)
Tuple[slice]: crop from data used to extract information
"""
clipped_crop = []
dshape = tuple(data.shape)
# index from back, so batch and channel dimensions must not be defined
axis = data.ndim - len(crop)
padding = [(0, 0)] * axis if axis > 0 else []
for idx, crop_dim in enumerate(crop):
lower_pad = 0
upper_pad = 0
lower_bound = crop_dim.start
upper_bound = crop_dim.stop
# handle lower bound
if lower_bound < 0:
lower_pad = -lower_bound
lower_bound = 0
# handle upper bound
if upper_bound > dshape[axis + idx]:
upper_pad = upper_bound - dshape[axis + idx]
upper_bound = dshape[axis + idx]
padding.append((lower_pad, upper_pad))
clipped_crop.append(slice(lower_bound, upper_bound, crop_dim.step))
origin = [int(x.start) for x in crop]
return (np.pad(data[tuple([..., *clipped_crop])], pad_width=padding, mode=mode, **kwargs),
origin,
clipped_crop,
)
```
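A short usage sketch for the grid and cropping helpers above. The array shape, patch size, and overlap are illustrative assumptions, not values used by nnDetection itself.
```python
# Hypothetical example for create_grid / save_get_crop (shapes are made up).
import numpy as np

data = np.random.rand(1, 64, 128, 128)      # one channel plus 3 spatial dims
grid = create_grid(cshape=(32, 64, 64), dshape=(64, 128, 128), overlap=(8, 16, 16))
print(len(grid))                             # number of patches in the grid

# extract the first patch; out-of-bounds crops are shifted back inside the data
patch, origin, used_crop = save_get_crop(data, grid[0], mode="shift")
print(patch.shape, origin)
```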
#### File: nndet/losses/classification.py
```python
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch import Tensor
from loguru import logger
from nndet.losses.base import reduction_helper
from nndet.utils import make_onehot_batch
def one_hot_smooth(data,
num_classes: int,
smoothing: float = 0.0,
):
targets = torch.empty(size=(*data.shape, num_classes), device=data.device)\
.fill_(smoothing / num_classes)\
.scatter_(-1, data.long().unsqueeze(-1), 1. - smoothing)
return targets
@torch.jit.script
def focal_loss_with_logits(
logits: torch.Tensor,
target: torch.Tensor, gamma: float,
alpha: float = -1,
reduction: str = "mean",
) -> torch.Tensor:
"""
Focal loss
https://arxiv.org/abs/1708.02002
Args:
logits: predicted logits [N, dims]
target: (float) binary targets [N, dims]
gamma: balance easy and hard examples in focal loss
        alpha: balance positive and negative samples [0, 1] (increasing
            alpha increases the weight of foreground classes (better recall))
reduction: 'mean'|'sum'|'none'
mean: mean of loss over entire batch
sum: sum of loss over entire batch
none: no reduction
Returns:
torch.Tensor: loss
See Also
:class:`BFocalLossWithLogits`, :class:`FocalLossWithLogits`
"""
bce_loss = F.binary_cross_entropy_with_logits(logits, target, reduction='none')
p = torch.sigmoid(logits)
pt = (p * target + (1 - p) * (1 - target))
focal_term = (1. - pt).pow(gamma)
loss = focal_term * bce_loss
if alpha >= 0:
alpha_t = (alpha * target + (1 - alpha) * (1 - target))
loss = alpha_t * loss
return reduction_helper(loss, reduction=reduction)
class FocalLossWithLogits(nn.Module):
def __init__(self,
gamma: float = 2,
alpha: float = -1,
reduction: str = "sum",
loss_weight: float = 1.,
):
"""
Focal loss with multiple classes (uses one hot encoding and sigmoid)
Args:
gamma: balance easy and hard examples in focal loss
            alpha: balance positive and negative samples [0, 1] (increasing
                alpha increases the weight of foreground classes (better recall))
reduction: 'mean'|'sum'|'none'
mean: mean of loss over entire batch
sum: sum of loss over entire batch
none: no reduction
loss_weight: scalar to balance multiple losses
"""
super().__init__()
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
logits: torch.Tensor,
targets: torch.Tensor,
) -> torch.Tensor:
"""
Compute loss
Args:
logits: predicted logits [N, C, dims], where N is the batch size,
C number of classes, dims are arbitrary spatial dimensions
(background classes should be located at channel 0 if
ignore background is enabled)
targets: targets encoded as numbers [N, dims], where N is the
batch size, dims are arbitrary spatial dimensions
Returns:
torch.Tensor: loss
"""
n_classes = logits.shape[1] + 1
target_onehot = make_onehot_batch(targets, n_classes=n_classes).float()
target_onehot = target_onehot[:, 1:]
return self.loss_weight * focal_loss_with_logits(
logits, target_onehot,
gamma=self.gamma,
alpha=self.alpha,
reduction=self.reduction,
)
class BCEWithLogitsLossOneHot(torch.nn.BCEWithLogitsLoss):
def __init__(self,
*args,
num_classes: int,
smoothing: float = 0.0,
loss_weight: float = 1.,
**kwargs,
):
"""
BCE loss with one hot encoding of targets
Args:
num_classes: number of classes
smoothing: label smoothing
loss_weight: scalar to balance multiple losses
"""
super().__init__(*args, **kwargs)
self.smoothing = smoothing
if smoothing > 0:
logger.info(f"Running label smoothing with smoothing: {smoothing}")
self.num_classes = num_classes
self.loss_weight = loss_weight
def forward(self,
input: Tensor,
target: Tensor,
) -> Tensor:
"""
Compute bce loss based on one hot encoding
Args:
input: logits for all foreground classes [N, C]
N is the number of anchors, and C is the number of foreground
classes
            target: target classes [N], where N is the number of anchors.
                0 is treated as background, >0 are treated as foreground classes.
Returns:
Tensor: final loss
"""
target_one_hot = one_hot_smooth(
target, num_classes=self.num_classes + 1, smoothing=self.smoothing) # [N, C + 1]
target_one_hot = target_one_hot[:, 1:] # background is implicitly encoded
return self.loss_weight * super().forward(input, target_one_hot.float())
class CrossEntropyLoss(torch.nn.CrossEntropyLoss):
def __init__(self,
*args,
loss_weight: float = 1.,
**kwargs,
) -> None:
"""
Same as CE from pytorch with additional loss weight for uniform API
"""
super().__init__(*args, **kwargs)
self.loss_weight = loss_weight
def forward(self,
input: Tensor,
target: Tensor,
) -> Tensor:
"""
Same as CE from pytorch
"""
return self.loss_weight * super().forward(input, target)
```
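A minimal sketch of how the smoothed one-hot encoding and focal loss above could be exercised. The class count, anchor count, and smoothing value are illustrative assumptions.
```python
# Hypothetical example for one_hot_smooth and focal_loss_with_logits (values are made up).
import torch

num_fg_classes = 3
targets = torch.tensor([0, 2, 1, 0])          # 0 = background, >0 = foreground class
logits = torch.randn(4, num_fg_classes)       # one logit per foreground class

# smoothed one-hot targets; background channel dropped as in BCEWithLogitsLossOneHot
onehot = one_hot_smooth(targets, num_classes=num_fg_classes + 1, smoothing=0.1)[:, 1:]
loss = focal_loss_with_logits(logits, onehot, gamma=2.0, alpha=0.25, reduction="mean")
print(loss.item())
```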
#### File: planning/architecture/abstract.py
```python
from abc import ABC, abstractmethod
from typing import TypeVar
class ArchitecturePlanner(ABC):
def __init__(self, **kwargs):
"""
Plan architecture and training hyperparameters (batch size and patch size)
"""
for key, item in kwargs.items():
setattr(self, key, item)
@abstractmethod
def plan(self, *args, **kwargs) -> dict:
"""
Plan architecture and training parameters
Args:
*args: positional arguments determined by Planner
**kwargs: keyword arguments determined by Planner
Returns:
dict: training and architecture information
`patch_size` (Sequence[int]): patch size
`batch_size` (int): batch size for training
`architecture` (dict): dictionary with all parameters needed for the final model
"""
raise NotImplementedError
def approximate_vram(self):
"""
Approximate vram usage of model for planning
"""
pass
def get_planner_id(self) -> str:
"""
Create identifier for this planner
Returns:
str: identifier
"""
return self.__class__.__name__
ArchitecturePlannerType = TypeVar('ArchitecturePlannerType', bound=ArchitecturePlanner)
```
#### File: architecture/boxes/base.py
```python
import os
from pathlib import Path
from abc import abstractmethod
from typing import Type, Dict, Sequence, List, Callable, Tuple
import torch
import numpy as np
from tqdm import tqdm
from loguru import logger
from torchvision.models.detection.rpn import AnchorGenerator
from nndet.utils.info import SuppressPrint
with SuppressPrint():
from nnunet.experiment_planning.common_utils import get_pool_and_conv_props
from nndet.io.load import load_pickle
from nndet.arch.abstract import AbstractModel
from nndet.planning.estimator import MemoryEstimator, MemoryEstimatorDetection
from nndet.planning.architecture.abstract import ArchitecturePlanner
from nndet.core.boxes import (
get_anchor_generator,
expand_to_boxes,
box_center,
box_size,
compute_anchors_for_strides,
box_iou,
box_size_np,
box_area_np,
permute_boxes,
)
from nndet.planning.architecture.boxes.utils import (
fixed_anchor_init,
scale_with_abs_strides,
)
class BaseBoxesPlanner(ArchitecturePlanner):
def __init__(self,
preprocessed_output_dir: os.PathLike,
save_dir: os.PathLike,
network_cls: Type[AbstractModel] = None,
estimator: MemoryEstimator = None,
**kwargs,
):
"""
Plan the architecture for training
        Args:
            preprocessed_output_dir: base preprocessed directory to access
                properties and save analysis files
            save_dir: directory to save analysis plots
            network_cls: constructor of the network to plan
            estimator: estimates GPU memory requirements of the architecture
"""
super().__init__(**kwargs)
self.preprocessed_output_dir = Path(preprocessed_output_dir)
self.save_dir = Path(save_dir)
self.save_dir.mkdir(parents=True, exist_ok=True)
self.network_cls = network_cls
self.estimator = estimator
self.dataset_properties = load_pickle(
self.preprocessed_output_dir / "properties" / 'dataset_properties.pkl')
# parameters initialized from process properties
self.all_boxes: np.ndarray = None
self.all_ious: np.ndarray = None
self.class_ious: Dict[str, np.ndarray] = None
self.num_instances: Dict[int, int] = None
self.dim: int = None
self.architecture_kwargs: dict = {}
self.transpose_forward = None
def process_properties(self, **kwargs):
"""
Load dataset properties and extract information
"""
assert self.transpose_forward is not None
boxes = [case["boxes"] for case_id, case
in self.dataset_properties["instance_props_per_patient"].items()]
self.all_boxes = np.concatenate([b for b in boxes if not isinstance(b, list) and b.size > 0], axis=0)
self.all_boxes = permute_boxes(self.all_boxes, dims=self.transpose_forward)
self.all_ious = self.dataset_properties["all_ious"]
self.class_ious = self.dataset_properties["class_ious"]
self.num_instances = self.dataset_properties["num_instances"]
self.num_instances_per_case = {case_id: sum(case["num_instances"].values())
for case_id, case in self.dataset_properties["instance_props_per_patient"].items()}
self.dim = self.dataset_properties["dim"]
self.architecture_kwargs["classifier_classes"] = \
len(self.dataset_properties["class_dct"])
self.architecture_kwargs["seg_classes"] = \
self.architecture_kwargs["classifier_classes"]
self.architecture_kwargs["in_channels"] = \
len(self.dataset_properties["modalities"])
self.architecture_kwargs["dim"] = \
self.dataset_properties["dim"]
def plot_box_distribution(self, **kwargs):
"""
Plot histogram with ground truth bounding box distribution for
        all axes
"""
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
logger.error("Failed to import matplotlib continue anyway.")
if plt is not None:
if isinstance(self.all_boxes, list):
_boxes = np.concatenate(
[b for b in self.all_boxes if not isinstance(b, list) and b.size > 0], axis=0)
dists = box_size_np(_boxes)
else:
dists = box_size_np(self.all_boxes)
for axis in range(dists.shape[1]):
dist = dists[:, axis]
plt.hist(dist, bins=100)
plt.savefig(
self.save_dir / f'bbox_sizes_axis_{axis}.png')
plt.xscale('log')
plt.savefig(
self.save_dir / f'bbox_sizes_axis_{axis}_xlog.png')
plt.yscale('log')
plt.savefig(
self.save_dir / f'bbox_sizes_axis_{axis}_xylog.png')
plt.close()
def plot_box_area_distribution(self, **kwargs):
"""
Plot histogram of areas of all ground truth boxes
"""
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
logger.error("Failed to import matplotlib continue anyway.")
if plt is not None:
if isinstance(self.all_boxes, list):
_boxes = np.concatenate(
[b for b in self.all_boxes if not isinstance(b, list) and b.size > 0], axis=0)
area = box_area_np(_boxes)
else:
area = box_area_np(self.all_boxes)
plt.hist(area, bins=100)
plt.savefig(self.save_dir / f'box_areas.png')
plt.xscale('log')
plt.savefig(self.save_dir / f'box_areas_xlog.png')
plt.yscale('log')
plt.savefig(self.save_dir / f'box_areas_xylog.png')
plt.close()
def plot_class_distribution(self, **kwargs):
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
logger.error("Failed to import matplotlib continue anyway.")
if plt is not None:
num_instances_dict = self.dataset_properties["num_instances"]
num_instances = []
classes = []
for key, item in num_instances_dict.items():
num_instances.append(item)
classes.append(str(key))
ind = np.arange(len(num_instances))
plt.bar(ind, num_instances)
plt.xlabel("Classes")
plt.ylabel("Num Instances")
plt.xticks(ind, classes)
plt.savefig(self.save_dir / f'num_classes.png')
plt.yscale('log')
plt.savefig(self.save_dir / f'num_classes_ylog.png')
plt.close()
def plot_instance_distribution(self, **kwargs):
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
logger.error("Failed to import matplotlib continue anyway.")
if plt is not None:
num_instances_per_case = list(self.num_instances_per_case.values())
plt.hist(num_instances_per_case, bins=100, range=(0, 100))
plt.savefig(self.save_dir / f'instances_per_case.png')
plt.close()
plt.hist(num_instances_per_case, bins=30, range=(0, 30))
plt.savefig(self.save_dir / f'instances_per_case_0_30.png')
plt.close()
plt.hist(num_instances_per_case, bins=11, range=(0, 11))
plt.savefig(self.save_dir / f'instances_per_case_0_10.png')
plt.close()
@abstractmethod
def _plan_anchors(self) -> dict:
"""
Plan anchors hyperparameters
"""
raise NotImplementedError
@abstractmethod
def _plan_architecture(self) -> Sequence[int]:
"""
Plan architecture
"""
raise NotImplementedError
def plan(self, **kwargs) -> dict:
"""
Plan architecture and training params
"""
for key, item in kwargs.items():
setattr(self, key, item)
self.create_default_settings()
if self.all_boxes is None:
self.process_properties(**kwargs)
self.plot_box_area_distribution(**kwargs)
self.plot_box_distribution(**kwargs)
self.plot_class_distribution(**kwargs)
self.plot_instance_distribution(**kwargs)
return {}
def create_default_settings(self):
pass
def compute_class_weights(self) -> List[float]:
"""
        Compute classification weighting for imbalanced datasets
        (background samples get weight 1 / (num_classes + 1) and foreground
        class i is weighted with (1 - 1 / (num_classes + 1)) * (1 - n_i / n_all),
        where n_i is the number of ground truth samples for class i and
        n_all is the total number of ground truth samples)
Returns:
List[float]: weights
"""
num_instances_dict = self.dataset_properties["num_instances"]
num_classes = len(num_instances_dict)
num_instances = [0] * num_classes
for key, item in num_instances_dict.items():
num_instances[int(key)] = int(item)
bg_weight = 1 / (num_classes + 1)
remaining_weight = 1 - bg_weight
weights = [remaining_weight * (1 - ni / sum(num_instances)) for ni in num_instances]
return [bg_weight] + weights
def get_planner_id(self) -> str:
"""
Create identifier for this planner. If available append
:attr:`plan_tag` to the base name
Returns:
str: identifier
"""
base = super().get_planner_id()
if hasattr(self, "plan_tag"):
base = base + getattr(self, "plan_tag")
return base
class BoxC001(BaseBoxesPlanner):
def __init__(self,
preprocessed_output_dir: os.PathLike,
save_dir: os.PathLike,
network_cls: Callable,
estimator: MemoryEstimator = MemoryEstimatorDetection(),
model_cfg: dict = None,
**kwargs,
):
"""
Plan training architecture with heuristics
Args:
preprocessed_output_dir: base preprocessed directory to
access properties and save analysis files
save_dir: directory to save analysis plots
network_cls: constructor of network to plan
estimator: estimate GPU memory requirements for specific GPU
architectures. Defaults to MemoryEstimatorDetection().
"""
super().__init__(
preprocessed_output_dir=preprocessed_output_dir,
save_dir=save_dir,
network_cls=network_cls,
estimator=estimator,
**kwargs,
)
self.additional_params = {}
if model_cfg is None:
model_cfg = {}
self.model_cfg = model_cfg
self.plan_anchor_for_estimation = fixed_anchor_init(self.dim)
def create_default_settings(self):
"""
Generate some default settings for the architecture
"""
# MAX_NUM_FILTERS_2D, MAX_NUM_FILTERS_3D from nnUNet
self.architecture_kwargs["max_channels"] = 480 if self.dim == 2 else 320
# BASE_NUM_FEATURES_3D from nnUNet
self.architecture_kwargs["start_channels"] = 32
# DEFAULT_BATCH_SIZE_3D from nnUNet
self.batch_size = 32 if self.dim == 2 else 2
self.max_num_pool = 999
self.min_feature_map_size = 4
self.min_decoder_level = 2
self.num_decoder_level = 4
self.architecture_kwargs["fpn_channels"] = \
self.architecture_kwargs["start_channels"] * 2
self.architecture_kwargs["head_channels"] = \
self.architecture_kwargs["fpn_channels"]
def plan(self,
target_spacing_transposed: Sequence[float],
median_shape_transposed: Sequence[float],
transpose_forward: Sequence[int],
mode: str = "3d",
) -> dict:
"""
Plan network architecture, anchors, patch size and batch size
Args:
target_spacing_transposed: spacing after data is transposed and resampled
median_shape_transposed: median shape after data is
transposed and resampled
transpose_forward: new ordering of axes for forward pass
mode: mode to use for planning (this planner only supports 3d!)
Returns:
dict: training and architecture information
See Also:
            :meth:`_plan_architecture`, :meth:`_plan_anchors`
"""
super().plan(
transpose_forward=transpose_forward,
target_spacing_transposed=target_spacing_transposed,
median_shape_transposed=median_shape_transposed,
)
self.architecture_kwargs["class_weight"] = self.compute_class_weights()
patch_size = self._plan_architecture(
transpose_forward=transpose_forward,
target_spacing_transposed=target_spacing_transposed,
target_median_shape_transposed=median_shape_transposed,
)
anchors = self._plan_anchors(
target_spacing_transposed=target_spacing_transposed,
median_shape_transposed=median_shape_transposed,
transpose_forward=transpose_forward,
)
plan = {"patch_size": patch_size,
"batch_size": self.batch_size,
"architecture": {
"arch_name": self.network_cls.__name__,
**self.architecture_kwargs
},
"anchors": anchors,
}
logger.info(f"Using architecture plan: \n{plan}")
return plan
def _plan_anchors(self, **kwargs) -> dict:
"""
Optimize anchors
"""
boxes_np_full = self.all_boxes.astype(np.float32)
boxes_np = self.filter_boxes(boxes_np_full)
logger.info(f"Filtered {boxes_np_full.shape[0] - boxes_np.shape[0]} "
f"boxes, {boxes_np.shape[0]} boxes remaining for anchor "
"planning.")
boxes_torch = torch.from_numpy(boxes_np).float()
boxes_torch = boxes_torch - expand_to_boxes(box_center(boxes_torch))
anchor_generator = get_anchor_generator(self.dim, s_param=True)
rel_strides = self.architecture_kwargs["strides"]
filt_rel_strides = [[1] * self.dim, *rel_strides]
filt_rel_strides = [filt_rel_strides[i] for i in self.architecture_kwargs["decoder_levels"]]
strides = np.cumprod(filt_rel_strides, axis=0) / np.asarray(rel_strides[0])
params = self.find_anchors(boxes_torch, strides.astype(np.int32), anchor_generator)
scaled_params = {key: scale_with_abs_strides(item, strides, dim_idx) for dim_idx, (key, item) in enumerate(params.items())}
logger.info(f"Determined Anchors: {params}; Results in params: {scaled_params}")
self.anchors = scaled_params
self.anchors["stride"] = 1
return self.anchors
@staticmethod
def filter_boxes(boxes_np: np.ndarray,
upper_percentile: float = 99.5,
lower_percentile: float = 00.5,
) -> np.ndarray:
"""
Determine upper and lower percentiles of bounding box sizes for each
axis and remove boxes which are outside the specified range
Args:
boxes_np (np.ndarray): bounding boxes [N, dim * 2](x1, y1, x2, y2, (z1, z2))
upper_percentile: percentile for upper boundary. Defaults to 99.5.
lower_percentile: percentile for lower boundary. Defaults to 00.5.
Returns:
np.ndarray: filtered boxes
See Also:
:func:`np.percentile`
"""
mask = np.ones(boxes_np.shape[0]).astype(bool)
box_sizes = box_size_np(boxes_np)
for ax in range(box_sizes.shape[1]):
ax_sizes = box_sizes[:, ax]
upper_th = np.percentile(ax_sizes, upper_percentile)
lower_th = np.percentile(ax_sizes, lower_percentile)
ax_mask = (ax_sizes < upper_th) * (ax_sizes > lower_th)
mask = mask * ax_mask
return boxes_np[mask.astype(bool)]
def find_anchors(self,
boxes_torch: torch.Tensor,
strides: Sequence[Sequence[int]],
anchor_generator: AnchorGenerator,
) -> Dict[str, Sequence[int]]:
"""
Find anchors which maximize iou over dataset
Args:
boxes_torch: filtered ground truth boxes
strides (Sequence[Sequence[int]]): strides of network to compute
anchor sizes of lower levels
anchor_generator (AnchorGenerator): anchor generator for generate
the anchors
Returns:
Dict[Sequence[int]]: parameterization of anchors
`width` (Sequence[float]): width values for bounding boxes
`height` (Sequence[float]): height values for bounding boxes
                (`depth` (Sequence[float]): depth values for bounding boxes)
"""
import nevergrad as ng
dim = int(boxes_torch.shape[1] // 2)
sizes = box_size(boxes_torch)
maxs = sizes.max(dim=0)[0]
best_iou = 0
# TBPSA, PSO
for algo in ["TwoPointsDE", "TwoPointsDE", "TwoPointsDE"]:
_best_iou = 0
params = []
for axis in range(dim):
# TODO: find better initialization
anchor_init = self.get_anchor_init(boxes_torch)
p = ng.p.Array(init=np.asarray(anchor_init[axis]))
p.set_integer_casting()
# p.set_bounds(1, maxs[axis].item())
p.set_bounds(lower=1)
params.append(p)
instrum = ng.p.Instrumentation(*params)
optimizer = ng.optimizers.registry[algo](
parametrization=instrum, budget=5000, num_workers=1)
with torch.no_grad():
pbar = tqdm(range(optimizer.budget), f"Anchor Opt {algo}")
for _ in pbar:
x = optimizer.ask()
anchors = anchor_generator.generate_anchors(*x.args)
anchors = compute_anchors_for_strides(
anchors, strides=strides, cat=True)
                    # TODO: add checks if GPU is available and has enough VRAM
iou = box_iou(boxes_torch.cuda(), anchors.cuda()) # boxes x anchors
mean_iou = iou.max(dim=1)[0].mean().cpu()
optimizer.tell(x, -mean_iou.item())
pbar.set_postfix(mean_iou=mean_iou)
_best_iou = mean_iou
if _best_iou > best_iou:
best_iou = _best_iou
recommendation = optimizer.provide_recommendation().value[0]
return {key: list(val) for key, val in zip(["width", "height", "depth"], recommendation)}
def get_anchor_init(self, boxes: torch.Tensor) -> Sequence[Sequence[int]]:
"""
        Initialize anchor sizes for optimization
        Args:
            boxes: scaled and transposed boxes
Returns:
Sequence[Sequence[int]]: anchor initialization
"""
return [(2, 4, 8)] * 3
def _plan_architecture(self,
target_spacing_transposed: Sequence[float],
target_median_shape_transposed: Sequence[float],
**kwargs,
) -> Sequence[int]:
"""
        Plan patch size and main aspects of the architecture
        Fills entries in :attr:`self.architecture_kwargs`:
`conv_kernels`
`strides`
`decoder_levels`
Args:
target_spacing_transposed: spacing after data is transposed and resampled
target_median_shape_transposed: median shape after data is
transposed and resampled
Returns:
Sequence[int]: patch size to use for training
"""
self.estimator.batch_size = self.batch_size
patch_size = np.asarray(self._get_initial_patch_size(
target_spacing_transposed, target_median_shape_transposed))
first_run = True
while True:
if first_run:
pass
else:
patch_size = self._decrease_patch_size(
patch_size, target_median_shape_transposed, pooling, must_be_divisible_by)
num_pool_per_axis, pooling, convs, patch_size, must_be_divisible_by = \
self.plan_pool_and_conv_pool_late(patch_size, target_spacing_transposed)
self.architecture_kwargs["conv_kernels"] = convs
self.architecture_kwargs["strides"] = pooling
num_resolutions = len(self.architecture_kwargs["conv_kernels"])
decoder_levels_start = min(max(0, num_resolutions - self.num_decoder_level), self.min_decoder_level)
self.architecture_kwargs["decoder_levels"] = \
tuple([i for i in range(decoder_levels_start, num_resolutions)])
print(self.architecture_kwargs["decoder_levels"])
print(self.get_anchors_for_estimation())
_, fits_in_mem = self.estimator.estimate(
min_shape=must_be_divisible_by,
target_shape=patch_size,
in_channels=self.architecture_kwargs["in_channels"],
network=self.network_cls.from_config_plan(
model_cfg=self.model_cfg,
plan_arch=self.architecture_kwargs,
plan_anchors=self.get_anchors_for_estimation()),
optimizer_cls=torch.optim.Adam,
)
if fits_in_mem:
break
first_run = False
logger.info(f"decoder levels: {self.architecture_kwargs['decoder_levels']}; \n"
f"pooling strides: {self.architecture_kwargs['strides']}; \n"
f"kernel sizes: {self.architecture_kwargs['conv_kernels']}; \n"
f"patch size: {patch_size}; \n")
return patch_size
def _decrease_patch_size(self,
patch_size: np.ndarray,
target_median_shape_transposed: np.ndarray,
pooling: Sequence[Sequence[int]],
must_be_divisible_by: Sequence[int],
) -> np.ndarray:
"""
        Decrease the largest physical axis. If its bottleneck size is still
        larger than the minimum feature map size, the axis is decreased by the
        minimum value needed to stay divisible by the computed pooling strides,
        and by half of that value otherwise.
Args:
patch_size: current patch size
target_median_shape_transposed: median shape of dataset
correctly transposed
pooling: pooling kernels of network
must_be_divisible_by: necessary divisor per axis
Returns:
np.ndarray: new patch size
"""
argsrt = np.argsort(patch_size / target_median_shape_transposed)[::-1]
pool_fct_per_axis = np.prod(pooling, 0)
bottleneck_size_per_axis = patch_size / pool_fct_per_axis
reduction = []
for i in range(len(patch_size)):
if bottleneck_size_per_axis[i] > self.min_feature_map_size:
reduction.append(must_be_divisible_by[i])
else:
reduction.append(must_be_divisible_by[i] / 2)
patch_size[argsrt[0]] -= reduction[argsrt[0]]
return patch_size
@staticmethod
def _get_initial_patch_size(target_spacing_transposed: np.ndarray,
target_median_shape_transposed: Sequence[int]) -> List[int]:
"""
Generate initial patch which relies on the spacing of underlying images.
This is based on the fact that most acquisition protocols are optimized
        to focus on the most important aspects.
Returns:
List[int]: initial patch size
"""
voxels_per_mm = 1 / np.array(target_spacing_transposed)
# normalize voxels per mm
input_patch_size = voxels_per_mm / voxels_per_mm.mean()
# create an isotropic patch of size 512x512x512mm
input_patch_size *= 1 / min(input_patch_size) * 512 # to get a starting value
input_patch_size = np.round(input_patch_size).astype(np.int32)
        # clip it to the median shape of the dataset because patches larger than that do not make much sense
input_patch_size = [min(i, j) for i, j in zip(
input_patch_size, target_median_shape_transposed)]
return np.round(input_patch_size).astype(np.int32)
def plan_pool_and_conv_pool_late(self,
patch_size: Sequence[int],
spacing: Sequence[float],
) -> Tuple[List[int], List[Tuple[int]], List[Tuple[int]],
Sequence[int], Sequence[int]]:
"""
Plan pooling and convolutions of encoder network
        Axes which do not need pooling in every block are pooled as late as possible.
        Uses kernel size 1 for anisotropic axes which are not yet covered by the field of view.
        Args:
            patch_size: target patch size
spacing: target spacing transposed
Returns:
List[int]: max number of pooling operations per axis
List[Tuple[int]]: kernel sizes of pooling operations
List[Tuple[int]]: kernel sizes of convolution layers
Sequence[int]: patch size
            Sequence[int]: divisor each axis needs to be divisible by
"""
num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, \
patch_size, must_be_divisible_by = get_pool_and_conv_props(
spacing=spacing, patch_size=patch_size,
min_feature_map_size=self.min_feature_map_size,
max_numpool=self.max_num_pool)
return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by
def get_anchors_for_estimation(self):
"""
Adjust anchor plan for varying number of feature maps
Returns:
dict: adjusted anchor plan
"""
num_levels = len(self.architecture_kwargs["decoder_levels"])
anchor_plan = {"stride": 1, "aspect_ratios": (0.5, 1, 2)}
if self.dim == 2:
_sizes = [(16, 32, 64)] * num_levels
anchor_plan["sizes"] = tuple(_sizes)
else:
_sizes = [(16, 32, 64)] * num_levels
anchor_plan["sizes"] = tuple(_sizes)
anchor_plan["zsizes"] = tuple(_sizes)
return anchor_plan
```
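A small worked example of the class weighting rule described in `compute_class_weights` above. It re-computes the formula outside the class; the instance counts are illustrative assumptions.
```python
# Hypothetical check of the class-weight formula (instance counts are made up).
num_instances = {0: 80, 1: 20}             # two foreground classes
num_classes = len(num_instances)            # 2
n_all = sum(num_instances.values())         # 100

bg_weight = 1 / (num_classes + 1)           # 1/3
fg_weights = [(1 - bg_weight) * (1 - n_i / n_all) for n_i in num_instances.values()]
print([bg_weight] + fg_weights)             # approx [0.333, 0.133, 0.533]
```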
#### File: planning/properties/medical.py
```python
import numpy as np
from typing import Dict, List
from collections import defaultdict, OrderedDict
from nndet.io.load import load_properties_of_cropped
from nndet.planning.analyzer import DatasetAnalyzer
def get_sizes_and_spacings_after_cropping(analyzer: DatasetAnalyzer) -> Dict[str, List]:
"""
Load all sizes and spacings after cropping
Args:
analyzer: analyzer which calls this property
Returns:
Dict[str, List]: loaded sizes and spacings inside list
`all_sizes`: contains all sizes
`all_spacings`: contains all spacings
"""
output = defaultdict(list)
for case_id in analyzer.case_ids:
properties = load_properties_of_cropped(analyzer.cropped_data_dir / case_id)
output['all_sizes'].append(properties["size_after_cropping"])
output['all_spacings'].append(properties["original_spacing"])
return output
def get_size_reduction_by_cropping(analyzer: DatasetAnalyzer) -> Dict[str, Dict]:
"""
Compute all size reductions of each case
Args:
        analyzer: analyzer which calls this property
Returns:
Dict: computed size reductions
`size_reductions`: dictionary with each case id and reduction
"""
size_reduction = OrderedDict()
for case_id in analyzer.case_ids:
props = load_properties_of_cropped(analyzer.cropped_data_dir / case_id)
shape_before_crop = props["original_size_of_raw_data"]
shape_after_crop = props['size_after_cropping']
size_red = np.prod(shape_after_crop) / np.prod(shape_before_crop)
size_reduction[case_id] = size_red
return {"size_reductions": size_reduction}
```
#### File: planning/properties/registry.py
```python
from nndet.planning.properties import (
get_sizes_and_spacings_after_cropping,
get_size_reduction_by_cropping,
get_modalities,
analyze_segmentations,
analyze_intensities,
analyze_instances,
)
def medical_segmentation_props(intensity_properties: bool = True):
"""
Default set for analysis of medical segmentation images
Args:
intensity_properties (optional): analyze intensity properties. Defaults to True.
Returns:
        Sequence[Callable]: properties to calculate.
See Also:
:func:`nndet.planning.medical.get_sizes_and_spacings_after_cropping`,
:func:`nndet.planning.medical.get_size_reduction_by_cropping`,
:func:`nndet.planning.intensity.get_modalities`,
:func:`nndet.planning.intensity.analyze_intensities`,
:func:`nndet.planning.segmentation.analyze_segmentations`,
"""
props = [
get_sizes_and_spacings_after_cropping,
get_size_reduction_by_cropping,
get_modalities,
analyze_segmentations,
]
if intensity_properties:
props.append(analyze_intensities)
else:
props.append(lambda x: {'intensity_properties': None})
return props
def medical_instance_props(intensity_properties: bool = True):
"""
Default set for analysis of medical instance segmentation images
Args:
intensity_properties (optional): analyze intensity properties. Defaults to True.
Returns:
        Sequence[Callable]: properties to calculate.
See Also:
:func:`nndet.planning.medical.get_sizes_and_spacings_after_cropping`,
:func:`nndet.planning.medical.get_size_reduction_by_cropping`,
:func:`nndet.planning.intensity.get_modalities`,
:func:`nndet.planning.intensity.analyze_intensities`,
:func:`nndet.planning.instance.analyze_instances`,
"""
props = [
get_sizes_and_spacings_after_cropping,
get_size_reduction_by_cropping,
get_modalities,
analyze_instances,
]
if intensity_properties:
props.append(analyze_intensities)
else:
props.append(lambda x: {'intensity_properties': None})
return props
```
#### File: nndet/utils/registry.py
```python
import inspect
import shutil
import os
from pathlib import Path
from typing import Callable
class Registry:
def __init__(self):
self.mapping = {}
def __getitem__(self, key):
return self.mapping[key]["fn"]
def register(self, fn: Callable):
self._register(fn.__name__, fn, inspect.getfile(fn))
return fn
def _register(self, name: str, fn: Callable, path: str):
if name in self.mapping:
raise TypeError(f"Name {name} already in registry.")
else:
self.mapping[name] = {"fn": fn, "path": path}
def get(self, name: str):
return self.mapping[name]["fn"]
def copy_registered(self, target: Path):
if not target.is_dir():
target.mkdir(parents=True)
paths = [e["path"] for e in self.mapping.values()]
paths = list(set(paths))
names = [p.split('nndet')[-1] for p in paths]
names = [n.replace(os.sep, '_').rsplit('.', 1)[0] for n in names]
names = [f"{n[1:]}.py" for n in names]
for name, path in zip(names, paths):
shutil.copy(path, str(target / name))
```
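A brief usage sketch for the `Registry` helper above; the registered function name and its body are illustrative assumptions.
```python
# Hypothetical usage of Registry (the registered function is made up).
MODULE_REGISTRY = Registry()

@MODULE_REGISTRY.register
def build_retina_unet(plan: dict):
    """Construct a model from an architecture plan (placeholder)."""
    return plan

builder = MODULE_REGISTRY["build_retina_unet"]   # same as MODULE_REGISTRY.get(...)
print(builder({"arch_name": "RetinaUNet"}))
```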
#### File: nndet/utils/timer.py
```python
import time
from loguru import logger
class Timer:
def __init__(self, msg: str = "", verbose: bool = True):
self.verbose = verbose
self.msg = msg
self.tic: float = None
self.toc: float = None
self.dif: float = None
def __enter__(self):
self.tic = time.perf_counter()
def __exit__(self, exc_type, exc_val, exc_tb):
self.toc = time.perf_counter()
self.dif = self.toc - self.tic
if self.verbose:
logger.info(f"Operation '{self.msg}' took: {self.toc - self.tic} sec")
```
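A short example of the `Timer` context manager above.
```python
# Example usage of the Timer context manager.
import time

with Timer("sleep for a moment"):
    time.sleep(0.1)
# logs something like: Operation 'sleep for a moment' took: 0.1 sec
```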
#### File: Task011_Kits/scripts/prepare.py
```python
import shutil
import os
import sys
from pathlib import Path
from loguru import logger
from nndet.io import save_json
from nndet.io.prepare import create_test_split
from nndet.utils.check import env_guard
from nndet.utils.info import maybe_verbose_iterable
@env_guard
def main():
det_data_dir = Path(os.getenv('det_data'))
task_data_dir = det_data_dir / "Task011_Kits"
source_data_dir = task_data_dir / "raw"
if not source_data_dir.is_dir():
raise RuntimeError(f"{source_data_dir} should contain the raw data but does not exist.")
splitted_dir = task_data_dir / "raw_splitted"
target_data_dir = task_data_dir / "raw_splitted" / "imagesTr"
target_data_dir.mkdir(exist_ok=True, parents=True)
target_label_dir = task_data_dir / "raw_splitted" / "labelsTr"
target_label_dir.mkdir(exist_ok=True, parents=True)
logger.remove()
logger.add(sys.stdout, level="INFO")
logger.add(task_data_dir / "prepare.log", level="DEBUG")
# save meta info
dataset_info = {
"name": "Kits",
"task": "Task011_Kits",
"target_class": None,
"test_labels": True,
"seg2det_stuff": [1,], # define stuff classes: kidney
"seg2det_things": [2,], # define things classes: tumor
"min_size": 3.,
"labels": {"0": "lesion"},
"labels_stuff": {"1": "kidney"},
"modalities": {"0": "CT"},
"dim": 3,
}
save_json(dataset_info, task_data_dir / "dataset.json")
# prepare cases
cases = [str(c.name) for c in source_data_dir.iterdir() if c.is_dir()]
for c in maybe_verbose_iterable(cases):
logger.info(f"Copy case {c}")
case_id = int(c.split("_")[-1])
if case_id < 210:
shutil.copy(source_data_dir / c / "imaging.nii.gz", target_data_dir / f"{c}_0000.nii.gz")
shutil.copy(source_data_dir / c / "segmentation.nii.gz", target_label_dir / f"{c}.nii.gz")
# create an artificial test split
create_test_split(splitted_dir=splitted_dir,
num_modalities=1,
test_size=0.3,
random_state=0,
shuffle=True,
)
if __name__ == '__main__':
main()
```
#### File: nnDetection/scripts/generate_example.py
```python
import os
import random
import argparse
from pathlib import Path
from multiprocessing import Pool
from itertools import repeat
import numpy as np
import SimpleITK as sitk
from loguru import logger
from nndet.io import save_json
from nndet.utils.check import env_guard
# # 2D example
# [Ignore, Not supported]
# dim = 2
# image_size = [512, 512]
# object_size = [32, 64]
# object_width = 6
# num_images_tr = 100
# num_images_ts = 100
# 3D example
dim = 3
image_size = [256, 256, 256]
object_size = [16, 32]
object_width = 4
def generate_image(image_dir, label_dir, idx):
random.seed(idx)
np.random.seed(idx)
logger.info(f"Generating case_{idx}")
selected_size = np.random.randint(object_size[0], object_size[1])
selected_class = np.random.randint(0, 2)
data = np.random.rand(*image_size)
mask = np.zeros_like(data)
top_left = [np.random.randint(0, image_size[i] - selected_size) for i in range(dim)]
if selected_class == 0:
slicing = tuple([slice(tp, tp + selected_size) for tp in top_left])
data[slicing] = data[slicing] + 0.4
data = data.clip(0, 1)
mask[slicing] = 1
elif selected_class == 1:
slicing = tuple([slice(tp, tp + selected_size) for tp in top_left])
inner_slicing = [slice(tp + object_width, tp + selected_size - object_width) for tp in top_left]
if len(inner_slicing) == 3:
inner_slicing[0] = slice(0, image_size[0])
inner_slicing = tuple(inner_slicing)
object_mask = np.zeros_like(mask).astype(bool)
object_mask[slicing] = 1
object_mask[inner_slicing] = 0
data[object_mask] = data[object_mask] + 0.4
data = data.clip(0, 1)
mask[object_mask] = 1
else:
raise NotImplementedError
if dim == 2:
data = data[None]
mask = mask[None]
data_itk = sitk.GetImageFromArray(data)
mask_itk = sitk.GetImageFromArray(mask)
mask_meta = {
"instances": {
"1": selected_class
},
}
sitk.WriteImage(data_itk, str(image_dir / f"case_{idx}_0000.nii.gz"))
sitk.WriteImage(mask_itk, str(label_dir / f"case_{idx}.nii.gz"))
save_json(mask_meta, label_dir / f"case_{idx}.json")
@env_guard
def main():
"""
Generate an example dataset for nnDetection to test the installation or
experiment with ideas.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--full',
help="Increase size of dataset. "
"Default sizes train/test 10/10 and full 1000/1000.",
action='store_true',
)
parser.add_argument(
'--num_processes',
help="Use multiprocessing to create dataset.",
type=int,
default=0,
)
args = parser.parse_args()
full = args.full
num_processes = args.num_processes
num_images_tr = 1000 if full else 10
num_images_ts = 1000 if full else 10
meta = {
"task": f"Task000D{dim}_Example",
"name": "Example",
"target_class": None,
"test_labels": True,
"labels": {"0": "Square", "1": "SquareHole"},
"modalities": {"0": "MRI"},
"dim": dim,
}
# setup paths
data_task_dir = Path(os.getenv("det_data")) / meta["task"]
data_task_dir.mkdir(parents=True, exist_ok=True)
save_json(meta, data_task_dir / "dataset.json")
raw_splitted_dir = data_task_dir / "raw_splitted"
images_tr_dir = raw_splitted_dir / "imagesTr"
images_tr_dir.mkdir(parents=True, exist_ok=True)
labels_tr_dir = raw_splitted_dir / "labelsTr"
labels_tr_dir.mkdir(parents=True, exist_ok=True)
images_ts_dir = raw_splitted_dir / "imagesTs"
images_ts_dir.mkdir(parents=True, exist_ok=True)
labels_ts_dir = raw_splitted_dir / "labelsTs"
labels_ts_dir.mkdir(parents=True, exist_ok=True)
if num_processes == 0:
for idx in range(num_images_tr):
generate_image(
images_tr_dir,
labels_tr_dir,
idx,
)
for idx in range(num_images_tr, num_images_tr + num_images_ts):
generate_image(
images_ts_dir,
labels_ts_dir,
idx,
)
else:
logger.info("Using multiprocessing to create example dataset.")
with Pool(processes=num_processes) as p:
p.starmap(
generate_image,
zip(
repeat(images_tr_dir),
repeat(labels_tr_dir),
range(num_images_tr),
)
)
with Pool(processes=num_processes) as p:
p.starmap(
generate_image,
zip(
repeat(images_ts_dir),
repeat(labels_ts_dir),
range(num_images_tr, num_images_tr + num_images_ts),
)
)
if __name__ == '__main__':
main()
```
#### File: joeranbosma/nnDetection/setup.py
```python
from setuptools import setup, find_packages
from pathlib import Path
import os
import sys
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
def resolve_requirements(file):
requirements = []
with open(file) as f:
req = f.read().splitlines()
for r in req:
if r.startswith("-r"):
requirements += resolve_requirements(
os.path.join(os.path.dirname(file), r.split(" ")[1]))
else:
requirements.append(r)
return requirements
def read_file(file):
with open(file) as f:
content = f.read()
return content
def clean():
"""Custom clean command to tidy up the project root."""
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz')
def get_extensions():
"""
Adapted from https://github.com/pytorch/vision/blob/master/setup.py
and https://github.com/facebookresearch/detectron2/blob/master/setup.py
"""
print("Build csrc")
print("Building with {}".format(sys.version_info))
this_dir = Path(os.path.dirname(os.path.abspath(__file__)))
extensions_dir = this_dir/'nndet'/'csrc'
main_file = list(extensions_dir.glob('*.cpp'))
source_cpu = [] # list((extensions_dir/'cpu').glob('*.cpp')) temporary until I added header files ...
source_cuda = list((extensions_dir/'cuda').glob('*.cu'))
print("main_file {}".format(main_file))
print("source_cpu {}".format(source_cpu))
print("source_cuda {}".format(source_cuda))
sources = main_file + source_cpu
extension = CppExtension
define_macros = []
extra_compile_args = {"cxx": []}
if (torch.cuda.is_available() and CUDA_HOME is not None) or os.getenv('FORCE_CUDA', '0') == '1':
print("Adding CUDA csrc to build")
print("CUDA ARCH {}".format(os.getenv("TORCH_CUDA_ARCH_LIST")))
extension = CUDAExtension
sources += source_cuda
define_macros += [('WITH_CUDA', None)]
extra_compile_args["nvcc"] = [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
# It's better if pytorch can do this by default ..
CC = os.environ.get("CC", None)
if CC is not None:
extra_compile_args["nvcc"].append("-ccbin={}".format(CC))
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [str(extensions_dir)]
ext_modules = [
extension(
'nndet._C',
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
requirements = resolve_requirements(os.path.join(os.path.dirname(__file__),
'requirements.txt'))
readme = read_file(os.path.join(os.path.dirname(__file__), "README.md"))
setup(
name='nndet',
version="v0.1",
packages=find_packages(),
include_package_data=True,
test_suite="unittest",
long_description=readme,
long_description_content_type='text/markdown',
install_requires=requirements,
tests_require=["coverage"],
python_requires=">=3.8",
author="Division of Medical Image Computing, German Cancer Research Center",
maintainer_email='<EMAIL>',
ext_modules=get_extensions(),
cmdclass={
'build_ext': BuildExtension,
'clean': clean,
},
entry_points={
'console_scripts': [
'nndet_example = scripts.generate_example:main',
'nndet_prep = scripts.preprocess:main',
'nndet_cls2fg = scripts.convert_cls2fg:main',
'nndet_seg2det = scripts.convert_seg2det:main',
'nndet_train = scripts.train:train',
'nndet_sweep = scripts.train:sweep',
'nndet_eval = scripts.train:evaluate',
'nndet_predict = scripts.predict:main',
'nndet_consolidate = scripts.consolidate:main',
'nndet_boxes2nii = scripts.utils:boxes2nii',
'nndet_seg2nii = scripts.utils:seg2nii',
'nndet_unpack = scripts.utils:unpack',
'nndet_env = scripts.utils:env',
]
},
)
``` |
{
"source": "joeree/cfap",
"score": 3
} |
#### File: joeree/cfap/calculator.py
```python
import tkinter as tk
class Calculator:
def __init__(self, base, bgcolor):
self.expression = ''
self.equation = tk.StringVar()
self.base = base
self.bg_color = bgcolor
self.start_up()
def key(self, event):
        if event.char in '^()/789*456-123+0.':  # accept only calculator characters (spaces ignored)
self.expression += event.char
self.equation.set(self.expression)
elif event.char == '=':
self.equalpress()
elif event.char == '\x7f':
self.expression = self.expression[:-1]
self.equation.set(self.expression)
else:
print(event)
def press(self, num):
self.expression += str(num)
self.equation.set(self.expression)
def equalpress(self):
try:
total = str(eval(self.expression))
self.equation.set(total)
        except (SyntaxError, ZeroDivisionError):  # malformed expression or division by zero
total = ' error '
self.equation.set(total)
def clear(self):
self.expression = ''
self.equation.set('')
def start_up(self):
self.base.configure(background=self.bg_color)
self.base.title('Calculator')
expression_window = tk.Entry(self.base, textvariable=self.equation)
expression_window.grid(column=0, row=0, columnspan=4, sticky='we')
tup = [(2, 0), (2, 1), (2, 2), (2, 3),
(3, 0), (3, 1), (3, 2), (3, 3),
(4, 0), (4, 1), (4, 2), (4, 3),
(5, 0), (5, 1), (5, 2), (5, 3),
(6, 0), (6, 1)]
buttons = []
button_names = '^ ( ) / 7 8 9 * 4 5 6 - 1 2 3 + 0 .'
names = button_names.split()
for x in names:
buttons.append(tk.Button(self.base, text=x, fg='black', highlightbackground=self.bg_color,
command=lambda x=x: self.press(x), height=1, width=4))
buttons[-1].grid(row=tup[names.index(x)][0], column=tup[names.index(x)][1])
equal_button = tk.Button(self.base, text='=', fg='black', highlightbackground=self.bg_color,
command=self.equalpress, height=1, width=4)
equal_button.grid(column=2, row=6)
clear_button = tk.Button(self.base, text='Clear', fg='black', highlightbackground=self.bg_color,
command=self.clear, height=1, width=4)
clear_button.grid(column=3, row=6)
self.base.bind('<Key>', self.key)
self.base.bind('<Return>', lambda e: self.equalpress())
if __name__ == '__main__':
root = tk.Tk()
c = Calculator(root, 'lightcyan2')
root.mainloop()
``` |
{
"source": "JoeRegnier/horkos",
"score": 3
} |
#### File: horkos-extractor/config/config.py
```python
import configparser
import os
class Config:
def __init__(self):
        self.config_parser = configparser.ConfigParser()  # SafeConfigParser is deprecated/removed in recent Python
self.config_parser.read('config/config.ini')
def get(self, section, key):
env = section.upper() + "_" + key.upper().replace(".", "_")
if os.environ.get(env) is not None:
return os.environ.get(env)
return self.config_parser.get(section, key)
```
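A small usage sketch for the `Config` wrapper above; the section, key, and environment variable names are illustrative assumptions.
```python
# Hypothetical usage of Config (section/key names are made up).
import os

config = Config()  # reads config/config.ini

# an environment variable such as DATABASE_HOST overrides [database] host from the ini file
os.environ["DATABASE_HOST"] = "localhost"
print(config.get("database", "host"))  # -> "localhost"
```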
#### File: horkos-extractor/domain/statquery.py
```python
class statsQuery:
def __init__(self, queryName):
self.queryName = queryName
def setQueryID(self, queryID):
self.queryID = queryID
def getQueryID(self):
return self.queryID
def getQueryName(self):
return self.queryName
def setSqlQuery(self, sqlQuery):
self.sqlQuery = sqlQuery
def getSqlQuery(self):
return self.sqlQuery
def setMostRecentRevision(self, mostRecentRevision):
self.mostRecentRevision = mostRecentRevision
def getMostRecentRevision(self):
return self.mostRecentRevision
def setTrailingRevisionNumber(self, trailingRevisionNumber):
self.trailingRevisionNumber = trailingRevisionNumber
def getTrailingRevisionNumber(self):
return self.trailingRevisionNumber
def setKeyLength(self, keyLength):
self.keyLength = keyLength
def getKeyLength(self):
return self.keyLength
def setTargetConnection(self, targetConn):
self.targetConn = targetConn
def getTargetConnection(self):
return self.targetConn
def setScoreThreshold(self, Score):
self.Score = Score
def getScoreThreshold(self):
return self.Score
def setDatabaseName(self, databaseName):
self.databaseName = databaseName
def getDatabaseName(self):
return self.databaseName
def get_static_weights(self):
return self.static_weights
def set_static_weights(self, static_weights):
self.static_weights = static_weights
def __str__(self):
return self.queryName
```
#### File: horkos-extractor/score/score_engine.py
```python
class ScoreEngine():
def __init__(self, score_net, static_weights, use_net):
self.score_net = score_net
self.static_weights = static_weights
self.use_net = use_net
def get_overall_score(self, stat_techniques, key):
overall_score = 0
explanation = ""
if self.use_net == True and self.score_net is not None:
#Implement in the future
pass
#Use simple static weights
elif self.static_weights is not None:
for stat_technique in stat_techniques:
if self.static_weights[stat_technique.get_name()] is not None:
overall_score += stat_technique.get_scores()[key] * self.static_weights[stat_technique.get_name()]
if (stat_technique.get_scores()[key] > (self.static_weights[stat_technique.get_name()] / 2)):
explanation += stat_technique.get_name() + ", "
return [overall_score, explanation[0:len(explanation)-2]]
```
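With static weights, `ScoreEngine.get_overall_score` is a weighted sum of each technique's score for the given key, and a technique's name is appended to the explanation whenever its score exceeds half of its weight. A small sketch of that branch, using a hypothetical stub in place of a real `StatisticalTechnique` subclass:
```python
# Hypothetical stub exercising the static-weight branch of ScoreEngine;
# the import path is assumed from the repository layout above.
from score.score_engine import ScoreEngine


class StubTechnique:
    def __init__(self, name, scores):
        self._name = name
        self._scores = scores

    def get_name(self):
        return self._name

    def get_scores(self):
        return self._scores


techniques = [
    StubTechnique("Edit Distance", {"DJ7R92, 11000": 0.8}),
    StubTechnique("Unigram Probability", {"DJ7R92, 11000": 0.4}),
]
weights = {"Edit Distance": 0.6, "Unigram Probability": 0.4}

engine = ScoreEngine(score_net=None, static_weights=weights, use_net=False)
score, explanation = engine.get_overall_score(techniques, "DJ7R92, 11000")
# score = 0.8 * 0.6 + 0.4 * 0.4 = 0.64; both scores exceed half their weight,
# so the explanation names both techniques.
print(score, explanation)
```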
#### File: horkos-extractor/stat_technique/editdistance_st.py
```python
from stat_technique.statisticaltechnique import StatisticalTechnique
from util import math_util
from util import edit_distance_util
class EditDistance_ST(StatisticalTechnique):
def __init__(self, freqmap, latest_freqmap):
StatisticalTechnique.__init__(self, freqmap, latest_freqmap)
def get_name(self):
return "Edit Distance"
def process(self):
if self.freqmap is None or self.latest_freqmap is None:
return None
scores = self._process_each_bucket(self.freqmap, self.latest_freqmap)
self._set_scores(scores)
return scores
def _set_scores(self, scores):
self.scores = scores
def _process_each_bucket(self, freqmap, latest_freqmap):
scores = dict()
for key, bucket in latest_freqmap.items():
for bucketValue, freq in bucket.items():
edit_distance = self._min_edit_distance(freqmap[key], bucketValue)
scores[key + ", " + bucketValue] = self._edit_distance_to_score(edit_distance, len(bucketValue))
return scores
def _edit_distance_to_score(self, min_distance, max_length):
if min_distance >= max_length:
return 0
return 1 - (min_distance / max_length)
def _min_edit_distance(self, freqmap, test_bucket):
[min_distance, suggestion] = edit_distance_util.edit_distance_comparison(freqmap, test_bucket)
return min_distance
def _freq_comparison(self, freqmap, test_bucket):
return math_util.freqmap_comparison(freqmap, test_bucket)
def _levenshtein_distance(self, string_one, string_two):
return edit_distance_util.levenshtein_distance(string_one, string_two)
```
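`_edit_distance_to_score` maps the smallest edit distance between a new value and the historical values for the same key onto [0, 1]: an exact match scores 1, and any distance at least as long as the value scores 0. A quick worked example of just that mapping, independent of the `edit_distance_util` helper (which is not shown here):
```python
# Worked example of the distance-to-score mapping used by EditDistance_ST.
def edit_distance_to_score(min_distance, max_length):
    if min_distance >= max_length:
        return 0
    return 1 - (min_distance / max_length)

assert edit_distance_to_score(0, 5) == 1.0  # identical 5-character values
assert edit_distance_to_score(1, 5) == 0.8  # one edit on a 5-character value
assert edit_distance_to_score(5, 5) == 0    # no usable similarity left
```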
#### File: horkos-extractor/stat_technique/unigramprobability_st.py
```python
from stat_technique.statisticaltechnique import StatisticalTechnique
from util import math_util
class UnigramProbability_ST(StatisticalTechnique):
def __init__(self, freqmap, latest_freqmap):
StatisticalTechnique.__init__(self, freqmap, latest_freqmap)
def get_name(self):
return "Unigram Probability"
def process(self):
if self.freqmap is None or self.latest_freqmap is None:
return None
unigram_freqmap = self._freqmap_to_unigram_map(self.freqmap)
unigram_probabilities = self._process_unigram_probabilities(unigram_freqmap)
scores = self._calculate_all_scores(unigram_probabilities, self.latest_freqmap)
self._set_scores(scores)
return scores
def _set_scores(self, scores):
self.scores = scores
def _calculate_all_scores(self, probabilities, latest_freqmap):
scores = dict()
for key, bucket in latest_freqmap.items():
for bucket_value, freq in bucket.items():
scores[key+", "+bucket_value] = self._calculate_score(list(probabilities.values()), probabilities[bucket_value])
return scores
def _calculate_score(self, probabilities, test_value):
deviation_from_mean = math_util.deviation_from_mean(probabilities, test_value)
#Only want to measure scores that occur less than average
if deviation_from_mean > 0:
score = 0
else:
score = 1 - math_util.z_score_to_cheb_values(deviation_from_mean)
return score
def _freqmap_to_unigram_map(self, freqmap):
unigram_freq = dict()
for key, bucket in freqmap.items():
for bucket_value, freq in bucket.items():
if unigram_freq.get(bucket_value) is None:
unigram_freq[bucket_value] = freq
else:
unigram_freq[bucket_value] += freq
return unigram_freq
def _process_unigram_probabilities(self, unigram_freqmap):
probabilities = dict()
for bucket, value in unigram_freqmap.items():
probabilities[bucket] = self._freq_comparison(unigram_freqmap, bucket)
return probabilities
def _freq_comparison(self, freqmap, testBucket):
return math_util.freqmap_comparison(freqmap, testBucket)
```
#### File: test/stat_technique/test_editdistance_st.py
```python
import unittest
from stat_technique.editdistance_st import EditDistance_ST
class EditDistance_ST_Test(unittest.TestCase):
def test_simple(self):
freqmap = dict()
inner_freq_map = dict()
inner_freq_map["10000"] = 300
inner_freq_map["11000"] = 200
inner_freq_map["25000"] = 1
freqmap["DJ7R92"] = inner_freq_map
second_inner_freq_map = dict()
second_inner_freq_map["20000"] = 2000
second_inner_freq_map["21000"] = 3000
second_inner_freq_map["45000"] = 1
freqmap["DH6H81"] = second_inner_freq_map
latest_freqmap = dict()
third_inner_freq_map = dict()
third_inner_freq_map["25000"] = 1
third_inner_freq_map["11000"] = 1
latest_freqmap["DJ7R92"] = third_inner_freq_map
fourth_inner_freq_map = dict()
fourth_inner_freq_map["45000"] = 1
fourth_inner_freq_map["20000"] = 1
latest_freqmap["DH6H81"] = fourth_inner_freq_map
edit_distance = EditDistance_ST(freqmap, latest_freqmap)
scores = edit_distance.process()
self.assertTrue(scores["DJ7R92, 11000"] == 0.8)
self.assertTrue(scores["DH6H81, 20000"] == 0.8)
if __name__ == '__main__':
unittest.main()
```
#### File: test/stat_technique/test_normaldistribution_st.py
```python
import unittest
from stat_technique.normaldistribution_st import NormalDistribution_ST
class NormalDistribution_ST_Test(unittest.TestCase):
def test_simple(self):
freqmap = dict()
inner_freq_map = dict()
inner_freq_map["10000"] = 300
inner_freq_map["15000"] = 300
inner_freq_map["25000"] = 1
freqmap["DJ7R92"] = inner_freq_map
second_inner_freq_map = dict()
second_inner_freq_map["20000"] = 2000
second_inner_freq_map["21000"] = 3000
second_inner_freq_map["45000"] = 1
freqmap["DH6H81"] = second_inner_freq_map
latest_freqmap = dict()
third_inner_freq_map = dict()
third_inner_freq_map["25000"] = 1
third_inner_freq_map["15000"] = 1
latest_freqmap["DJ7R92"] = third_inner_freq_map
fourth_inner_freq_map = dict()
fourth_inner_freq_map["45000"] = 1
fourth_inner_freq_map["21000"] = 1
latest_freqmap["DH6H81"] = fourth_inner_freq_map
normal_distribution = NormalDistribution_ST(freqmap, latest_freqmap)
scores = normal_distribution.process()
self.assertTrue(scores["DJ7R92, 25000"] > scores["DJ7R92, 15000"])
self.assertTrue(scores["DH6H81, 45000"] > scores["DH6H81, 21000"])
```
#### File: test/stat_technique/test_unigramprobability_st.py
```python
import unittest
from stat_technique.unigramprobability_st import UnigramProbability_ST
class UnigramProbabilityST_Test(unittest.TestCase):
def test_simple(self):
freqmap = dict()
inner_freq_map = dict()
inner_freq_map["10000"] = 300
inner_freq_map["11000"] = 200
inner_freq_map["25000"] = 1
inner_freq_map["30000"] = 1
freqmap["DJ7R92"] = inner_freq_map
second_inner_freq_map = dict()
second_inner_freq_map["10000"] = 2000
second_inner_freq_map["11000"] = 3000
second_inner_freq_map["25000"] = 1000
freqmap["DH6H81"] = second_inner_freq_map
latest_freqmap = dict()
third_inner_freq_map = dict()
third_inner_freq_map["25000"] = 1
third_inner_freq_map["30000"] = 1
latest_freqmap["DJ7R92"] = third_inner_freq_map
unigram_probability_st = UnigramProbability_ST(freqmap, latest_freqmap)
scores = unigram_probability_st.process()
self.assertTrue(scores["DJ7R92, 25000"] < scores["DJ7R92, 30000"])
if __name__ == '__main__':
unittest.main()
```
#### File: ui/ui/views.py
```python
from django.shortcuts import render, redirect
from django.http import HttpResponse, JsonResponse
from .models import Issue, Query, Score, StaticWeight, StatTechnique
from django.db.models import Prefetch, Count, Avg
from django.db import models
from django.core import serializers
import time
import json
# Create your views here.
def index(request):
view_dict = dict()
return render(request, 'ui/index.html', view_dict)
def queries(request):
view_dict = dict()
issue_status_filter = request.GET.get('filters', '')
issue_status_filter = issue_status_filter.split(",")
queries = Query.objects.annotate(issue_count=models.Sum(
models.Case(
models.When(issue__status__in=issue_status_filter, then=1),
default=0, output_field=models.IntegerField()
)
)).values()
databases = Query.objects.values_list('database').distinct()
view_dict['queries'] = list(queries)
view_dict['staticweights'] = list(StaticWeight.objects.values())
view_dict['stattechniques'] = list(StatTechnique.objects.values())
view_dict['databases'] = list(databases)
return JsonResponse(view_dict, status=200)
def issues_api(request):
view_dict = dict()
query_id = request.GET.get('query_id', -1)
if query_id == -1:
return JsonResponse(view_dict, status=400)
issue_status_filter = request.GET.get('filters', '')
issue_status_filter = issue_status_filter.split(",")
sortby = request.GET.get('sortby', 'score')
if sortby.lower() == 'date':
view_dict["issues"] = list(Issue.objects.filter(queryID_id=query_id).filter(status__in=issue_status_filter).order_by('-date_opened').values())
else:
view_dict["issues"] = list(Issue.objects.filter(queryID_id=query_id).filter(status__in=issue_status_filter).order_by('-overall_score').values())
view_dict["scores"] = list(Score.objects.filter(issue__queryID_id=query_id).filter(issue__status__in=issue_status_filter).values())
return JsonResponse(view_dict, status=200)
def issues_chart_api(request):
view_dict = dict()
issue_status_filter = request.GET.get('filters', '')
issue_status_filter = issue_status_filter.split(",")
query_id = request.GET.get('query_id', -1)
if query_id == -1:
view_dict["issues"] = list(Issue.objects.filter(status__in=issue_status_filter).order_by('date_opened').values('id','queryID_id', 'date_opened', 'status'))
return JsonResponse(view_dict, status=200)
view_dict["issues"] = list(Issue.objects.filter(queryID_id=query_id).filter(status__in=issue_status_filter).order_by('date_opened').values('id','queryID_id', 'date_opened', 'status'))
return JsonResponse(view_dict, status=200)
def general_issue_info(request):
view_dict = dict()
query_id = request.GET.get('query_id', -1)
if query_id == '-1':
view_dict["avg_open_score"] = str(Issue.objects.filter(status='Open').aggregate(Avg('overall_score'))["overall_score__avg"])
view_dict["avg_verified_score"] = str(Issue.objects.filter(status='Verified').aggregate(Avg('overall_score'))["overall_score__avg"])
view_dict["avg_ignored_score"] = str(Issue.objects.filter(status='Ignored').aggregate(Avg('overall_score'))["overall_score__avg"])
view_dict["avg_technique_scores"] = list(Score.objects.values().filter())
return JsonResponse(view_dict, status=200)
view_dict["avg_open_score"] = str(Issue.objects.filter(queryID_id=query_id).filter(status='Open').aggregate(Avg('overall_score'))["overall_score__avg"])
view_dict["avg_verified_score"] = str(Issue.objects.filter(queryID_id=query_id).filter(status='Verified').aggregate(Avg('overall_score'))["overall_score__avg"])
view_dict["avg_ignored_score"] = str(Issue.objects.filter(queryID_id=query_id).filter(status='Ignored').aggregate(Avg('overall_score'))["overall_score__avg"])
return JsonResponse(view_dict, status=200)
def domain_spread_api(request):
issue_status_filter = request.GET.get('filters', '')
issue_status_filter = issue_status_filter.split(",")
queries = Query.objects.annotate(issue_count=models.Sum(
models.Case(
models.When(issue__status__in=issue_status_filter, then=1),
default=0, output_field=models.IntegerField()
)
)).values()
domain_map = dict()
for query in queries:
if domain_map.get(query['database']) is not None:
domain_map[query['database']] = domain_map[query['database']] + query['issue_count']
else:
domain_map[query['database']] = query['issue_count']
return JsonResponse(domain_map, status=200)
def health(request):
return JsonResponse({'health': 'up'}, status=200)
def change_issue_state(request):
state = request.GET.get('status', 'Open')
issue_id = request.GET.get('id', '1')
try:
issue = Issue.objects.get(pk = issue_id)
issue.status = state
issue.save()
return JsonResponse({'issue_id':issue_id,'status':issue.status}, status=200)
except:
return JsonResponse({'issue_id':issue_id,'message':"failed"}, status=500)
def specific_issue(request):
view_dict = dict()
issue_id = request.GET.get('id', '-1')
view_dict["issue"] = Issue.objects.get(pk=issue_id)
view_dict["query"] = Query.objects.get(pk=view_dict["issue"].queryID_id)
view_dict["scores"] = Score.objects.filter(issue_id=view_dict["issue"].id)
return render(request, 'ui/specific_issue.html', view_dict)
``` |
{
"source": "joerenner/berkeley-coreference-analyser",
"score": 3
} |
#### File: berkeley-coreference-analyser/nlp_util/nlp_eval.py
```python
from __future__ import absolute_import
from __future__ import print_function
def coreference_cluster_match(gold, auto):
if len(gold) != len(auto):
return False
for gcluster in gold:
matched = False
for acluster in auto:
if acluster == gcluster:
matched = True
break
if not matched:
return False
return True
def calc_prf(match, gold, test):
'''Calculate Precision, Recall and F-Score, with:
True Positive = match
False Positive = test - match
False Negative = gold - match
>>> calc_prf(0, 0, 0)
(1.0, 1.0, 1.0)
>>> calc_prf(0, 0, 5)
(0.0, 1.0, 0.0)
>>> calc_prf(0, 4, 5)
(0.0, 0.0, 0.0)
>>> calc_prf(0, 4, 0)
(0.0, 0.0, 0.0)
>>> calc_prf(2, 2, 8)
(0.25, 1.0, 0.4)
'''
if gold == 0:
if test == 0:
return 1.0, 1.0, 1.0
return 0.0, 1.0, 0.0
if test == 0 or match == 0:
return 0.0, 0.0, 0.0
p = match / float(test)
r = match / float(gold)
try:
f = 2 * match / (float(test + gold))
return p, r, f
except:
return 0.0, 0.0, 0.0
if __name__ == "__main__":
print("Running doctest")
import doctest
doctest.testmod()
``` |
{
"source": "joerenner/universal-anaphora-scorer",
"score": 2
} |
#### File: joerenner/universal-anaphora-scorer/ua-scorer.py
```python
import sys
from coval.ua import reader
from coval.eval import evaluator
from coval.eval.evaluator import evaluate_non_referrings
__author__ = 'ns-moosavi; juntaoy'
def main():
metric_dict = {
'lea': evaluator.lea, 'muc': evaluator.muc,
'bcub': evaluator.b_cubed, 'ceafe': evaluator.ceafe,
'ceafm':evaluator.ceafm, 'blanc':[evaluator.blancc,evaluator.blancn]}
key_file = sys.argv[1]
sys_file = sys.argv[2]
if 'remove_singletons' in sys.argv or 'remove_singleton' in sys.argv:
keep_singletons = False
else:
keep_singletons = True
if 'remove_split_antecedent' in sys.argv or 'remove_split_antecedents' in sys.argv:
keep_split_antecedent = False
else:
keep_split_antecedent = True
if 'MIN' in sys.argv or 'min' in sys.argv or 'min_spans' in sys.argv:
use_MIN = True
else:
use_MIN = False
if 'keep_non_referring' in sys.argv or 'keep_non_referrings' in sys.argv:
keep_non_referring = True
else:
keep_non_referring = False
if 'keep_bridging' in sys.argv or 'keep_bridgings' in sys.argv:
keep_bridging = True
else:
keep_bridging = False
if 'only_split_antecedent' in sys.argv or 'only_split_antecedents' in sys.argv:
only_split_antecedent = True
keep_split_antecedent = True
keep_singletons = True
keep_bridging = False
keep_non_referring=False
else:
only_split_antecedent = False
if 'evaluate_discourse_deixis' in sys.argv:
evaluate_discourse_deixis = True
keep_split_antecedent = True
keep_singletons = True
only_split_antecedent = False
keep_bridging = False
keep_non_referring = False
else:
evaluate_discourse_deixis = False
if 'all' in sys.argv:
metrics = [(k, metric_dict[k]) for k in metric_dict]
else:
metrics = []
for name in metric_dict:
if name in sys.argv:
metrics.append((name, metric_dict[name]))
if len(metrics) == 0:
metrics = [(name, metric_dict[name]) for name in metric_dict]
msg = ""
if evaluate_discourse_deixis:
msg = 'only discourse deixis'
elif only_split_antecedent:
msg = 'only split-antecedents'
else:
msg = 'coreferent markables'
if keep_singletons:
msg+= ', singletons'
if keep_split_antecedent:
msg+=', split-antecedents'
if keep_non_referring:
msg+=', non-referring mentions'
if keep_bridging:
msg+=', bridging relations'
print('The scorer is evaluating ', msg,
(" using the minimum span evaluation setting " if use_MIN else ""))
evaluate(key_file, sys_file, metrics, keep_singletons,keep_split_antecedent,keep_bridging,
keep_non_referring,only_split_antecedent,evaluate_discourse_deixis, use_MIN)
def evaluate(key_file, sys_file, metrics, keep_singletons, keep_split_antecedent, keep_bridging,
keep_non_referring, only_split_antecedent,evaluate_discourse_deixis, use_MIN):
doc_coref_infos, doc_non_referring_infos, doc_bridging_infos = reader.get_coref_infos(key_file, sys_file, keep_singletons,
keep_split_antecedent, keep_bridging, keep_non_referring,evaluate_discourse_deixis,use_MIN)
conll = 0
conll_subparts_num = 0
for name, metric in metrics:
recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos,
metric,
beta=1,
only_split_antecedent=only_split_antecedent)
if name in ["muc", "bcub", "ceafe"]:
conll += f1
conll_subparts_num += 1
print(name)
print('Recall: %.2f' % (recall * 100),
' Precision: %.2f' % (precision * 100),
' F1: %.2f' % (f1 * 100))
if conll_subparts_num == 3:
conll = (conll / 3) * 100
print('CoNLL score: %.2f' % conll)
if keep_non_referring:
recall, precision, f1 = evaluate_non_referrings(
doc_non_referring_infos)
print('============================================')
print('Non-referring markable identification scores:')
print('Recall: %.2f' % (recall * 100),
' Precision: %.2f' % (precision * 100),
' F1: %.2f' % (f1 * 100))
if keep_bridging:
score_ar, score_fbm, score_fbe = evaluator.evaluate_bridgings(doc_bridging_infos)
recall_ar, precision_ar, f1_ar = score_ar
recall_fbm, precision_fbm, f1_fbm = score_fbm
recall_fbe, precision_fbe, f1_fbe = score_fbe
print('============================================')
print('Bridging anaphora recognition scores:')
print('Recall: %.2f' % (recall_ar * 100),
' Precision: %.2f' % (precision_ar * 100),
' F1: %.2f' % (f1_ar * 100))
print('Full bridging scores (Markable Level):')
print('Recall: %.2f' % (recall_fbm * 100),
' Precision: %.2f' % (precision_fbm * 100),
' F1: %.2f' % (f1_fbm * 100))
print('Full bridging scores (Entity Level):')
print('Recall: %.2f' % (recall_fbe * 100),
' Precision: %.2f' % (precision_fbe * 100),
' F1: %.2f' % (f1_fbe * 100))
main()
``` |
{
"source": "joergbrech/mapfix",
"score": 2
} |
#### File: joergbrech/mapfix/setup.py
```python
import sys
import os
from setuptools import setup, find_packages
import mapfix
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='mapfix',
version=mapfix.__version__,
author='<NAME>',
author_email='<EMAIL>',
description='Use photographed maps with your phone\'s GPS device',
long_description=read('README.rst'),
license='MIT',
keywords=(
"Python, cookiecutter, kivy, buildozer, pytest, projects, project "
"templates, example, documentation, tutorial, setup.py, package, "
"android, touch, mobile, NUI"
),
url='https://github.com/joergbrech/mapfix',
install_requires=[
'kivy>=1.8.0',
'click',
'piexif',
'pillow',
'unidecode',
'exifread',
'numpy',
'pyproj==1.9.6',
'plyer'
],
zip_safe=False,
packages=find_packages(),
include_package_data=True,
entry_points={
'console_scripts': [
'mapfix=mapfix.main:main'
]
},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: X11 Applications',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Topic :: Artistic Software',
'Topic :: Multimedia :: Graphics :: Presentation',
'Topic :: Software Development :: User Interfaces',
],
)
``` |
{
"source": "joergbuchwald/heatsource_thm",
"score": 3
} |
#### File: joergbuchwald/heatsource_thm/heatsource.py
```python
import numpy as np
from scipy import special
import matplotlib.pyplot as plt
class ANASOL(object):
def __init__(self):
#material properties
self.E = 5.e9 #Youngs modulus
self.nu = 0.3 #Poisson ratio
self.aprime = 1.5e-5 # coefficient of volume expansion of the soil a_u = a_s if no structural changes occur
self.Q=300 # [Q]=W strength of the heat source
self.n = 0.16 #porosity of soil
self.rho_w = 999.1 #denstiy of pore water
self.c_w = 4280 #specifict heat of pore water
self.K_w = 0.6 # thermal conductivity of pore water
self.rho_s = 2290.0 #density of the solid
self.c_s = 917.654 #specific heat capacity
self.K_s = 1.838 #themal conductivity of solid
self.k = 2e-20 #coefficient of permeability
self.gravity = 9.81 #gravity
self.vis = 1e-3 #viscosity water at 20 deg
self.a_s = 1.5e-5 # coefficient of volume expansion of skeletal material (beta_s)
self.a_w = 4.0e-4 # coefficient of volume expansion of pore water (beta_w)
self.T0 = 273.15
self.Init()
def f(self, ka, R, t):
return special.erfc(R/(2*np.sqrt(ka*t)))
def g(self, ka, R, t):
return (ka*t/R**2+(1/2-ka*t/R**2)*special.erfc(R/(2*np.sqrt(ka*t)))-np.sqrt(ka*t/(np.pi*R**2))*np.exp(-R**2/(4*ka*t)))
def fstar(self,R,t):
return (self.Y*self.f(self.kappa,R,t)-self.Z*self.f(self.c,R,t))
def gstar(self,R,t):
return (self.Y*self.g(self.kappa,R,t)-self.Z*self.g(self.c,R,t))
def temperature(self,x,y,z,t):
R = self.R(x, y, z)
return (self.Q/(4*np.pi*self.K*R)*self.f(self.kappa,R,t)+self.T0)
def porepressure(self,x,y,z,t):
R = self.R(x, y, z)
return (self.X/(1-self.c/self.kappa)*self.Q/(4*np.pi*self.K*R)*(self.f(self.kappa,R,t)-self.f(self.c,R,t)))
def u_i(self,x,y,z,t,i):
R = self.R(x, y, z)
index = {"x": x, "y": y, "z": z}
return self.a_u*index[i]*self.Q/(4*np.pi*self.K*R)*self.gstar(R,t)
def R(self,x,y,z):
return np.sqrt(x**2+y**2+z**2)
def dg_dR(self,ka,i,R,t):
return ((2*i/R**3)*np.sqrt(ka*t/np.pi)*np.exp(-R*R/(4*ka*t))+(2*i*ka*t/R**4)*(self.f(ka,R,t)-1))
def dgstar_dR(self,i,R,t): # Subscript R means derivative w.r.t R
return (self.Y*self.dg_dR(self.kappa,i,R,t)-self.Z*self.dg_dR(self.c,i,R,t))
def sigma_ii(self,x,y,z,t,ii): # for normal components
R = self.R(x, y, z)
index = {"xx": x, "yy": y, "zz": z}
return ((self.Q*self.a_u/(4*np.pi*self.K*R))*(2*self.G*(self.gstar(R,t)*(1-index[ii]**2/R**2)+index[ii]*self.dgstar_dR(index[ii],R,t))
+self.lambd*(x*self.dgstar_dR(x,R,t)+y*self.dgstar_dR(y,R,t)+z*self.dgstar_dR(z,R,t)+2*self.gstar(R,t)))
-self.bprime*(self.temperature(x,y,z,t)-self.T0))
def sigma_ij(self,x,y,z,t,i,j): # for shear components
R = self.R(x, y, z)
index = {"x": x, "y": y, "z": z}
return ((self.Q*self.a_u/(4*np.pi*self.K*R))*(2*self.G*
(index[i]*self.dgstar_dR(index[j],R,t)/2+index[j]*self.dgstar_dR(index[i],R,t)/2-index[i]*index[j]*self.gstar(R,t)/R**2)))
def Init(self):
#derived constants
self.gamma_w=self.gravity*self.rho_w #unit weight of water
self.lambd=self.E*self.nu/((1+self.nu)*(1-2*self.nu))#lame constant
self.G=self.E/(2*(1+self.nu)) # shear constant
self.K=self.n*self.K_w+(1-self.n)*self.K_s #thermal conductivity
self.bprime=(self.lambd+2*self.G/3)*self.aprime
self.m=self.n*self.rho_w*self.c_w+(1-self.n)*self.rho_s*self.c_s
self.kappa=self.K/self.m #scaled heat conductivity
self.K_hydr=self.k*self.rho_w*self.gravity/self.vis #hydraulic conductivity
self.a_u=self.a_s*(1-self.n)+self.a_w*self.n
self.c=self.K_hydr*(self.lambd+2*self.G)/self.gamma_w #coefficient of consolidation
self.X=self.a_u*(self.lambd+2*self.G)-self.bprime
self.Y=1/(self.lambd+2*self.G) * (self.X/((1-self.c/self.kappa)*self.a_u)+self.bprime/self.a_u)
self.Z=1/(self.lambd+2*self.G) * (self.X/((1-self.c/self.kappa)*self.a_u))
``` |
{
"source": "joergbuchwald/vtu-pvd2h5",
"score": 2
} |
#### File: joergbuchwald/vtu-pvd2h5/vtu2nc4.py
```python
import numpy as np
from vtk import *
from vtk.util.numpy_support import vtk_to_numpy
import netCDF4 as nc4
from lxml import etree as ET
import sys
class VTU2NC4(object):
def __init__(self):
self.filenames = []
self.data = []
self.t = []
def readVTUInput(self,ifile,timestep):
self.filenames.append(ifile)
self.t.append(timestep)
reader = vtkXMLUnstructuredGridReader()
reader.SetFileName(ifile)
reader.Update()
output = reader.GetOutput()
self.data.append({})
points = vtk_to_numpy(output.GetPoints().GetData())
self.data[-1]['x'] = points[:,0]
self.data[-1]['y'] = points[:,1]
self.data[-1]['z'] = points[:,2]
pointdata = output.GetPointData()
fieldnames = []
for i in np.arange(pointdata.GetNumberOfArrays()):
fieldnames.append(pointdata.GetArrayName(i))
fielddata = vtk_to_numpy(pointdata.GetArray(fieldnames[-1]))
if len(fielddata.shape) > 1:
field_dim = fielddata.shape[1]
self.data[-1][fieldnames[-1]] = fielddata[:,:]
else:
field_dim = 1
self.data[-1][fieldnames[-1]] = fielddata[:]
return True
def readPVDInput(self,ifile):
tree = ET.parse(ifile)
root = tree.getroot()
for collection in root:
for dataset in collection:
self.readVTUInput(dataset.attrib['file'],int(dataset.attrib['timestep']))
return True
def writeNC4Output(self,ofile):
datafile = nc4.Dataset(ofile,'w',format='NETCDF4')
datafile.createDimension('pos',len(self.data[0]['x']))
dimensions=[False,False,False,False,False]
for fieldname in self.data[-1]:
if len(self.data[-1][fieldname].shape) > 1:
dimensions[self.data[-1][fieldname].shape[1]-2] = True
for i, dim in enumerate(dimensions):
if dim is True:
datafile.createDimension('dim'+str(i+2),i+2)
datafile.createDimension('t',len(self.t))
t = datafile.createVariable('t', np.float32, ('t',))
x = datafile.createVariable('x', np.float32, ('pos',))
y = datafile.createVariable('y', np.float32, ('pos',))
z = datafile.createVariable('z', np.float32, ('pos',))
t[:] = self.t
x[:] = self.data[-1]['x']
z[:] = self.data[-1]['z']
y[:] = self.data[-1]['y']
var = {}
for variable in self.data[-1]:
if not (variable == 'x' or variable == 'y' or variable == 'z'):
if len(self.data[-1][variable].shape) > 1:
vectorfielddim = 'dim' + str(self.data[-1][variable].shape[1])
var[variable] = datafile.createVariable(variable, np.float32, ('t', 'pos', vectorfielddim))
else:
var[variable] = datafile.createVariable(variable, np.float32, ('t', 'pos'))
for variable in var:
if len(self.data[-1][variable].shape) > 1:
for i, timestep in enumerate(self.t):
var[variable][i,:,:] = self.data[i][variable]
else:
for i, timestep in enumerate(self.t):
var[variable][i,:] = self.data[i][variable]
datafile.close()
return True
def writeXDMFOutput(self,ofile):
reader = []
multiblock = vtkMultiBlockDataSet()
for i, filename in enumerate(self.filenames):
reader.append(vtkXMLUnstructuredGridReader())
reader[i].SetFileName(filename)
reader[i].Update()
multiblock.SetBlock(i,reader[i].GetOutput())
try:
writer = vtkXdmfWriter()
except:
print("The vtkXdmf module is probably not installed. Please check!")
raise RuntimeError
writer.SetFileName(ofile)
writer.SetInputData(multiblock)
writer.SetMeshStaticOverTime(True)
writer.WriteAllTimeStepsOn()
writer.Write()
return True
if __name__ == '__main__':
if not len(sys.argv) == 3:
print("Wrong number of arguments given.")
raise RuntimeError
convert = VTU2NC4()
input_file = sys.argv[1]
output_file = sys.argv[2]
if input_file.split(".")[1] == "vtu":
convert.readVTUInput(input_file,0)
elif input_file.split(".")[1] == "pvd":
convert.readPVDInput(input_file)
else:
print("Not supported file extension.")
raise RuntimeError
if output_file.split(".")[1] == "xmdf" or output_file.split(".")[1] == "xmf":
convert.writeXDMFOutput(output_file)
elif (output_file.split(".")[1] == "nc4"
or output_file.split(".")[1] == "h5"
or output_file.split(".")[1] == "nc"):
convert.writeNC4Output(output_file)
``` |
{
"source": "joergdietrich/CredibleIntervals",
"score": 2
} |
#### File: CredibleIntervals/tests/test_posterior.py
```python
import numpy as np
from numpy.testing import assert_almost_equal
from credible_interval import Posterior
def test_posterior():
np.random.seed(20180131)
x = np.random.normal(10, 5, 10000)
posterior = Posterior(x)
assert_almost_equal(9.9243057716556589, posterior.mean)
assert_almost_equal(9.761182264312712, posterior.mode)
assert_almost_equal(5.0345710789483089, posterior.std)
``` |
{
"source": "joergdietrich/NFW",
"score": 3
} |
#### File: NFW/NFW/mass_concentration.py
```python
from __future__ import division
import numpy as np
from scipy import optimize as opt
from astropy import units as u
def _diff_c(c1, c0, delta_ratio):
return _delta_fac(c0) / _delta_fac(c1) - delta_ratio * (c0 / c1)**3
def _findc(c0, overdensity): # type: (float, float) -> float
delta_ratio = 200 / overdensity
return opt.brentq(_diff_c, .01, 1000, args=(c0, delta_ratio))
def _delta_fac(c):
return np.log(1 + c) - c / (1 + c)
def _find_m200(m200, *args):
func = args[0]
overdensity = args[1]
m_in = args[2]
my_args = args[3:]
c0 = func(m200, *my_args)
c1 = _findc(c0, overdensity)
return m200 / m_in.value - _delta_fac(c0) / _delta_fac(c1)
def _find_mdelta(mdelta, *args):
func = args[0]
overdensity = args[1]
m_in = args[2]
my_args = args[3:]
c200 = func(m_in, *my_args)
c1 = _findc(c200, overdensity)
return m_in.value / mdelta - _delta_fac(c200) / _delta_fac(c1)
def mdelta_to_mdelta(m, func, overdensity_in, overdensity_out, args=()):
"""
Convert a mass given at one overdensity with respect to the critical
density to the mass at another overdensity with respect to the critical
density, following a fixed mass-concentration relation.
Parameters:
===========
m: float or astropy.Quantity
mass of halo
func: callable func(m200c, *args)
Mass-concentration scaling relation
overdensity_in: float
Overdensity in units of rho_crit at which halo mass is set
overdensity_out: float
Overdensity in units of rho_crit at which halo mass is desired
args: tuple, optional
Extra arguments passed to func, i.e., ``f(x, *args)``.
Returns:
========
mdelta: astropy.Quantity
mass of halo at Delta times critical overdensity
Notes:
======
Halo masses must be given in units expected by the M-c relation.
"""
m200 = mdelta_to_m200(m, func, overdensity_in, args)
mdelta = m200_to_mdelta(m200, func, overdensity_out, args)
return u.Quantity(mdelta, u.solMass)
def mdelta_to_m200(m, func, overdensity, args=()):
"""
Convert the a mass given in a variable overdensity with respect to
the critical density to the mass in 200 times overdensity wrt the
critical density following a fixed mass concentration relation.
Parameters:
===========
m: float or astropy.Quantity
mass of halo
func: callable func(m200c, *args)
Mass-concentration scaling relation
overdensity: float
Overdensity in units of rho_crit at which halo mass is set
args: tuple, optional
Extra arguments passed to func, i.e., ``f(x, *args)``.
Returns:
========
m200: astropy.Quantity
mass of halo at 200 times critical overdensity
Notes:
======
Halo masses must be given in units expected by the M-c relation.
"""
m_in = u.Quantity(m, u.solMass)
if overdensity == 200:
# brentq would fail for identical overdensities, so return the input mass unchanged
return m_in
m_min = u.Quantity(1e5, u.solMass)
m_max = u.Quantity(1e20, u.solMass)
mdelta = opt.brentq(_find_m200, m_min.value, m_max.value,
args=(func, overdensity, m_in) + args)
return u.Quantity(mdelta, u.solMass)
def m200_to_mdelta(m, func, overdensity, args=()):
"""
Convert the a mass given in 200 times the critical overdensity to
the mass in another overdensity wrt the critical density following
a fixed mass concentration relation.
Parameters:
===========
m: float or astropy.Quantity
mass of halo
func: callable func(m200c, *args)
Mass-concentration scaling relation
overdensity: float
Overdensity in units of rho_crit at which halo mass is desired
args: tuple, optional
Extra arguments passed to func, i.e., ``f(x, *args)``.
Returns:
========
mdelta: astropy.Quantity
mass of halo at `overdensity` times the critical density
Notes:
======
Halo masses must be given in units expected by the M-c relation.
"""
m_in = u.Quantity(m, u.solMass)
if overdensity == 200:
# brentq would fail for identical overdensities, so return the input mass unchanged
return m_in
m_min = u.Quantity(1e5, u.solMass)
m_max = u.Quantity(1e20, u.solMass)
mdelta = opt.brentq(_find_mdelta, m_min.value, m_max.value,
args=(func, overdensity, m_in) + args)
return u.Quantity(mdelta, u.solMass)
def dolag_concentration(m200, z, cosmo):
"""
Compute the concentration of the Dolag et al. (2004)
mass concentration relation for a standard LCDM universe.
Parameters:
===========
m200: astropy.Quantity
Mass of halo at 200rho_crit
z: float
Halo redshift
cosmo: astropy.cosmology
Returns:
========
conc: float
Halo concentration
Notes:
======
Halo masses must be given in physical units with factors of h
divided out.
"""
m200 = np.asanyarray(m200)
z = np.asanyarray(z)
m200 = u.Quantity(m200 * cosmo.h, u.solMass).value
conc = 9.59 / (1 + z) * (m200 / 1e14)**-0.102
return conc
def duffy_concentration(m200, z, cosmo):
"""
Compute the concentration of the Duffy et al. (2008)
mass concentration relation for 200 rho_crit.
Parameters:
===========
m200: float, array_like or astropy.Quantity
Mass of halo at 200rho_crit, [Msun] if float
z: float or array_like
Halo redshift
cosmo: astropy.cosmology
Returns:
========
conc: float or array
Halo concentration
Notes:
======
Halo masses must be given in physical units with factors of h
divided out.
"""
m200 = np.asanyarray(m200)
z = np.asanyarray(z)
m200 = u.Quantity(m200 * cosmo.h, u.solMass)
a = 5.71
b = -0.084
c = -0.47
m_pivot = u.Quantity(2e12, u.solMass)
conc = a * (m200 / m_pivot)**b * (1 + z)**c
return conc.value
```
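The converters above only need a halo mass, a concentration-mass relation, and the overdensities involved; the matching concentration and the `brentq` root-find happen internally. A short usage sketch with the Duffy et al. (2008) relation and an assumed flat LCDM cosmology (the printed numbers are illustrative, not reference values):
```python
# Illustrative use of the conversion helpers; import path assumed from the
# repository layout (NFW/NFW/mass_concentration.py).
import astropy.cosmology
from astropy import units as u

from NFW.mass_concentration import (duffy_concentration, m200_to_mdelta,
                                    mdelta_to_mdelta)

cosmo = astropy.cosmology.FlatLambdaCDM(H0=70, Om0=0.3)
z = 0.3

# M200c -> M500c following the Duffy et al. (2008) c(M) relation.
m200c = 1e15 * u.solMass
m500c = m200_to_mdelta(m200c, duffy_concentration, 500, args=(z, cosmo))

# Direct conversion between two critical overdensities, here 500c -> 2500c.
m2500c = mdelta_to_mdelta(m500c, duffy_concentration, 500, 2500,
                          args=(z, cosmo))
print(m500c, m2500c)
```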
#### File: NFW/tests/test_nfw.py
```python
import numpy as np
from numpy.testing import (TestCase, assert_equal,
assert_almost_equal, assert_array_almost_equal,
assert_raises)
import astropy.cosmology
from astropy import units as u
from NFW.nfw import NFW
class TestNFW(TestCase):
@classmethod
def setup_class(cls):
cls._cosmo = astropy.cosmology.FlatLambdaCDM(70, 0.3, Tcmb0=0)
astropy.cosmology.default_cosmology.set(cls._cosmo)
def test_faulty_init(self):
assert_raises(ValueError, NFW, 1e15, 5, 0, **{'size_type': "foo"})
assert_raises(ValueError, NFW, 1e15, 5, 0,
**{'overdensity_type': "bar"})
def test_overdensity_init(self):
nfw = NFW(1e15, 4, 0.3, overdensity=500, overdensity_type="mean")
assert_equal(nfw.overdensity, 500)
assert (nfw.overdensity_type == "mean")
def test_mass_init(self):
m200 = 1e15 * u.solMass
c = 5.
z = 0.3
nfw = NFW(m200, c, z)
assert_equal(nfw.c, c)
assert_equal(nfw.z, z)
assert_almost_equal(nfw.r_s.value, 0.37244989922085564)
def test_mass_init_bckg(self):
m200 = 1e15
c = 5
z = 0.2
nfw = NFW(m200, c, z, overdensity_type='mean')
assert_almost_equal(nfw.radius_Delta(200).value, 3.708462946948883)
def test_mean_crit_consistency(self):
m200b = 1e15
c = 5
z = 0.3
nfw = NFW(m200b, c, z, overdensity_type='mean')
m200c = nfw.mass_Delta(200, overdensity_type='critical').value
assert_almost_equal(m200c / 1e15, 2.062054316492159)
def test_radius_Delta(self):
m200 = 1e15
c = 5.
z = 0.3
nfw = NFW(m200, c, z)
r200 = nfw.radius_Delta(200)
assert_almost_equal(r200.value, 1.8622494961043254)
r500 = nfw.radius_Delta(500)
assert_almost_equal(r500.value, 1.2310049155128235)
r2500 = nfw.radius_Delta(2500)
assert_almost_equal(r2500.value, 0.5519730850580377)
def test_mass_Delta(self):
m200 = 1e15
c = 5.
z = 0.3
nfw = NFW(m200, c, z)
m500 = nfw.mass_Delta(500)
assert_almost_equal(m500.value / 1e14, 7.221140, 6)
def test_projected_mass(self):
m200 = 1e15
c = 3
z = 0.3
nfw = NFW(m200, c, z)
r = np.linspace(0.2, 3, 20) * u.Mpc
m_proj = nfw.projected_mass(r) / 1e14
# Comparison array was computed by numerical integration
m_comp = np.array([1.16764258, 2.43852383, 3.73913358, 5.00209594,
6.20564153, 7.34451809, 8.41992698, 9.43555485,
10.39589583, 11.3055228 , 12.16877709, 12.98964993,
13.77175259, 14.51832709, 15.23227395, 15.91618585,
16.57238171, 17.20293864, 17.80972092, 18.39440572])
assert_array_almost_equal(m_proj.value, m_comp)
def test_density(self):
m200 = 1e15
c = 5.
z = 0.3
nfw = NFW(m200, c, z)
rho = nfw.density(1.23)
assert_almost_equal(rho.value / 1e13, 2.628799454816062)
def test_mean_density(self):
m200 = 1e15
c = 5.
z = 0.3
nfw = NFW(m200, c, z)
rho = nfw.mean_density(1.23)
assert_almost_equal(rho.value / 1e13, 9.257628230844219)
def test_mass(self):
m200 = 1e15
c = 5.
z = 0.3
nfw = NFW(m200, c, z)
m = nfw.mass(1.32)
assert_almost_equal(m.value / 1e14, 7.6572975645639385)
def test_sigma(self):
m200 = 1e15
c = 5.
z = 0.3
nfw = NFW(m200, c, z)
s = nfw.sigma(1.12)
assert_almost_equal(s.value / 1e13, 8.419216818682797)
def test_delta_sigma(self):
m200 = 1e15
c = 5.
z = 0.3
nfw = NFW(m200, c, z)
ds = nfw.delta_sigma([0.1, 1.12])
ref_arr = np.array([5.288425, 1.387852])
assert_array_almost_equal(ds.value / 1e14, ref_arr)
def test_concentration(self):
m200 = 1e15
c = 5.
z = 0.3
nfw = NFW(m200, c, z)
c500 = nfw.radius_Delta(500) / nfw.r_s
assert_almost_equal(c500, [3.3051557218506047])
c500c = nfw.concentration(500)
assert_almost_equal(c500, c500c)
c500m = nfw.concentration(500, "mean")
assert_almost_equal(c500m, 4.592128764327895)
assert_almost_equal(nfw.concentration(), 5)
def test_mass_consistency(self):
m200 = 1e15
c = 5.
z = 0.3
nfw = NFW(m200, c, z)
m500 = nfw.mass_Delta(500)
c500 = nfw.radius_Delta(500) / nfw.r_s
nfw2 = NFW(m500, c500, z, overdensity=500)
assert_almost_equal(nfw2.mass_Delta(200).value / 1e14, m200 / 1e14)
def test_radius_mass_consistency(self):
m200 = 1e15
c = 5.
z = 0.3
nfw = NFW(m200, c, z)
r200 = nfw.radius_Delta(200)
nfw2 = NFW(r200, c, z, size_type="radius")
assert_almost_equal(nfw2.mass_Delta(200).value / 1e14, m200 / 1e14)
def test_mass_unit_consistency(self):
m200 = 5e14
c = 3
z = 0.4
nfw1 = NFW(m200, c, z)
nfw2 = NFW(m200 * u.solMass, c, z)
r1 = nfw1.radius_Delta(200)
r2 = nfw2.radius_Delta(200)
r1 = u.Quantity(r1, r2.unit)
assert_almost_equal(r1.value, r2.value)
def test_radius_unit_consistency(self):
r200 = 1.5
c = 4
z = 0.2
nfw1 = NFW(r200, c, z, size_type='radius')
nfw2 = NFW(r200 * u.Mpc, c, z, size_type='radius')
nfw3 = NFW(r200 * 1000 * u.kiloparsec, c, z, size_type='radius')
m200_1 = nfw1.mass_Delta(200)
m200_2 = nfw2.mass_Delta(200)
m200_3 = nfw3.mass_Delta(200)
m200_1 = u.Quantity(m200_1, m200_3.unit)
m200_2 = u.Quantity(m200_2, m200_3.unit)
assert_almost_equal(m200_1.value, m200_2.value)
assert_almost_equal(m200_1.value, m200_3.value)
def test_cosmo_consistency(self):
save_cosmo = astropy.cosmology.default_cosmology.get()
m200 = 5e14
c = 3.5
z = 0.15
# Halo 1 with variable cosmology
nfw1 = NFW(m200, c, z)
# Halo 2 with cosmology fixed to the current one
nfw2 = NFW(m200, c, z, cosmology=save_cosmo)
# Halo 3 with cosmology fixed to WMAP9
wmap9 = astropy.cosmology.WMAP9
nfw3 = NFW(m200, c, z, cosmology=wmap9)
assert_almost_equal(nfw1.radius_Delta(200).value,
nfw2.radius_Delta(200).value,
err_msg=
"Disagreement after init with same cosmology")
astropy.cosmology.default_cosmology.set(wmap9)
try:
assert_almost_equal(nfw1.radius_Delta(200).value,
nfw3.radius_Delta(200).value,
err_msg=
"Disagreement after changing cosmology")
except:
astropy.cosmology.default_cosmology.set(save_cosmo)
raise
astropy.cosmology.default_cosmology.set(save_cosmo)
def test_var_cosmo_attr(self):
m200 = 5e14
c = 3.5
z = 0.15
nfw1 = NFW(m200, c, z)
assert nfw1.var_cosmology
nfw2 = NFW(m200, c, z,
cosmology=astropy.cosmology.default_cosmology.get())
assert(not nfw2.var_cosmology)
def test_var_cosmo_obj(self):
wmap9 = astropy.cosmology.WMAP9
save_cosmo = astropy.cosmology.default_cosmology.get()
m200 = 5e14
c = 3.5
z = 0.15
nfw = NFW(m200, c, z)
assert(nfw.cosmology is save_cosmo)
astropy.cosmology.default_cosmology.set(wmap9)
try:
assert(nfw.cosmology is wmap9)
except:
astropy.cosmology.default_cosmology.set(save_cosmo)
raise
# Ensure that accessing the cosmology property also updates
# the other properties.
assert_almost_equal(nfw.radius_Delta(325).value, 1.2524762382195782)
# Now test that the r_s property correctly handels the update
astropy.cosmology.default_cosmology.set(save_cosmo)
assert_almost_equal(nfw.r_s.value, 0.44568171135722084)
# And change the cosmology again to make sure that r_Delta handles
# the update correctly
astropy.cosmology.default_cosmology.set(wmap9)
assert_almost_equal(nfw.r_Delta.value, 1.5732366512813496)
``` |
{
"source": "Joergen/zamboni",
"score": 2
} |
#### File: comm/tests/test_models.py
```python
from datetime import datetime
from nose.tools import eq_
from addons.models import Addon
import amo.tests
from comm.models import (CommunicationNote, CommunicationThread,
CommunicationThreadCC, CommunicationThreadToken,
user_has_perm_note, user_has_perm_thread)
from users.models import UserProfile
from mkt.constants import comm as const
class PermissionTestMixin(object):
fixtures = ['base/addon_3615', 'base/user_999']
def setUp(self):
self.addon = Addon.objects.get()
self.user = UserProfile.objects.get(username='regularuser')
self.thread = CommunicationThread.objects.create(addon=self.addon)
self.author = UserProfile.objects.create(email='lol', username='lol')
self.note = CommunicationNote.objects.create(
thread=self.thread, author=self.author, note_type=0, body='xyz')
self.obj = None
def _eq_obj_perm(self, val):
if self.type == 'note':
eq_(user_has_perm_note(self.obj, self.user), val)
else:
eq_(user_has_perm_thread(self.obj, self.user), val)
def test_no_perm(self):
self._eq_obj_perm(False)
def test_has_perm_public(self):
self.obj.update(read_permission_public=True)
self._eq_obj_perm(True)
def test_has_perm_dev(self):
self.obj.update(read_permission_developer=True)
self.addon.addonuser_set.create(user=self.user)
self._eq_obj_perm(True)
def test_has_perm_rev(self):
self.obj.update(read_permission_reviewer=True)
self.grant_permission(self.user, 'Apps:Review')
self._eq_obj_perm(True)
def test_has_perm_senior_rev(self):
self.obj.update(read_permission_senior_reviewer=True)
self.grant_permission(self.user, 'Apps:ReviewEscalated')
self._eq_obj_perm(True)
def test_has_perm_moz_contact(self):
self.obj.update(read_permission_mozilla_contact=True)
self.addon.update(
mozilla_contact=','.join([self.user.email, '<EMAIL>']))
self._eq_obj_perm(True)
def test_has_perm_staff(self):
self.obj.update(read_permission_staff=True)
self.grant_permission(self.user, 'Admin:*')
self._eq_obj_perm(True)
class TestCommunicationNote(PermissionTestMixin, amo.tests.TestCase):
def setUp(self):
super(TestCommunicationNote, self).setUp()
self.type = 'note'
self.obj = self.note
def test_has_perm_author(self):
self.obj.update(author=self.user)
self._eq_obj_perm(True)
def test_manager(self):
eq_(CommunicationNote.objects.count(), 1)
eq_(CommunicationNote.objects.with_perms(self.user,
self.thread).count(), 0)
self.note.update(author=self.user)
eq_(CommunicationNote.objects.count(), 1)
eq_(CommunicationNote.objects.with_perms(self.user,
self.thread).count(), 1)
class TestCommunicationThread(PermissionTestMixin, amo.tests.TestCase):
def setUp(self):
super(TestCommunicationThread, self).setUp()
self.type = 'thread'
self.obj = self.thread
def test_has_perm_posted(self):
self.note.update(author=self.user)
self._eq_obj_perm(True)
def test_has_perm_cc(self):
CommunicationThreadCC.objects.create(user=self.user, thread=self.obj)
self._eq_obj_perm(True)
class TestThreadTokenModel(amo.tests.TestCase):
fixtures = ['base/addon_3615', 'base/user_999']
def setUp(self):
addon = Addon.objects.get(pk=3615)
self.thread = CommunicationThread(addon=addon)
user = UserProfile.objects.all()[0]
self.token = CommunicationThreadToken(thread=self.thread, user=user)
self.token.modified = datetime.now()
self.token.use_count = 0
def test_live_thread_token_is_valid(self):
"""
Test `is_valid()` when the token is fresh (not expired).
"""
assert self.token.is_valid()
def test_expired_thread_token_is_valid(self):
"""
Test `is_valid()` when the token has expired.
"""
self.token.modified = self.days_ago(const.THREAD_TOKEN_EXPIRY + 1)
assert not self.token.is_valid()
def test_unused_token_is_valid(self):
"""
Test `is_valid()` when the token is unused.
"""
assert self.token.is_valid()
def test_max_used_thread_token_is_valid(self):
"""
Test `is_valid()` when the token has been fully used.
"""
self.token.use_count = const.MAX_TOKEN_USE_COUNT
assert not self.token.is_valid()
def test_reset_uuid(self):
"""
Test `reset_uuid()` generates a differ uuid.
"""
self.thread.save()
self.token.thread = self.thread
self.token.save()
uuid = self.token.uuid
assert uuid
self.token.reset_uuid()
assert self.token.uuid
assert uuid != self.token.uuid
```
#### File: management/commands/load_prices.py
```python
from optparse import make_option
import pprint
import requests
from django.core.management.base import BaseCommand
from market.models import Price, PriceCurrency
domains = {
'prod': 'https://marketplace.firefox.com',
'stage': 'https://marketplace.allizom.org',
'dev': 'https://marketplace-dev.allizom.org'
}
endpoint = '/api/v1/webpay/prices/'
class Command(BaseCommand):
help = """
Load prices and pricecurrencies from the specified marketplace.
Defaults to prod.
"""
option_list = BaseCommand.option_list + (
make_option('--prod',
action='store_const',
const=domains['prod'],
dest='domain',
default=domains['prod'],
help='Use prod as source of data.'),
make_option('--stage',
action='store_const',
const=domains['stage'],
dest='domain',
help='Use stage as source of data.'),
make_option('--dev',
action='store_const',
const=domains['dev'],
dest='domain',
help='Use dev as source of data.'),
make_option('--delete',
action='store_true',
dest='delete',
default=False,
help='Start by deleting all prices.'),
make_option('--noop',
action='store_true',
dest='noop',
default=False,
help=('Show data that would be added, '
'but do not create objects.')),
)
def handle(self, *args, **kw):
data = requests.get(kw['domain'] + endpoint).json()
if kw['delete']:
Price.objects.all().delete()
PriceCurrency.objects.all().delete()
if kw['noop']:
pprint.pprint(data['objects'], indent=2)
else:
for p in data['objects']:
pr = Price.objects.create(name=p['name'].split(' ')[-1],
price=p['price'])
for pc in p['prices']:
pr.pricecurrency_set.create(currency=pc['currency'],
price=pc['price'],
provider=pc['provider'],
method=pc['method'],
region=pc['region'])
```
#### File: market/tests/test_models.py
```python
import datetime
from decimal import Decimal
from django.utils import translation
import mock
from nose.tools import eq_, ok_
import amo
import amo.tests
from addons.models import Addon, AddonUser
from constants.payments import PROVIDER_BANGO
from market.models import AddonPremium, PreApprovalUser, Price, Refund
from mkt.constants import apps
from mkt.constants.regions import (ALL_REGION_IDS, BR, HU,
SPAIN, US, WORLDWIDE)
from stats.models import Contribution
from users.models import UserProfile
class TestPremium(amo.tests.TestCase):
fixtures = ['market/prices.json', 'base/addon_3615.json']
def setUp(self):
self.tier_one = Price.objects.get(pk=1)
self.addon = Addon.objects.get(pk=3615)
def test_is_complete(self):
ap = AddonPremium(addon=self.addon)
assert not ap.is_complete()
ap.price = self.tier_one
assert not ap.is_complete()
ap.addon.paypal_id = 'asd'
assert ap.is_complete()
class TestPrice(amo.tests.TestCase):
fixtures = ['market/prices.json']
def setUp(self):
self.tier_one = Price.objects.get(pk=1)
if hasattr(Price, '_currencies'):
del Price._currencies # needed to pick up fixtures.
def test_active(self):
eq_(Price.objects.count(), 2)
eq_(Price.objects.active().count(), 1)
def test_active_order(self):
Price.objects.create(name='USD', price='0.00')
Price.objects.create(name='USD', price='1.99')
eq_(list(Price.objects.active().values_list('price', flat=True)),
[Decimal('0.00'), Decimal('0.99'), Decimal('1.99')])
def test_method_default_all(self):
price = Price.objects.create(name='USD', price='0.00')
eq_(price.method, 2)
def test_method_specified(self):
price = Price.objects.create(name='USD', price='0.99', method=0)
eq_(price.method, 0)
def test_currency(self):
eq_(self.tier_one.pricecurrency_set.count(), 3)
def test_get(self):
eq_(Price.objects.get(pk=1).get_price(), Decimal('0.99'))
def test_get_tier(self):
translation.activate('en_CA')
eq_(Price.objects.get(pk=1).get_price(), Decimal('0.99'))
eq_(Price.objects.get(pk=1).get_price_locale(), u'US$0.99')
def test_get_tier_and_locale(self):
translation.activate('pt_BR')
eq_(Price.objects.get(pk=2).get_price(), Decimal('1.99'))
eq_(Price.objects.get(pk=2).get_price_locale(), u'US$1,99')
def test_no_region(self):
eq_(Price.objects.get(pk=2).get_price_locale(region=HU.id), None)
def test_fallback(self):
translation.activate('foo')
eq_(Price.objects.get(pk=1).get_price(), Decimal('0.99'))
eq_(Price.objects.get(pk=1).get_price_locale(), u'$0.99')
def test_transformer(self):
price = Price.objects.get(pk=1)
price.get_price_locale()
# Warm up Price._currencies.
with self.assertNumQueries(0):
eq_(price.get_price_locale(), u'$0.99')
def test_get_tier_price(self):
eq_(Price.objects.get(pk=2).get_price_locale(region=BR.id), 'R$1.01')
def test_get_free_tier_price(self):
price = self.make_price('0.00')
eq_(price.get_price_locale(region=US.id), '$0.00')
def test_euro_placement(self):
with self.activate('en-us'):
eq_(Price.objects.get(pk=2).get_price_locale(region=SPAIN.id),
u'\u20ac0.50')
with self.activate('es'):
eq_(Price.objects.get(pk=2).get_price_locale(region=SPAIN.id),
u'0,50\xa0\u20ac')
def test_prices(self):
currencies = Price.objects.get(pk=1).prices()
eq_(len(currencies), 2)
eq_(currencies[0]['currency'], 'PLN')
def test_wrong_currency(self):
bad = 4999
ok_(bad not in ALL_REGION_IDS)
ok_(not Price.objects.get(pk=1).get_price('foo', region=bad))
def test_prices_provider(self):
currencies = Price.objects.get(pk=1).prices(provider=PROVIDER_BANGO)
eq_(len(currencies), 2)
def test_region_ids_by_slug(self):
eq_(Price.objects.get(pk=2).region_ids_by_slug(),
(BR.id, SPAIN.id, WORLDWIDE.id))
class TestPriceCurrencyChanges(amo.tests.TestCase):
def setUp(self):
self.addon = amo.tests.addon_factory()
self.make_premium(self.addon)
self.currency = self.addon.premium.price.pricecurrency_set.all()[0]
@mock.patch('addons.tasks.index_objects')
def test_save(self, index_objects):
self.currency.save()
eq_(index_objects.call_args[0][0], [self.addon.pk])
@mock.patch('addons.tasks.index_objects')
def test_delete(self, index_objects):
self.currency.delete()
eq_(index_objects.call_args[0][0], [self.addon.pk])
class ContributionMixin(object):
def setUp(self):
self.addon = Addon.objects.get(pk=3615)
self.user = UserProfile.objects.get(pk=999)
def create(self, type):
return Contribution.objects.create(type=type, addon=self.addon,
user=self.user)
def purchased(self):
return (self.addon.addonpurchase_set
.filter(user=self.user, type=amo.CONTRIB_PURCHASE)
.exists())
def type(self):
return self.addon.addonpurchase_set.get(user=self.user).type
class TestContribution(ContributionMixin, amo.tests.TestCase):
fixtures = ['base/addon_3615', 'base/users']
def test_purchase(self):
self.create(amo.CONTRIB_PURCHASE)
assert self.purchased()
def test_refund(self):
self.create(amo.CONTRIB_REFUND)
assert not self.purchased()
def test_purchase_and_refund(self):
self.create(amo.CONTRIB_PURCHASE)
self.create(amo.CONTRIB_REFUND)
assert not self.purchased()
eq_(self.type(), amo.CONTRIB_REFUND)
def test_refund_and_purchase(self):
# This refund does nothing, there was nothing there to refund.
self.create(amo.CONTRIB_REFUND)
self.create(amo.CONTRIB_PURCHASE)
assert self.purchased()
eq_(self.type(), amo.CONTRIB_PURCHASE)
def test_really_cant_decide(self):
self.create(amo.CONTRIB_PURCHASE)
self.create(amo.CONTRIB_REFUND)
self.create(amo.CONTRIB_PURCHASE)
self.create(amo.CONTRIB_REFUND)
self.create(amo.CONTRIB_PURCHASE)
assert self.purchased()
eq_(self.type(), amo.CONTRIB_PURCHASE)
def test_purchase_and_chargeback(self):
self.create(amo.CONTRIB_PURCHASE)
self.create(amo.CONTRIB_CHARGEBACK)
assert not self.purchased()
eq_(self.type(), amo.CONTRIB_CHARGEBACK)
def test_other_user(self):
other = UserProfile.objects.get(email='<EMAIL>')
Contribution.objects.create(type=amo.CONTRIB_PURCHASE,
addon=self.addon, user=other)
self.create(amo.CONTRIB_PURCHASE)
self.create(amo.CONTRIB_REFUND)
eq_(self.addon.addonpurchase_set.filter(user=other).count(), 1)
def set_role(self, role):
AddonUser.objects.create(addon=self.addon, user=self.user, role=role)
self.create(amo.CONTRIB_PURCHASE)
installed = self.user.installed_set.filter(addon=self.addon)
eq_(installed.count(), 1)
eq_(installed[0].install_type, apps.INSTALL_TYPE_DEVELOPER)
def test_user_dev(self):
self.set_role(amo.AUTHOR_ROLE_DEV)
def test_user_owner(self):
self.set_role(amo.AUTHOR_ROLE_OWNER)
def test_user_installed_dev(self):
self.create(amo.CONTRIB_PURCHASE)
eq_(self.user.installed_set.filter(addon=self.addon).count(), 1)
def test_user_not_purchased(self):
self.addon.update(premium_type=amo.ADDON_PREMIUM)
eq_(list(self.user.purchase_ids()), [])
def test_user_purchased(self):
self.addon.update(premium_type=amo.ADDON_PREMIUM)
self.addon.addonpurchase_set.create(user=self.user)
eq_(list(self.user.purchase_ids()), [3615L])
def test_user_refunded(self):
self.addon.update(premium_type=amo.ADDON_PREMIUM)
self.addon.addonpurchase_set.create(user=self.user,
type=amo.CONTRIB_REFUND)
eq_(list(self.user.purchase_ids()), [])
def test_user_cache(self):
# Tests that the purchase_ids caches.
self.addon.update(premium_type=amo.ADDON_PREMIUM)
eq_(list(self.user.purchase_ids()), [])
self.create(amo.CONTRIB_PURCHASE)
eq_(list(self.user.purchase_ids()), [3615L])
# This caches.
eq_(list(self.user.purchase_ids()), [3615L])
self.create(amo.CONTRIB_REFUND)
eq_(list(self.user.purchase_ids()), [])
class TestRefundContribution(ContributionMixin, amo.tests.TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(TestRefundContribution, self).setUp()
self.contribution = self.create(amo.CONTRIB_PURCHASE)
def do_refund(self, expected, status, refund_reason=None,
rejection_reason=None):
"""Checks that a refund is enqueued and contains the correct values."""
self.contribution.enqueue_refund(status, self.user,
refund_reason=refund_reason,
rejection_reason=rejection_reason)
expected.update(contribution=self.contribution, status=status)
eq_(Refund.objects.count(), 1)
refund = Refund.objects.filter(**expected)
eq_(refund.exists(), True)
return refund[0]
def test_pending(self):
reason = 'this is bloody bullocks, mate'
expected = dict(refund_reason=reason,
requested__isnull=False,
approved=None,
declined=None)
refund = self.do_refund(expected, amo.REFUND_PENDING, reason)
self.assertCloseToNow(refund.requested)
def test_pending_to_approved(self):
reason = 'this is bloody bullocks, mate'
expected = dict(refund_reason=reason,
requested__isnull=False,
approved=None,
declined=None)
refund = self.do_refund(expected, amo.REFUND_PENDING, reason)
self.assertCloseToNow(refund.requested)
# Change `requested` date to some date in the past.
requested_date = refund.requested - datetime.timedelta(hours=1)
refund.requested = requested_date
refund.save()
expected = dict(refund_reason=reason,
requested__isnull=False,
approved__isnull=False,
declined=None)
refund = self.do_refund(expected, amo.REFUND_APPROVED)
eq_(refund.requested, requested_date,
'Expected date `requested` to remain unchanged.')
self.assertCloseToNow(refund.approved)
def test_approved_instant(self):
expected = dict(refund_reason='',
requested__isnull=False,
approved__isnull=False,
declined=None)
refund = self.do_refund(expected, amo.REFUND_APPROVED_INSTANT)
self.assertCloseToNow(refund.requested)
self.assertCloseToNow(refund.approved)
def test_pending_to_declined(self):
refund_reason = 'please, bro'
rejection_reason = 'sorry, brah'
expected = dict(refund_reason=refund_reason,
rejection_reason='',
requested__isnull=False,
approved=None,
declined=None)
refund = self.do_refund(expected, amo.REFUND_PENDING, refund_reason)
self.assertCloseToNow(refund.requested)
requested_date = refund.requested - datetime.timedelta(hours=1)
refund.requested = requested_date
refund.save()
expected = dict(refund_reason=refund_reason,
rejection_reason=rejection_reason,
requested__isnull=False,
approved=None,
declined__isnull=False)
refund = self.do_refund(expected, amo.REFUND_DECLINED,
rejection_reason=rejection_reason)
eq_(refund.requested, requested_date,
'Expected date `requested` to remain unchanged.')
self.assertCloseToNow(refund.declined)
class TestRefundManager(amo.tests.TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
self.addon = Addon.objects.get(id=3615)
self.user = UserProfile.objects.get(email='<EMAIL>')
self.expected = {}
for status in amo.REFUND_STATUSES.keys():
c = Contribution.objects.create(addon=self.addon, user=self.user,
type=amo.CONTRIB_PURCHASE)
self.expected[status] = Refund.objects.create(contribution=c,
status=status,
user=self.user)
def test_all(self):
eq_(sorted(Refund.objects.values_list('id', flat=True)),
sorted(e.id for e in self.expected.values()))
def test_pending(self):
eq_(list(Refund.objects.pending(self.addon)),
[self.expected[amo.REFUND_PENDING]])
def test_approved(self):
eq_(list(Refund.objects.approved(self.addon)),
[self.expected[amo.REFUND_APPROVED]])
def test_instant(self):
eq_(list(Refund.objects.instant(self.addon)),
[self.expected[amo.REFUND_APPROVED_INSTANT]])
def test_declined(self):
eq_(list(Refund.objects.declined(self.addon)),
[self.expected[amo.REFUND_DECLINED]])
def test_by_addon(self):
other = Addon.objects.create(type=amo.ADDON_WEBAPP)
c = Contribution.objects.create(addon=other, user=self.user,
type=amo.CONTRIB_PURCHASE)
ref = Refund.objects.create(contribution=c, status=amo.REFUND_DECLINED,
user=self.user)
declined = Refund.objects.filter(status=amo.REFUND_DECLINED)
eq_(sorted(r.id for r in declined),
sorted(r.id for r in [self.expected[amo.REFUND_DECLINED], ref]))
eq_(sorted(r.id for r in Refund.objects.by_addon(addon=self.addon)),
sorted(r.id for r in self.expected.values()))
eq_(list(Refund.objects.by_addon(addon=other)), [ref])
class TestUserPreApproval(amo.tests.TestCase):
fixtures = ['base/users']
def setUp(self):
self.user = UserProfile.objects.get(pk=999)
def test_get_preapproval(self):
eq_(self.user.get_preapproval(), None)
pre = PreApprovalUser.objects.create(user=self.user)
eq_(self.user.get_preapproval(), pre)
def test_has_key(self):
assert not self.user.has_preapproval_key()
pre = PreApprovalUser.objects.create(user=self.user, paypal_key='')
assert not self.user.has_preapproval_key()
pre.update(paypal_key='123')
assert UserProfile.objects.get(pk=self.user.pk).has_preapproval_key()
```
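The refund tests above exercise a small query API on `Refund.objects` (`pending`, `approved`, `instant`, `declined`, `by_addon`). A minimal sketch of how calling code might use it follows; the import path is an assumption for illustration only.
```python
# Sketch only: the import path is an assumption; use whichever module
# actually defines Refund in your checkout.
from market.models import Refund

def refund_queue_counts(addon):
    """Count refund requests in each state for one add-on."""
    return {
        'pending': len(Refund.objects.pending(addon)),
        'approved': len(Refund.objects.approved(addon)),
        'instant': len(Refund.objects.instant(addon)),
        'declined': len(Refund.objects.declined(addon)),
    }
```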
#### File: zamboni/lib/cef_loggers.py
```python
from django.conf import settings
from django.http import HttpRequest
from cef import log_cef as _log_cef
metlog = settings.METLOG
class CEFLogger:
"""Abstract base CEF logger.
Class attributes to set in a concrete class:
**sig_prefix**
Prefix to the CEF signature. Example: RECEIPT
**cs2label**
cs2label parameter. Example: ReceiptTransaction
**msg_prefix**
Prefix to all CEF log messages. Example: Receipt
**default_severity**
If set, this should be a 0-10 int.
"""
sig_prefix = ''
cs2label = None
msg_prefix = ''
default_severity = None
def log(self, environ, app, msg, longer, severity=None,
extra_kwargs=None):
"""Log something important using the CEF library.
Parameters:
**environ**
Typically a Django request object. It can also be
a plain dict.
**app**
An app/addon object.
**msg**
A short message about the incident.
**longer**
            A more descriptive message about the incident.
**severity=None**
A 0-10 int to override the default severity.
**extra_kwargs**
A dict to override anything sent to the CEF library.
"""
c = {'cef.product': getattr(settings, 'CEF_PRODUCT', 'AMO'),
'cef.vendor': getattr(settings, 'CEF_VENDOR', 'Mozilla'),
'cef.version': getattr(settings, 'CEF_VERSION', '0'),
'cef.device_version': getattr(settings,
'CEF_DEVICE_VERSION',
'0'),
'cef.file': getattr(settings, 'CEF_FILE', 'syslog'), }
user = getattr(environ, 'amo_user', None)
# Sometimes app is a string, eg: "unknown". Boo!
try:
app_str = app.pk
except AttributeError:
app_str = app
kwargs = {'username': getattr(user, 'name', ''),
'suid': str(getattr(user, 'pk', '')),
'signature': '%s%s' % (self.sig_prefix, msg.upper()),
'msg': longer, 'config': c,
# Until the CEF log can cope with unicode app names, just
# use primary keys.
'cs2': app_str, 'cs2Label': self.cs2label}
if extra_kwargs:
kwargs.update(extra_kwargs)
if not severity:
severity = self.default_severity
if not severity:
raise ValueError('CEF severity was not defined')
if isinstance(environ, HttpRequest):
environ = environ.META.copy()
if settings.USE_METLOG_FOR_CEF:
return metlog.cef('%s %s' % (self.msg_prefix, msg), severity,
environ, **kwargs)
else:
return _log_cef('%s %s' % (self.msg_prefix, msg),
severity, environ, **kwargs)
class ReceiptCEFLogger(CEFLogger):
sig_prefix = 'RECEIPT'
cs2label = 'ReceiptTransaction'
msg_prefix = 'Receipt'
default_severity = 5
receipt_cef = ReceiptCEFLogger()
class AppPayCEFLogger(CEFLogger):
"""
Anything to do with app payments.
"""
sig_prefix = 'APP_PAY'
cs2label = 'AppPayment'
msg_prefix = 'AppPayment'
default_severity = 5
app_pay_cef = AppPayCEFLogger()
```
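A hedged sketch of how these loggers are meant to be called, following the `CEFLogger.log` signature above. It assumes a configured Django/zamboni environment so that `lib.cef_loggers` is importable; the wrapper functions are hypothetical.
```python
# Illustrative only: assumes Django settings (METLOG, CEF_* values) are
# configured so that importing the module works.
from lib.cef_loggers import receipt_cef, app_pay_cef

def record_receipt_signed(request, app):
    # log(environ, app, msg, longer, severity=None, extra_kwargs=None)
    receipt_cef.log(request, app, 'sign', 'Receipt signed for app install')

def record_payment_started(request, app):
    # Override the default severity (5) and pass extra CEF kwargs through.
    app_pay_cef.log(request, app, 'purchase', 'Payment started for app',
                    severity=7, extra_kwargs={'cs1': 'marketplace'})
```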
#### File: es/tests/test_commands.py
```python
import os
import subprocess
import sys
import time
import mock
from nose.tools import eq_
from pyelasticsearch.exceptions import ElasticHttpNotFoundError
from django.conf import settings
from django.db import connection
import amo.search
import amo.tests
from addons.models import AddonCategory, Category
from amo.urlresolvers import reverse
from amo.utils import urlparams
from es.management.commands.reindex import (call_es, database_flagged,
unflag_database)
from es.management.commands.fixup_mkt_index import Command as FixupCommand
from mkt.site.fixtures import fixture
from mkt.webapps.models import Webapp, WebappIndexer
class TestIndexCommand(amo.tests.ESTestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
super(TestIndexCommand, self).setUp()
if database_flagged():
unflag_database()
self.url = reverse('search.search')
self.webapp = Webapp.objects.get(id=337141)
self.apps = [self.webapp]
self.cat = Category.objects.create(name='Games', type=amo.ADDON_WEBAPP)
AddonCategory.objects.create(addon=self.webapp, category=self.cat)
# Emit post-save signal so the app gets reindexed.
self.webapp.save()
self.refresh()
        # XXX I have not found a better way for now
source = os.path.join(os.path.dirname(__file__), 'settings.tmpl')
self.settings = 'settings_%s' % os.urandom(5).encode('hex')
self.target = os.path.join(settings.ROOT, self.settings + '.py')
self.target_pyc = self.target + 'c'
with open(source) as f:
data = {'DB': settings.DATABASES['default']['NAME']}
with open(self.target, 'w') as target:
target.write(f.read() % data)
# any index created during the test will be deleted
self.indices = call_es('_status').json()['indices'].keys()
def tearDown(self):
for file_ in (self.target, self.target_pyc):
if os.path.exists(file_):
os.remove(file_)
current_indices = call_es('_status').json()['indices'].keys()
for index in current_indices:
if index not in self.indices:
call_es(index, method='DELETE')
def check_results(self, params, expected, sorted=True):
r = self.client.get(urlparams(self.url, **params), follow=True)
eq_(r.status_code, 200, str(r.content))
got = self.get_results(r)
if sorted:
got.sort()
expected.sort()
eq_(got, expected,
'Got: %s. Expected: %s. Parameters: %s' % (got, expected, params))
return r
def get_results(self, r, sort=False):
"""Return pks of add-ons shown on search results page."""
pager = r.context['pager']
results = []
for page_num in range(pager.paginator.num_pages):
results.extend([item.pk for item
in pager.paginator.page(page_num + 1)])
if sort:
results = sorted(results)
return results
def _create_app(self, name='app', signal=True):
webapp = Webapp.objects.create(status=amo.STATUS_PUBLIC,
name=name,
type=amo.ADDON_WEBAPP)
AddonCategory.objects.create(addon=webapp, category=self.cat)
webapp.save(_signal=signal)
return webapp
def test_reindexation(self):
# adding a web app
webapp2 = self._create_app('neat app 2')
self.refresh()
# this search should return both apps
r = self.check_results({'sort': 'popularity'},
[webapp2.pk, self.webapp.pk])
# adding 5 more apps
webapps = [self._create_app('moarneatapp %d' % i)
for i in range(5)]
self.refresh()
        # XXX is there a cleaner way?
        # all I want is to have those webapps in the DB
# so the reindex command sees them
connection._commit()
connection.clean_savepoints()
# right now, the DB should be composed of
# two indexes, and two aliases, let's check
# we have two aliases
aliases = call_es('_aliases').json()
old_aliases = [(index, aliases['aliases'].keys()[0])
for index, aliases in aliases.items()
if len(aliases['aliases']) > 0 and
index.startswith('test')]
old_aliases.sort()
# now doing a reindexation in a background process
args = [sys.executable, 'manage.py', 'reindex', '--prefix=test_',
'--settings=%s' % self.settings]
indexer = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=settings.ROOT)
try:
# we should be able to continue some searching in the foreground
# and always get our documents
#
# we should also be able to index new documents, and
# they should not be lost
count = 1
wanted = [app.pk for app in webapps] + [webapp2.pk, self.webapp.pk]
# let's add more apps, and also do some searches
while indexer.poll() is None and count < 8:
r = self.client.get(urlparams(self.url, sort='popularity'),
follow=True)
eq_(r.status_code, 200, str(r.content))
got = self.get_results(r)
got.sort()
self.assertEqual(len(got), len(wanted), (got, wanted))
wanted.append(self._create_app('moar %d' % count).pk)
self.refresh()
connection._commit()
connection.clean_savepoints()
count += 1
time.sleep(.1)
if count < 3:
raise AssertionError("Could not index enough objects for the "
"test to be meaningful.")
except Exception:
indexer.terminate()
raise
stdout, stderr = indexer.communicate()
self.assertTrue('Reindexation done' in stdout, stdout + '\n' + stderr)
amo.search.get_es().refresh()
# the reindexation is done, let's double check we have all our docs
self.check_results({'sort': 'popularity'}, wanted)
# let's check the aliases as well, we should have 2
aliases = call_es('_aliases').json()
new_aliases = [(index, aliases['aliases'].keys()[0])
for index, aliases in aliases.items()
if len(aliases['aliases']) > 0 and
index.startswith('test')]
new_aliases.sort()
        self.assertEqual(len(new_aliases), 2)
# and they should be new aliases
self.assertNotEqual(new_aliases, old_aliases)
def test_remove_index(self):
# Putting a test_amo index in the way.
es = amo.search.get_es()
for index in es.get_indices().keys():
for prefix in ('test_amo', 'test_amo_stats'):
if index.startswith(prefix + '-'):
es.delete_alias(prefix, [index])
es.delete_index(index)
es.create_index(prefix)
# reindexing the first app
self.webapp.save()
self.refresh()
# now doing a reindexation in a background process
args = [sys.executable, 'manage.py', 'reindex', '--prefix=test_',
'--settings=%s' % self.settings]
indexer = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=settings.ROOT)
stdout, stderr = indexer.communicate()
class TestFixupCommand(amo.tests.ESTestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
super(TestFixupCommand, self).setUp()
self.index = WebappIndexer.get_index()
self.doctype = WebappIndexer.get_mapping_type_name()
self.es = WebappIndexer.get_es()
self.app = Webapp.objects.get(pk=337141)
def test_missing(self):
try:
self.es.delete(self.index, self.doctype, self.app.id)
except ElasticHttpNotFoundError:
pass # Already not in the index.
FixupCommand().handle()
self.es.refresh(self.index)
# If not there this will throw `ElasticHttpNotFoundError`.
self.es.get(self.index, self.doctype, self.app.id, fields='id')
def test_missing_no_deleted(self):
self.app.update(status=amo.STATUS_DELETED)
try:
self.es.delete(self.index, self.doctype, self.app.id)
except ElasticHttpNotFoundError:
pass # Already not in the index.
FixupCommand().handle()
self.es.refresh(self.index)
with self.assertRaises(ElasticHttpNotFoundError):
self.es.get(self.index, self.doctype, self.app.id, fields='id')
```
#### File: mkt/account/api.py
```python
from mkt.api.authentication import (OAuthAuthentication,
SharedSecretAuthentication)
from mkt.api.authorization import OwnerAuthorization
from mkt.api.resources import AppResource
from mkt.constants.apps import INSTALL_TYPE_USER
from mkt.webapps.models import Webapp
class InstalledResource(AppResource):
class Meta(AppResource.Meta):
authentication = (SharedSecretAuthentication(), OAuthAuthentication())
authorization = OwnerAuthorization()
detail_allowed_methods = []
list_allowed_methods = ['get']
resource_name = 'installed/mine'
slug_lookup = None
def obj_get_list(self, request=None, **kwargs):
return Webapp.objects.no_cache().filter(
installed__user=request.amo_user,
installed__install_type=INSTALL_TYPE_USER)
```
#### File: mkt/api/base.py
```python
import json
import logging
import sys
import traceback
from collections import defaultdict
from django.conf import settings
from django.core.urlresolvers import reverse
from django.conf.urls.defaults import url
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from django.db.models.sql import EmptyResultSet
from django.http import HttpResponseNotFound
import commonware.log
from rest_framework.mixins import ListModelMixin
from rest_framework.routers import Route, SimpleRouter
from rest_framework.relations import HyperlinkedRelatedField
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from tastypie import fields, http
from tastypie.bundle import Bundle
from tastypie.exceptions import (ImmediateHttpResponse, NotFound,
UnsupportedFormat)
from tastypie.fields import ToOneField
from tastypie.http import HttpConflict
from tastypie.resources import ModelResource, Resource
from access import acl
from translations.fields import PurifiedField, TranslatedField
from .exceptions import AlreadyPurchased, DeserializationError
from .http import HttpTooManyRequests
from .serializers import Serializer
log = commonware.log.getLogger('z.api')
tasty_log = logging.getLogger('django.request.tastypie')
def list_url(name, **kw):
kw['resource_name'] = name
return ('api_dispatch_list', kw)
def get_url(name, pk, **kw):
kw.update({'resource_name': name, 'pk': pk})
return ('api_dispatch_detail', kw)
def http_error(errorclass, reason, extra_data=None):
response = errorclass()
data = {'reason': reason}
if extra_data:
data.update(extra_data)
response.content = json.dumps(data)
return ImmediateHttpResponse(response)
def handle_500(resource, request, exception):
response_class = http.HttpApplicationError
if isinstance(exception, (NotFound, ObjectDoesNotExist)):
response_class = HttpResponseNotFound
# Print some nice 500 errors back to the clients if not in debug mode.
exc_info = sys.exc_info()
tasty_log.error('%s: %s %s\n%s' % (request.path,
exception.__class__.__name__,
exception,
traceback.format_tb(exc_info[2])),
extra={'status_code': 500, 'request': request},
exc_info=exc_info)
data = {
'error_message': str(exception),
'error_code': getattr(exception, 'id',
exception.__class__.__name__),
'error_data': getattr(exception, 'data', {})
}
serialized = resource.serialize(request, data, 'application/json')
return response_class(content=serialized,
content_type='application/json; charset=utf-8')
class Marketplace(object):
"""
A mixin with some general Marketplace stuff.
"""
class Meta(object):
serializer = Serializer()
def _handle_500(self, request, exception):
return handle_500(self, request, exception)
def dispatch(self, request_type, request, **kwargs):
# OAuth authentication uses the method in the signature. So we need
# to store the original method used to sign the request.
request.signed_method = request.method
if 'HTTP_X_HTTP_METHOD_OVERRIDE' in request.META:
request.method = request.META['HTTP_X_HTTP_METHOD_OVERRIDE']
log.info('Request: %s' % request.META.get('PATH_INFO'))
ct = request.META.get('CONTENT_TYPE')
try:
return (super(Marketplace, self)
.dispatch(request_type, request, **kwargs))
except DeserializationError:
if ct:
error = "Unable to deserialize request body as '%s'" % ct
else:
error = 'Content-Type header required'
raise self.non_form_errors((('__all__', error),),)
except UnsupportedFormat:
msgs = []
if ct not in self._meta.serializer.supported_formats:
msgs.append(('__all__',
"Unsupported Content-Type header '%s'" % ct))
accept = request.META.get('HTTP_ACCEPT')
if accept and accept != 'application/json':
msgs.append(('__all__',
"Unsupported Accept header '%s'" % accept))
raise self.non_form_errors(msgs)
except PermissionDenied:
# Reraise PermissionDenied as 403, otherwise you get 500.
raise http_error(http.HttpForbidden, 'Permission denied.')
except AlreadyPurchased:
raise http_error(HttpConflict, 'Already purchased app.')
def non_form_errors(self, error_list):
"""
Raises passed field errors as an immediate HttpBadRequest response.
Similar to Marketplace.form_errors, except that it allows you to raise
form field errors outside of form validation.
Accepts a list of two-tuples, consisting of a field name and error
message.
Example usage:
errors = []
if 'app' in bundle.data:
errors.append(('app', 'Cannot update the app of a rating.'))
if 'user' in bundle.data:
errors.append(('user', 'Cannot update the author of a rating.'))
if errors:
raise self.non_form_errors(errors)
"""
errors = defaultdict(list)
for e in error_list:
errors[e[0]].append(e[1])
response = http.HttpBadRequest(json.dumps({'error_message': errors}),
content_type='application/json')
return ImmediateHttpResponse(response=response)
def form_errors(self, forms):
errors = {}
if not isinstance(forms, list):
forms = [forms]
for f in forms:
# If we've got form objects, get the error object off it.
# Otherwise assume we've just been passed a form object.
form_errors = getattr(f, 'errors', f)
if isinstance(form_errors, list): # Cope with formsets.
for e in form_errors:
errors.update(e)
continue
errors.update(dict(form_errors.items()))
response = http.HttpBadRequest(json.dumps({'error_message': errors}),
content_type='application/json')
return ImmediateHttpResponse(response=response)
def _auths(self):
auths = self._meta.authentication
if not isinstance(auths, (list, tuple)):
auths = [self._meta.authentication]
return auths
def is_authenticated(self, request):
"""
        An override of the tastypie authentication check that accepts an
        iterable of Authentication methods. It tries each in order and uses
        the first one that passes.
        Any authentication method can still return an HttpResponse to break
        out of the loop if it desires.
"""
for auth in self._auths():
log.info('Trying authentication with %s' % auth.__class__.__name__)
auth_result = auth.is_authenticated(request)
if isinstance(auth_result, http.HttpResponse):
raise ImmediateHttpResponse(response=auth_result)
if auth_result:
log.info('Logged in using %s' % auth.__class__.__name__)
return
raise http_error(http.HttpUnauthorized, 'Authentication required.')
def get_throttle_identifiers(self, request):
return set(a.get_identifier(request) for a in self._auths())
def throttle_check(self, request):
"""
Handles checking if the user should be throttled.
Mostly a hook, this uses class assigned to ``throttle`` from
``Resource._meta``.
"""
# Never throttle users with Apps:APIUnthrottled or "safe" requests.
if (not settings.API_THROTTLE or
request.method in ('GET', 'HEAD', 'OPTIONS') or
acl.action_allowed(request, 'Apps', 'APIUnthrottled')):
return
identifiers = self.get_throttle_identifiers(request)
# Check to see if they should be throttled.
if any(self._meta.throttle.should_be_throttled(identifier)
for identifier in identifiers):
# Throttle limit exceeded.
raise http_error(HttpTooManyRequests,
'Throttle limit exceeded.')
def log_throttled_access(self, request):
"""
Handles the recording of the user's access for throttling purposes.
Mostly a hook, this uses class assigned to ``throttle`` from
``Resource._meta``.
"""
request_method = request.method.lower()
identifiers = self.get_throttle_identifiers(request)
for identifier in identifiers:
self._meta.throttle.accessed(identifier,
url=request.get_full_path(),
request_method=request_method)
def cached_obj_get_list(self, request=None, **kwargs):
"""Do not interfere with cache machine caching."""
return self.obj_get_list(request=request, **kwargs)
def cached_obj_get(self, request=None, **kwargs):
"""Do not interfere with cache machine caching."""
return self.obj_get(request, **kwargs)
def is_valid(self, bundle, request=None):
"""A simple wrapper to return form errors in the format we want."""
errors = self._meta.validation.is_valid(bundle, request)
if errors:
raise self.form_errors(errors)
def dehydrate_objects(self, objects, request=None):
"""
        Dehydrates each object using full_dehydrate and then returns the data
        for each object. This is useful for compound results that return
        sub-object data. If you need the request during dehydration, pass it
        through (e.g. accessing region).
"""
return [self.full_dehydrate(Bundle(obj=o, request=request)).data
for o in objects]
class MarketplaceResource(Marketplace, Resource):
"""
Use this if you would like to expose something that is *not* a Django
model as an API.
"""
def get_resource_uri(self, *args, **kw):
return ''
class MarketplaceModelResource(Marketplace, ModelResource):
"""Use this if you would like to expose a Django model as an API."""
def get_resource_uri(self, bundle_or_obj):
# Fix until my pull request gets pulled into tastypie.
# https://github.com/toastdriven/django-tastypie/pull/490
kwargs = {
'resource_name': self._meta.resource_name,
}
if isinstance(bundle_or_obj, Bundle):
kwargs['pk'] = bundle_or_obj.obj.pk
else:
kwargs['pk'] = bundle_or_obj.pk
if self._meta.api_name is not None:
kwargs['api_name'] = self._meta.api_name
return self._build_reverse_url("api_dispatch_detail", kwargs=kwargs)
@classmethod
def should_skip_field(cls, field):
# We don't want to skip translated fields.
if isinstance(field, (PurifiedField, TranslatedField)):
return False
return True if getattr(field, 'rel') else False
def get_object_or_404(self, cls, **filters):
"""
A wrapper around our more familiar get_object_or_404, for when we need
to get access to an object that isn't covered by get_obj.
"""
if not filters:
raise http_error(http.HttpNotFound, 'Not found.')
try:
return cls.objects.get(**filters)
except (cls.DoesNotExist, cls.MultipleObjectsReturned):
raise http_error(http.HttpNotFound, 'Not found.')
def get_by_resource_or_404(self, request, **kwargs):
"""
A wrapper around the obj_get to just get the object.
"""
try:
obj = self.obj_get(request, **kwargs)
except ObjectDoesNotExist:
raise http_error(http.HttpNotFound, 'Not found.')
return obj
def base_urls(self):
"""
If `slug_lookup` is specified on the Meta of a resource, add
in an extra resource that allows lookup by that slug field. This
assumes that the slug won't be all numbers. If the slug is numeric, it
will hit the pk URL pattern and chaos will ensue.
"""
if not getattr(self._meta, 'slug_lookup', None):
return super(MarketplaceModelResource, self).base_urls()
return super(MarketplaceModelResource, self).base_urls()[:3] + [
url(r'^(?P<resource_name>%s)/(?P<pk>\d+)/$' %
self._meta.resource_name,
self.wrap_view('dispatch_detail'),
name='api_dispatch_detail'),
url(r"^(?P<resource_name>%s)/(?P<%s>[^/<>\"']+)/$" %
(self._meta.resource_name, self._meta.slug_lookup),
self.wrap_view('dispatch_detail'),
name='api_dispatch_detail')
]
class GenericObject(dict):
"""
tastypie-friendly subclass of dict that allows direct attribute assignment
of dict items. Best used as `object_class` when not using a `ModelResource`
subclass.
"""
def __getattr__(self, name):
try:
return self.__getitem__(name)
except KeyError:
return None
def __setattr__(self, name, value):
self.__setitem__(name, value)
class CORSResource(object):
"""
A mixin to provide CORS support to your API.
"""
def method_check(self, request, allowed=None):
"""
This is the first entry point from dispatch and a place to check CORS.
It will set a value on the request for the middleware to pick up on
the response and add in the headers, so that any immediate http
responses (which are usually errors) get the headers.
        Optionally, you can specify the allowed methods by setting the
        `cors_allowed` attribute on the resource meta. Otherwise, it will use
the combination of allowed_methods specified on the resource.
"""
request.CORS = getattr(self._meta, 'cors_allowed', None) or allowed
return super(CORSResource, self).method_check(request, allowed=allowed)
class PotatoCaptchaResource(object):
"""
A mixin adding the fields required by PotatoCaptcha to the resource.
"""
tuber = fields.CharField(attribute='tuber')
sprout = fields.CharField(attribute='sprout')
def remove_potato(self, bundle):
for field in ['tuber', 'sprout']:
if field in bundle.data:
del bundle.data[field]
return bundle
def alter_detail_data_to_serialize(self, request, data):
"""
Remove `sprout` from bundle data before returning serialized object to
the consumer.
"""
sup = super(PotatoCaptchaResource, self)
bundle = sup.alter_detail_data_to_serialize(request, data)
return self.remove_potato(bundle)
def check_potatocaptcha(data):
if data.get('tuber', False):
return Response(json.dumps({'tuber': 'Invalid value'}), 400)
if data.get('sprout', None) != 'potato':
return Response(json.dumps({'sprout': 'Invalid value'}), 400)
class CompatRelatedField(HyperlinkedRelatedField):
"""
    Upsell field for connecting Tastypie resources to
    django-rest-framework instances; this got complicated.
"""
def __init__(self, *args, **kwargs):
self.tastypie = kwargs.pop('tastypie')
return super(CompatRelatedField, self).__init__(*args, **kwargs)
def to_native(self, obj):
if getattr(obj, 'pk', None) is None:
return
self.tastypie['pk'] = obj.pk
return reverse('api_dispatch_detail', kwargs=self.tastypie)
def get_object(self, queryset, view_name, view_args, view_kwargs):
return queryset.get(pk=view_kwargs['pk'])
class CompatToOneField(ToOneField):
"""
Tastypie field to relate a resource to a django-rest-framework view.
"""
def __init__(self, *args, **kwargs):
self.url_name = kwargs.pop('url_name', None)
self.extra_fields = kwargs.pop('extra_fields', None)
return super(CompatToOneField, self).__init__(*args, **kwargs)
def dehydrate_related(self, bundle, related_resource):
uri = reverse(self.url_name, kwargs={'pk': bundle.obj.pk})
if self.full:
raise NotImplementedError
elif self.extra_fields:
result = {'resource_uri': uri}
for field in self.extra_fields:
result[field] = getattr(bundle.obj, field)
return result
else:
return uri
def get_related_resource(self, related_instance):
return
class AppRouter(SimpleRouter):
routes = [
# List route.
Route(
url=r'^{lookup}/{prefix}/$',
mapping={
'get': 'list',
'post': 'create'
},
name='{basename}-list',
initkwargs={'suffix': 'List'}
),
# Detail route.
Route(
url=r'^{lookup}/{prefix}/$',
mapping={
'get': 'retrieve',
'put': 'update',
'post': 'detail_post',
'patch': 'partial_update',
'delete': 'destroy'
},
name='{basename}-detail',
initkwargs={'suffix': 'Instance'}
)
]
class SlugRouter(SimpleRouter):
def get_urls(self):
"""
Use the registered viewsets to generate a list of URL patterns.
We can't use the superclass' implementation of get_urls since
we want slug and pk urls for some resources, and it assumes
one url per resource.
"""
ret = []
for prefix, viewset, basename in self.registry:
routes = self.get_routes(viewset)
for route in routes:
# Only actions which actually exist on the viewset will be
# bound.
mapping = self.get_method_map(viewset, route.mapping)
if not mapping:
continue
# Build the url pattern
if route.name.endswith('detail'):
slug_field = getattr(viewset, 'slug_lookup', None)
ret.append(self.create_url(prefix, viewset, basename,
route, mapping, '(?P<pk>\d+)'))
if slug_field:
ret.append(self.create_url(
prefix, viewset, basename, route, mapping,
'(?P<%s>[^/<>"\']+)' % (slug_field,)))
else:
ret.append(self.create_url(prefix, viewset, basename,
route, mapping))
return ret
def create_url(self, prefix, viewset, basename, route, mapping, lookup=''):
regex = route.url.format(prefix=prefix, lookup=lookup,
trailing_slash=self.trailing_slash)
view = viewset.as_view(mapping, **route.initkwargs)
name = route.name.format(basename=basename)
return url(regex, view, name=name)
class CORSMixin(object):
"""
Mixin to enable CORS for DRF API.
"""
def finalize_response(self, request, response, *args, **kwargs):
request._request.CORS = self.cors_allowed_methods
return super(CORSMixin, self).finalize_response(
request, response, *args, **kwargs)
class SlugOrIdMixin(object):
"""
    For cases where the `SlugRouter` is overkill. If your slug field is
    called something else, override `self.slug_field`.
"""
def get_object(self, queryset=None):
if 'pk' in self.kwargs and not self.kwargs['pk'].isdigit():
# If the `pk` contains anything other than a digit, it's a `slug`.
self.kwargs.update(pk=None, slug=self.kwargs['pk'])
return super(SlugOrIdMixin, self).get_object(queryset=queryset)
class SilentListModelMixin(ListModelMixin):
"""
    DRF's ListModelMixin that returns an empty list response rather than a
    500 or 404.
"""
def list(self, *args, **kwargs):
try:
res = super(SilentListModelMixin, self).list(*args, **kwargs)
except EmptyResultSet:
return Response([])
if res.status_code == 404:
return Response([])
return res
class AppViewSet(GenericViewSet):
def initialize_request(self, request, *args, **kwargs):
"""
Pass the value in the URL through to the form defined on the
ViewSet, which will populate the app property with the app object.
You must define a form which will take an app object.
"""
request = (super(AppViewSet, self)
.initialize_request(request, *args, **kwargs))
self.app = None
form = self.form({'app': kwargs.get('pk')})
if form.is_valid():
self.app = form.cleaned_data['app']
return request
```
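As a quick illustration of the `GenericObject` helper defined above (a sketch only; importing `mkt.api.base` requires a configured Django environment with tastypie and django-rest-framework installed):
```python
# GenericObject is a dict whose keys double as attributes, which is what
# tastypie expects from an `object_class` on non-model resources.
from mkt.api.base import GenericObject

obj = GenericObject({'name': 'hello'})
assert obj.name == 'hello'      # attribute read falls through to the dict
obj.price = '0.99'              # attribute write stores a dict item
assert obj['price'] == '0.99'
assert obj.missing is None      # missing keys return None instead of raising
```
Returning `None` for missing keys keeps dehydration of optional fields from raising.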
#### File: mkt/api/fields.py
```python
from rest_framework import fields
from amo.utils import to_language
class TranslationSerializerField(fields.WritableField):
"""
Django-rest-framework custom serializer field for our TranslatedFields.
- When deserializing, in `from_native`, it accepts both a string or a
dictionary. If a string is given, it'll be considered to be in the
default language.
- When serializing, its behavior depends on the parent's serializer context:
If a request was included, and its method is 'GET', and a 'lang' parameter
was passed, then only returns one translation (letting the TranslatedField
figure out automatically which language to use).
Else, just returns a dict with all translations for the given `field_name`
on `obj`, with languages as the keys.
"""
def __init__(self, *args, **kwargs):
super(TranslationSerializerField, self).__init__(*args, **kwargs)
# Default to return all translations for each field.
self.return_all_translations = True
def initialize(self, parent, field_name):
super(TranslationSerializerField, self).initialize(parent, field_name)
request = self.context.get('request', None)
if request and request.method == 'GET' and 'lang' in request.GET:
# A specific language was requested, we only return one translation
# per field.
self.return_all_translations = False
def field_to_native(self, obj, field_name):
field = getattr(obj, field_name)
if not self.return_all_translations:
return unicode(field)
else:
translations = field.__class__.objects.filter(id=field.id,
localized_string__isnull=False)
return dict((to_language(trans.locale), unicode(trans))
for trans in translations)
def from_native(self, data):
if isinstance(data, basestring):
return data.strip()
elif isinstance(data, dict):
for key, value in data.items():
data[key] = value.strip()
return data
data = super(TranslationSerializerField, self).from_native(data)
return unicode(data)
```
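A minimal sketch of the deserialization behaviour described in the docstring, assuming a DRF 2.x environment where `rest_framework.fields.WritableField` exists (as this module requires):
```python
from mkt.api.fields import TranslationSerializerField

field = TranslationSerializerField()

# A bare string is treated as the default language and is merely stripped.
assert field.from_native(u'  Bonjour  ') == u'Bonjour'

# A dict of translations keeps its language keys and strips each value.
assert (field.from_native({'en-US': u' Hi ', 'fr': u' Salut '}) ==
        {'en-US': u'Hi', 'fr': u'Salut'})
```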
#### File: mkt/api/renderers.py
```python
import json
from django.http.multipartparser import parse_header
from rest_framework.renderers import JSONRenderer
class SuccinctJSONRenderer(JSONRenderer):
"""
JSONRenderer subclass that strips spaces from the output.
"""
def render(self, data, accepted_media_type=None, renderer_context=None):
renderer_context = renderer_context or {}
indent = renderer_context.get('indent', None)
# Pass to the superclass if the Accept header is set with an explicit
# indent, if an indent level is manually passed, or if you're attempting
# to render `None`.
if accepted_media_type:
base_media_type, params = parse_header(
accepted_media_type.encode('ascii'))
indent = params.get('indent', indent)
if data is None or indent:
return super(SuccinctJSONRenderer, self).render(data,
accepted_media_type,
renderer_context)
return json.dumps(data, cls=self.encoder_class, indent=indent,
ensure_ascii=self.ensure_ascii, separators=(',', ':'))
```
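The space saving comes from the `separators` argument to `json.dumps`; this stdlib-only sketch shows the difference the renderer makes on the wire:
```python
import json

data = {'categories': ['games', 'fun'], 'price': '0.99'}

default = json.dumps(data)                          # spaces after ',' and ':'
compact = json.dumps(data, separators=(',', ':'))   # what the renderer emits

assert ' ' not in compact
assert len(compact) < len(default)
```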
#### File: api/tests/test_throttle.py
```python
from django.test.client import RequestFactory
from mock import patch
from tastypie.exceptions import ImmediateHttpResponse
from mkt.api.base import HttpTooManyRequests, MarketplaceResource
from mkt.api.tests.test_oauth import BaseOAuth
class ThrottleTests(object):
"""
Mixin to add tests that ensure API endpoints are being appropriately
throttled.
Note: subclasses will need to define the resource being tested.
"""
resource = None
request = RequestFactory().post('/')
def test_should_throttle(self):
if not self.resource:
return
with patch.object(self.resource._meta, 'throttle') as throttle:
throttle.should_be_throttled.return_value = True
with self.assertImmediate(HttpTooManyRequests):
self.resource.throttle_check(self.request)
def test_shouldnt_throttle(self):
with patch.object(self, 'resource') as resource:
resource._meta.throttle.should_be_throttled.return_value = False
try:
self.resource.throttle_check(self.request)
except ImmediateHttpResponse:
self.fail('Unthrottled request raises ImmediateHttpResponse')
class TestThrottle(ThrottleTests, BaseOAuth):
resource = MarketplaceResource()
```
#### File: mkt/constants/ratingsbodies.py
```python
from tower import ugettext_lazy as _lazy
DESC_GENERAL = _lazy(u'General Audiences')
DESC_3 = _lazy(u'Not recommended for users younger than 3 years of age')
DESC_6 = _lazy(u'Not recommended for users younger than 6 years of age')
DESC_7 = _lazy(u'Not recommended for users younger than 7 years of age')
DESC_10 = _lazy(u'Not recommended for users younger than 10 years of age')
DESC_12 = _lazy(u'Not recommended for users younger than 12 years of age')
DESC_13 = _lazy(u'Not recommended for users younger than 13 years of age')
DESC_14 = _lazy(u'Not recommended for users younger than 14 years of age')
DESC_16 = _lazy(u'Not recommended for users younger than 16 years of age')
DESC_17 = _lazy(u'Not recommended for users younger than 17 years of age')
DESC_18 = _lazy(u'Not recommended for users younger than 18 years of age')
DESC_REJECTED = _lazy(u'Rejected for All Audiences')
RATING_DESCS = {
'0': DESC_GENERAL,
'3': DESC_3,
'6': DESC_6,
'7': DESC_7,
'10': DESC_10,
'12': DESC_12,
'13': DESC_13,
'14': DESC_14,
'16': DESC_16,
'17': DESC_17,
'18': DESC_18,
'X': DESC_REJECTED,
}
class RATING(object):
"""
Content rating.
name -- how we name the rating, for translated display on all pages.
description -- for general translated display on consumer pages.
iarc_name -- how IARC names the rating, to talk with IARC.
    slug -- for CSS classes, to create icons. Dynamically generated for most.
"""
class RATING_BODY(object):
"""
Content rating body.
name -- for general translated display on all pages.
description -- for general translated display on all pages.
iarc_name -- how IARC names the ratings body, to talk with IARC.
ratings -- list of RATINGs associated with this body.
full_name -- in case we ever want to display the full translated name.
url -- in case we ever want to link to the ratings body page for more info.
"""
class CLASSIND_L(RATING):
id = 0
name = '0+'
description = RATING_DESCS['0']
iarc_name = '0+'
class CLASSIND_10(RATING):
id = 1
name = '10+'
description = RATING_DESCS['10']
iarc_name = '10+'
class CLASSIND_12(RATING):
id = 2
name = '12+'
description = RATING_DESCS['12']
iarc_name = '12+'
class CLASSIND_14(RATING):
id = 3
name = '14+'
description = RATING_DESCS['14']
iarc_name = '14+'
class CLASSIND_16(RATING):
id = 4
name = '16+'
description = RATING_DESCS['16']
iarc_name = '16+'
class CLASSIND_18(RATING):
id = 5
name = '18+'
description = RATING_DESCS['18']
iarc_name = '18+'
class CLASSIND(RATING_BODY):
"""
The Brazilian game ratings body (aka. DEJUS, DJCTQ).
"""
id = 0
name = 'CLASSIND'
description = _lazy(u'Brazil')
iarc_name = 'CLASSIND'
ratings = (CLASSIND_L, CLASSIND_10, CLASSIND_12, CLASSIND_14, CLASSIND_16,
CLASSIND_18)
full_name = _lazy(u'Department of Justice, Rating, Titles and '
u'Qualification')
url = ('http://portal.mj.gov.br/classificacao/data/Pages/'
'MJ6BC270E8PTBRNN.htm')
class GENERIC_3(RATING):
id = 0
name = '3+'
description = RATING_DESCS['3']
iarc_name = '3+'
class GENERIC_7(RATING):
id = 1
name = '7+'
description = RATING_DESCS['7']
iarc_name = '7+'
class GENERIC_12(RATING):
id = 2
name = '12+'
description = RATING_DESCS['12']
iarc_name = '12+'
class GENERIC_16(RATING):
id = 3
name = '16+'
description = RATING_DESCS['16']
iarc_name = '16+'
class GENERIC_18(RATING):
id = 4
name = '18+'
description = RATING_DESCS['18']
iarc_name = '18+'
class GENERIC(RATING_BODY):
"""
The generic game ratings body (used in Germany, for example).
"""
id = 1
name = _lazy('Generic')
description = '' # No comment.
iarc_name = 'Generic'
ratings = (GENERIC_3, GENERIC_7, GENERIC_12, GENERIC_16, GENERIC_18)
full_name = _lazy(u'Generic')
class USK_0(RATING):
id = 0
name = '0+'
description = RATING_DESCS['0']
iarc_name = '0+'
class USK_6(RATING):
id = 1
name = '6+'
description = RATING_DESCS['6']
iarc_name = '6+'
class USK_12(RATING):
id = 2
name = '12+'
description = RATING_DESCS['12']
iarc_name = '12+'
class USK_16(RATING):
id = 3
name = '16+'
description = RATING_DESCS['16']
iarc_name = '16+'
class USK_18(RATING):
id = 4
name = '18+'
description = RATING_DESCS['18']
iarc_name = '18+'
class USK_REJECTED(RATING):
id = 5
name = _lazy('Rating Rejected')
description = RATING_DESCS['X']
iarc_name = 'Rating Rejected'
class USK(RATING_BODY):
"""
The organization responsible for game ratings in Germany
(aka. Unterhaltungssoftware Selbstkontrolle).
"""
id = 2
name = 'USK'
description = _lazy(u'Germany')
iarc_name = 'USK'
ratings = (USK_0, USK_6, USK_12, USK_16, USK_18, USK_REJECTED)
full_name = _lazy(u'Entertainment Software Self-Regulation Body')
url = 'http://www.usk.de/en/'
class ESRB_E(RATING):
"""Everybody."""
id = 0
name = _lazy('Everyone')
description = RATING_DESCS['0']
iarc_name = 'Everyone'
slug = '0'
class ESRB_10(RATING):
id = 1
name = _lazy('Everyone 10+') # L10n: `10+` is age ten and over.
slug = '10'
description = RATING_DESCS['10']
iarc_name = 'Everyone 10+'
class ESRB_T(RATING):
id = 2
name = _lazy('Teen')
slug = '13'
description = RATING_DESCS['13']
iarc_name = 'Teen'
class ESRB_M(RATING):
id = 3
name = _lazy('Mature 17+') # L10n: `17+` is age seventeen and over.
slug = '17'
description = RATING_DESCS['17']
iarc_name = 'Mature 17+'
class ESRB_A(RATING):
id = 4
name = _lazy('Adults Only 18+') # L10n: `18+` is age eighteen and over.
slug = '18'
description = RATING_DESCS['18']
iarc_name = 'Adults Only'
class ESRB_RP(RATING):
id = 4
name = _lazy('Rating Pending')
slug = 'pending'
description = RATING_DESCS['18']
iarc_name = 'Rating Pending'
class ESRB(RATING_BODY):
"""
The North American game ratings body (i.e. USA, Canada).
"""
id = 3
name = 'ESRB'
description = _lazy(u'N. America') # L10n: `N.` stands for North.
iarc_name = 'ESRB'
ratings = (ESRB_E, ESRB_10, ESRB_T, ESRB_M, ESRB_A)
full_name = _lazy(u'Entertainment Software Rating Board')
url = 'http://esrb.org'
class PEGI_3(RATING):
id = 0
name = '3+'
description = RATING_DESCS['3']
iarc_name = '3+'
class PEGI_7(RATING):
id = 1
name = '7+'
description = RATING_DESCS['7']
iarc_name = '7+'
class PEGI_12(RATING):
id = 2
name = '12+'
description = RATING_DESCS['12']
iarc_name = '12+'
class PEGI_16(RATING):
id = 3
name = '16+'
description = RATING_DESCS['16']
iarc_name = '16+'
class PEGI_18(RATING):
id = 4
name = '18+'
description = RATING_DESCS['18']
iarc_name = '18+'
class PEGI(RATING_BODY):
"""
The European game ratings body (i.e. UK, Poland, Spain).
"""
id = 4
name = 'PEGI'
description = _lazy(u'Europe')
iarc_name = 'PEGI'
ratings = (PEGI_3, PEGI_7, PEGI_12, PEGI_16, PEGI_18)
full_name = _lazy(u'Pan European Game Information')
url = 'http://www.pegi.info'
RATINGS_BODIES = {
CLASSIND.id: CLASSIND,
GENERIC.id: GENERIC,
USK.id: USK,
ESRB.id: ESRB,
PEGI.id: PEGI,
}
# Attach ratings bodies to ratings.
for rb in RATINGS_BODIES.values():
for r in rb.ratings:
r.ratingsbody = rb
def ALL_RATINGS():
"""
List of all ratings with waffled bodies.
"""
import waffle
ALL_RATINGS = []
for rb in RATINGS_BODIES.values():
if rb in (CLASSIND, GENERIC) or waffle.switch_is_active('iarc'):
ALL_RATINGS.extend(rb.ratings)
return ALL_RATINGS
def RATINGS_BY_NAME():
"""
    Create a list of tuples (choices) after we know the locale, since this
    concatenates two lazy translations from this constants file.
"""
import waffle
all_ratings = ALL_RATINGS()
ratings_choices = []
for rb in RATINGS_BODIES.values():
if rb in (CLASSIND, GENERIC) or waffle.switch_is_active('iarc'):
for r in rb.ratings:
ratings_choices.append(
(all_ratings.index(r), u'%s - %s' % (rb.name, r.name)))
return ratings_choices
```
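A short sketch of how the module is typically consumed, assuming a configured Django/zamboni environment (the module needs `tower`, and its helpers need `waffle`). Note that the attach loop above has already given every rating a `ratingsbody` back-reference.
```python
# Sketch: enumerate bodies and ratings, e.g. to build a choices widget.
from mkt.constants import ratingsbodies

assert ratingsbodies.ESRB_T.ratingsbody is ratingsbodies.ESRB

for body in ratingsbodies.RATINGS_BODIES.values():
    names = u', '.join(unicode(r.name) for r in body.ratings)
    print u'%s: %s' % (unicode(body.name), names)
```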
#### File: developers/tests/test_models.py
```python
from datetime import datetime, timedelta
from nose.tools import eq_, ok_
from mock import Mock, patch
from django.core.exceptions import ObjectDoesNotExist
import amo
import amo.tests
from addons.models import Addon
from market.models import AddonPremium, Price
from users.models import UserProfile
from devhub.models import ActivityLog
from mkt.developers.models import (AddonPaymentAccount, CantCancel,
PaymentAccount, SolitudeSeller)
from mkt.site.fixtures import fixture
class TestActivityLogCount(amo.tests.TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
now = datetime.now()
bom = datetime(now.year, now.month, 1)
self.lm = bom - timedelta(days=1)
self.user = UserProfile.objects.filter()[0]
amo.set_user(self.user)
def test_not_review_count(self):
amo.log(amo.LOG['EDIT_VERSION'], Addon.objects.get())
eq_(len(ActivityLog.objects.monthly_reviews()), 0)
def test_review_count(self):
amo.log(amo.LOG['APPROVE_VERSION'], Addon.objects.get())
result = ActivityLog.objects.monthly_reviews()
eq_(len(result), 1)
eq_(result[0]['approval_count'], 1)
eq_(result[0]['user'], self.user.pk)
def test_review_count_few(self):
for x in range(0, 5):
amo.log(amo.LOG['APPROVE_VERSION'], Addon.objects.get())
result = ActivityLog.objects.monthly_reviews()
eq_(len(result), 1)
eq_(result[0]['approval_count'], 5)
def test_review_last_month(self):
log = amo.log(amo.LOG['APPROVE_VERSION'], Addon.objects.get())
log.update(created=self.lm)
eq_(len(ActivityLog.objects.monthly_reviews()), 0)
def test_not_total(self):
amo.log(amo.LOG['EDIT_VERSION'], Addon.objects.get())
eq_(len(ActivityLog.objects.total_reviews()), 0)
def test_total_few(self):
for x in range(0, 5):
amo.log(amo.LOG['APPROVE_VERSION'], Addon.objects.get())
result = ActivityLog.objects.total_reviews()
eq_(len(result), 1)
eq_(result[0]['approval_count'], 5)
def test_total_last_month(self):
log = amo.log(amo.LOG['APPROVE_VERSION'], Addon.objects.get())
log.update(created=self.lm)
result = ActivityLog.objects.total_reviews()
eq_(len(result), 1)
eq_(result[0]['approval_count'], 1)
eq_(result[0]['user'], self.user.pk)
def test_log_admin(self):
amo.log(amo.LOG['OBJECT_EDITED'], Addon.objects.get())
eq_(len(ActivityLog.objects.admin_events()), 1)
eq_(len(ActivityLog.objects.for_developer()), 0)
def test_log_not_admin(self):
amo.log(amo.LOG['EDIT_VERSION'], Addon.objects.get())
eq_(len(ActivityLog.objects.admin_events()), 0)
eq_(len(ActivityLog.objects.for_developer()), 1)
class TestPaymentAccount(amo.tests.TestCase):
fixtures = fixture('webapp_337141', 'user_999')
def setUp(self):
self.user = UserProfile.objects.filter()[0]
solsel_patcher = patch('mkt.developers.models.SolitudeSeller.create')
self.solsel = solsel_patcher.start()
self.solsel.return_value = self.seller = (
SolitudeSeller.objects.create(
resource_uri='selleruri', user=self.user))
self.solsel.patcher = solsel_patcher
client_patcher = patch('mkt.developers.models.client')
self.client = client_patcher.start()
self.client.patcher = client_patcher
def tearDown(self):
self.solsel.patcher.stop()
self.client.patcher.stop()
def test_create_bango(self):
# Return a seller object without hitting Bango.
self.client.api.bango.package.post.return_value = {
'resource_uri': 'zipzap',
'package_id': 123,
}
res = PaymentAccount.create_bango(
self.user, {'account_name': '<NAME>'})
eq_(res.name, '<NAME>')
eq_(res.user, self.user)
eq_(res.seller_uri, 'selleruri')
eq_(res.bango_package_id, 123)
eq_(res.uri, 'zipzap')
self.client.api.bango.package.post.assert_called_with(
data={'paypalEmailAddress': '<EMAIL>',
'seller': 'selleruri'})
self.client.api.bango.bank.post.assert_called_with(
data={'seller_bango': 'zipzap'})
def test_cancel(self):
res = PaymentAccount.objects.create(
name='asdf', user=self.user, uri='foo',
solitude_seller=self.seller)
addon = Addon.objects.get()
AddonPaymentAccount.objects.create(
addon=addon, provider='bango', account_uri='foo',
payment_account=res, product_uri='bpruri')
res.cancel()
assert res.inactive
assert not AddonPaymentAccount.objects.exists()
def test_cancel_shared(self):
res = PaymentAccount.objects.create(
name='asdf', user=self.user, uri='foo',
solitude_seller=self.seller, shared=True)
addon = Addon.objects.get()
AddonPaymentAccount.objects.create(
addon=addon, provider='bango', account_uri='foo',
payment_account=res, product_uri='bpruri')
with self.assertRaises(CantCancel):
res.cancel()
def test_get_details(self):
package = Mock()
package.get.return_value = {'full': {'vendorName': 'a',
'some_other_value': 'b'}}
self.client.api.bango.package.return_value = package
res = PaymentAccount.objects.create(
name='asdf', user=self.user, uri='/foo/bar/123',
solitude_seller=self.seller)
deets = res.get_details()
eq_(deets['account_name'], res.name)
eq_(deets['vendorName'], 'a')
assert 'some_other_value' not in deets
self.client.api.bango.package.assert_called_with('123')
package.get.assert_called_with(data={'full': True})
def test_update_account_details(self):
res = PaymentAccount.objects.create(
name='asdf', user=self.user, uri='foo',
solitude_seller=self.seller)
res.update_account_details(
account_name='<NAME>',
vendorName='new vendor name',
something_other_value='not a package key')
eq_(res.name, 'new name')
self.client.api.by_url(res.uri).patch.assert_called_with(
data={'vendorName': 'new vendor name'})
class TestAddonPaymentAccount(amo.tests.TestCase):
fixtures = fixture('webapp_337141', 'user_999') + ['market/prices']
def setUp(self):
self.user = UserProfile.objects.filter()[0]
amo.set_user(self.user)
self.app = Addon.objects.get()
self.app.premium_type = amo.ADDON_PREMIUM
self.price = Price.objects.filter()[0]
AddonPremium.objects.create(addon=self.app, price=self.price)
self.seller = SolitudeSeller.objects.create(
resource_uri='sellerres', user=self.user
)
self.account = PaymentAccount.objects.create(
solitude_seller=self.seller,
user=self.user, name='paname', uri='acuri',
inactive=False, seller_uri='selluri',
bango_package_id=123
)
@patch('uuid.uuid4', Mock(return_value='lol'))
@patch('mkt.developers.models.generate_key', Mock(return_value='poop'))
@patch('mkt.developers.models.client')
def test_create(self, client):
client.api.generic.product.get_object.return_value = {
'resource_uri': 'gpuri'}
client.api.bango.product.get_object.return_value = {
'resource_uri': 'bpruri', 'bango_id': 'bango#', 'seller': 'selluri'
}
apa = AddonPaymentAccount.create(
'bango', addon=self.app, payment_account=self.account)
eq_(apa.addon, self.app)
eq_(apa.provider, 'bango')
eq_(apa.account_uri, 'acuri')
eq_(apa.product_uri, 'bpruri')
client.api.bango.premium.post.assert_called_with(
data={'bango': 'bango#', 'price': self.price.price,
'currencyIso': 'USD', 'seller_product_bango': 'bpruri'})
eq_(client.api.bango.rating.post.call_args_list[0][1]['data'],
{'bango': 'bango#', 'rating': 'UNIVERSAL',
'ratingScheme': 'GLOBAL', 'seller_product_bango': 'bpruri'})
eq_(client.api.bango.rating.post.call_args_list[1][1]['data'],
{'bango': 'bango#', 'rating': 'GENERAL',
'ratingScheme': 'USA', 'seller_product_bango': 'bpruri'})
@patch('uuid.uuid4', Mock(return_value='lol'))
@patch('mkt.developers.models.generate_key', Mock(return_value='poop'))
@patch('mkt.developers.models.client')
def test_create_with_free_in_app(self, client):
client.api.generic.product.get_object.return_value = {
'resource_uri': 'gpuri'}
client.api.bango.product.get_object.return_value = {
'resource_uri': 'bpruri', 'bango_id': 'bango#', 'seller': 'selluri'
}
self.app.update(premium_type=amo.ADDON_FREE_INAPP)
apa = AddonPaymentAccount.create(
'bango', addon=self.app, payment_account=self.account)
eq_(apa.addon, self.app)
eq_(apa.provider, 'bango')
eq_(apa.account_uri, 'acuri')
eq_(apa.product_uri, 'bpruri')
assert not client.api.bango.premium.post.called
@patch('mkt.developers.models.client')
def test_create_new(self, client):
client.api.bango.product.get_object.side_effect = ObjectDoesNotExist
client.api.bango.product.post.return_value = {
'resource_uri': '', 'bango_id': 1}
AddonPaymentAccount.create(
'bango', addon=self.app, payment_account=self.account)
ok_('packageId' in
client.api.bango.product.post.call_args[1]['data'])
@patch('mkt.developers.models.client')
def test_update_price(self, client):
new_price = 123456
get = Mock()
get.get_object.return_value = {'bango_id': 'bango#'}
client.api.by_url.return_value = get
payment_account = PaymentAccount.objects.create(
user=self.user, name='paname', uri='/path/to/object',
solitude_seller=self.seller)
apa = AddonPaymentAccount.objects.create(
addon=self.app, provider='bango', account_uri='acuri',
payment_account=payment_account,
product_uri='bpruri')
apa.update_price(new_price)
client.api.bango.premium.post.assert_called_with(
data={'bango': 'bango#', 'price': new_price,
'currencyIso': 'USD', 'seller_product_bango': 'bpruri'})
client.api.bango.rating.post.assert_called_with(
data={'bango': 'bango#', 'rating': 'GENERAL',
'ratingScheme': 'USA', 'seller_product_bango': 'bpruri'})
```
#### File: purchase/tests/test_utils_.py
```python
import amo
import amo.tests
import waffle
from users.models import UserProfile
from mkt.purchase.utils import payments_enabled
from mkt.site.fixtures import fixture
from test_utils import RequestFactory
class TestUtils(amo.tests.TestCase):
fixtures = fixture('user_2519')
def setUp(self):
self.req = RequestFactory().get('/')
def test_settings(self):
with self.settings(PAYMENT_LIMITED=False):
assert payments_enabled(self.req)
def test_not_flag(self):
with self.settings(PAYMENT_LIMITED=True):
assert not payments_enabled(self.req)
def test_flag(self):
profile = UserProfile.objects.get(pk=2519)
flag = waffle.models.Flag.objects.create(name='override-app-payments')
flag.everyone = None
flag.users.add(profile.user)
flag.save()
self.req.user = profile.user
with self.settings(PAYMENT_LIMITED=True):
assert payments_enabled(self.req)
```
#### File: receipts/tests/test_views.py
```python
import calendar
import json
import time
import uuid
from django.conf import settings
import mock
from nose.tools import eq_, ok_
from pyquery import PyQuery as pq
from test_utils import RequestFactory
from addons.models import AddonUser
import amo
import amo.tests
from amo.helpers import absolutify
from amo.urlresolvers import reverse
from devhub.models import AppLog
from mkt.constants import apps
from mkt.site.fixtures import fixture
from mkt.webapps.models import Webapp
from mkt.receipts.utils import create_test_receipt
from mkt.receipts.views import devhub_verify
from services.verify import decode_receipt, settings as verify_settings
from users.models import UserProfile
from zadmin.models import DownloadSource
from .test_models import TEST_LEEWAY
@mock.patch.object(settings, 'WEBAPPS_RECEIPT_KEY',
amo.tests.AMOPaths.sample_key())
class TestInstall(amo.tests.TestCase):
fixtures = fixture('user_999', 'user_editor', 'user_editor_group',
'group_editor')
def setUp(self):
self.addon = amo.tests.app_factory(manifest_url='http://cbc.ca/man')
self.url = self.addon.get_detail_url('record')
self.user = UserProfile.objects.get(email='<EMAIL>')
assert self.client.login(username=self.user.email, password='password')
def test_pending_free_for_reviewer(self):
self.addon.update(status=amo.STATUS_PENDING)
assert self.client.login(username='<EMAIL>',
password='password')
eq_(self.client.post(self.url).status_code, 200)
def test_pending_free_for_developer(self):
AddonUser.objects.create(addon=self.addon, user=self.user)
self.addon.update(status=amo.STATUS_PENDING)
eq_(self.client.post(self.url).status_code, 200)
def test_pending_free_for_anonymous(self):
self.addon.update(status=amo.STATUS_PENDING)
eq_(self.client.post(self.url).status_code, 404)
def test_pending_paid_for_reviewer(self):
self.addon.update(status=amo.STATUS_PENDING,
premium_type=amo.ADDON_PREMIUM)
assert self.client.login(username='<EMAIL>',
password='password')
eq_(self.client.post(self.url).status_code, 200)
# Because they aren't using reviewer tools, they'll get a normal
# install record and receipt.
eq_(self.addon.installed.all()[0].install_type,
apps.INSTALL_TYPE_USER)
def test_pending_paid_for_admin(self):
self.addon.update(status=amo.STATUS_PENDING,
premium_type=amo.ADDON_PREMIUM)
self.grant_permission(self.user, '*:*')
eq_(self.client.post(self.url).status_code, 200)
# Check ownership ignores admin users.
eq_(self.addon.installed.all()[0].install_type,
apps.INSTALL_TYPE_USER)
def test_pending_paid_for_developer(self):
AddonUser.objects.create(addon=self.addon, user=self.user)
self.addon.update(status=amo.STATUS_PENDING,
premium_type=amo.ADDON_PREMIUM)
eq_(self.client.post(self.url).status_code, 200)
eq_(self.user.installed_set.all()[0].install_type,
apps.INSTALL_TYPE_DEVELOPER)
def test_pending_paid_for_anonymous(self):
self.addon.update(status=amo.STATUS_PENDING,
premium_type=amo.ADDON_PREMIUM)
eq_(self.client.post(self.url).status_code, 404)
def test_not_record_addon(self):
self.addon.update(type=amo.ADDON_EXTENSION)
res = self.client.post(self.url)
eq_(res.status_code, 404)
eq_(self.user.installed_set.count(), 0)
@mock.patch('mkt.webapps.models.Webapp.has_purchased')
def test_paid(self, has_purchased):
has_purchased.return_value = True
self.addon.update(premium_type=amo.ADDON_PREMIUM)
eq_(self.client.post(self.url).status_code, 200)
def test_own_payments(self):
self.addon.update(premium_type=amo.ADDON_OTHER_INAPP)
eq_(self.client.post(self.url).status_code, 200)
@mock.patch('mkt.webapps.models.Webapp.has_purchased')
def test_not_paid(self, has_purchased):
has_purchased.return_value = False
self.addon.update(premium_type=amo.ADDON_PREMIUM)
eq_(self.client.post(self.url).status_code, 403)
def test_record_logged_out(self):
self.client.logout()
res = self.client.post(self.url)
eq_(res.status_code, 200)
@mock.patch('mkt.receipts.views.receipt_cef.log')
def test_log_metrics(self, cef):
res = self.client.post(self.url)
eq_(res.status_code, 200)
logs = AppLog.objects.filter(addon=self.addon)
eq_(logs.count(), 1)
eq_(logs[0].activity_log.action, amo.LOG.INSTALL_ADDON.id)
@mock.patch('mkt.receipts.views.record_action')
@mock.patch('mkt.receipts.views.receipt_cef.log')
def test_record_metrics(self, cef, record_action):
res = self.client.post(self.url)
eq_(res.status_code, 200)
eq_(record_action.call_args[0][0], 'install')
eq_(record_action.call_args[0][2], {'app-domain': u'http://cbc.ca',
'app-id': self.addon.pk,
'anonymous': False})
@mock.patch('mkt.receipts.views.record_action')
@mock.patch('mkt.receipts.views.receipt_cef.log')
def test_record_metrics_packaged_app(self, cef, record_action):
# Mimic packaged app.
self.addon.update(is_packaged=True, manifest_url=None,
app_domain='app://f.c')
res = self.client.post(self.url)
eq_(res.status_code, 200)
eq_(record_action.call_args[0][0], 'install')
eq_(record_action.call_args[0][2], {
'app-domain': 'app://f.c',
'app-id': self.addon.pk,
'anonymous': False})
@mock.patch('mkt.receipts.views.receipt_cef.log')
def test_cef_logs(self, cef):
res = self.client.post(self.url)
eq_(res.status_code, 200)
eq_(len(cef.call_args_list), 1)
eq_([x[0][2] for x in cef.call_args_list], ['sign'])
@mock.patch('mkt.receipts.views.receipt_cef.log')
def test_record_install(self, cef):
res = self.client.post(self.url)
eq_(res.status_code, 200)
installed = self.user.installed_set.all()
eq_(len(installed), 1)
eq_(installed[0].install_type, apps.INSTALL_TYPE_USER)
@mock.patch('mkt.receipts.views.receipt_cef.log')
def test_record_multiple_installs(self, cef):
self.client.post(self.url)
res = self.client.post(self.url)
eq_(res.status_code, 200)
eq_(self.user.installed_set.count(), 1)
@mock.patch.object(settings, 'WEBAPPS_RECEIPT_KEY',
amo.tests.AMOPaths.sample_key())
@mock.patch('mkt.receipts.views.receipt_cef.log')
def test_record_receipt(self, cef):
res = self.client.post(self.url)
content = json.loads(res.content)
assert content.get('receipt'), content
def test_installed_client_data(self):
download_source = DownloadSource.objects.create(name='mkt-home')
device_type = 'mobile'
user_agent = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0)'
self.addon.update(type=amo.ADDON_WEBAPP)
res = self.client.post(self.url,
data={'device_type': device_type,
'is_chromeless': False,
'src': download_source.name},
HTTP_USER_AGENT=user_agent)
eq_(res.status_code, 200)
eq_(self.user.installed_set.count(), 1)
ins = self.user.installed_set.get()
eq_(ins.client_data.download_source, download_source)
eq_(ins.client_data.device_type, device_type)
eq_(ins.client_data.user_agent, user_agent)
eq_(ins.client_data.is_chromeless, False)
eq_(not ins.client_data.language, False)
eq_(not ins.client_data.region, False)
class TestReceiptVerify(amo.tests.TestCase):
fixtures = fixture('user_999', 'user_editor', 'user_editor_group',
'group_editor')
def setUp(self):
super(TestReceiptVerify, self).setUp()
self.app = Webapp.objects.create(app_slug='foo', guid=uuid.uuid4())
self.url = reverse('receipt.verify',
args=[self.app.guid])
self.log = AppLog.objects.filter(addon=self.app)
self.reviewer = UserProfile.objects.get(pk=5497308)
def get_mock(self, user=None, **kwargs):
self.verify = mock.Mock()
self.verify.return_value = json.dumps(kwargs)
self.verify.check_without_purchase.return_value = json.dumps(
{'status': 'ok'})
self.verify.invalid.return_value = json.dumps({'status': 'invalid'})
self.verify.user_id = user.pk if user else self.reviewer.pk
return self.verify
def test_post_required(self):
eq_(self.client.get(self.url).status_code, 405)
@mock.patch('mkt.receipts.views.Verify')
def test_empty(self, verify):
vfy = self.get_mock(user=self.reviewer, status='invalid')
# Because the receipt was empty, this never got set and so
# we didn't log it.
vfy.user_id = None
verify.return_value = vfy
res = self.client.post(self.url)
eq_(res.status_code, 200)
eq_(self.log.count(), 0)
eq_(json.loads(res.content)['status'], 'invalid')
@mock.patch('mkt.receipts.views.Verify')
def test_good(self, verify):
verify.return_value = self.get_mock(user=self.reviewer, status='ok')
res = self.client.post(self.url)
eq_(res.status_code, 200)
eq_(self.log.count(), 1)
eq_(json.loads(res.content)['status'], 'ok')
@mock.patch('mkt.receipts.views.Verify')
def test_not_reviewer(self, verify):
self.reviewer.groups.clear()
verify.return_value = self.get_mock(user=self.reviewer, status='ok')
res = self.client.post(self.url)
eq_(res.status_code, 200)
eq_(self.log.count(), 0)
eq_(json.loads(res.content)['status'], 'invalid')
@mock.patch('mkt.receipts.views.Verify')
def test_not_there(self, verify):
verify.return_value = self.get_mock(user=self.reviewer, status='ok')
self.reviewer.delete()
res = self.client.post(self.url)
eq_(res['Access-Control-Allow-Origin'], '*')
eq_(json.loads(res.content)['status'], 'invalid')
@mock.patch('mkt.receipts.views.Verify')
def test_logs(self, verify):
verify.return_value = self.get_mock(user=self.reviewer, status='ok')
eq_(self.log.count(), 0)
res = self.client.post(self.url)
eq_(self.log.count(), 1)
eq_(res.status_code, 200)
@mock.patch('mkt.receipts.views.Verify')
def test_logs_developer(self, verify):
developer = UserProfile.objects.get(pk=999)
AddonUser.objects.create(addon=self.app, user=developer)
verify.return_value = self.get_mock(user=developer, status='ok')
res = self.client.post(self.url)
eq_(res['Access-Control-Allow-Origin'], '*')
eq_(self.log.count(), 1)
eq_(res.status_code, 200)
class TestReceiptIssue(amo.tests.TestCase):
fixtures = fixture('user_999', 'user_editor', 'user_editor_group',
'group_editor', 'webapp_337141')
def setUp(self):
super(TestReceiptIssue, self).setUp()
self.app = Webapp.objects.get(pk=337141)
self.url = reverse('receipt.issue', args=[self.app.app_slug])
self.reviewer = UserProfile.objects.get(pk=5497308)
self.user = UserProfile.objects.get(pk=999)
@mock.patch('mkt.receipts.views.create_receipt')
def test_issued(self, create_receipt):
create_receipt.return_value = 'foo'
self.client.login(username=self.reviewer.email, password='password')
res = self.client.post(self.url)
eq_(res.status_code, 200)
eq_(create_receipt.call_args[1]['flavour'], 'reviewer')
eq_(self.reviewer.installed_set.all()[0].install_type,
apps.INSTALL_TYPE_REVIEWER)
def test_get(self):
self.client.login(username=self.reviewer.email, password='password')
res = self.client.get(self.url)
eq_(res.status_code, 405)
def test_issued_anon(self):
res = self.client.post(self.url)
eq_(res.status_code, 403)
def test_issued_not_reviewer(self):
self.client.login(username=self.user, password='password')
res = self.client.post(self.url)
eq_(res.status_code, 403)
@mock.patch('mkt.receipts.views.create_receipt')
def test_issued_developer(self, create_receipt):
create_receipt.return_value = 'foo'
AddonUser.objects.create(user=self.user, addon=self.app)
self.client.login(username=self.user.email, password='password')
res = self.client.post(self.url)
eq_(res.status_code, 200)
eq_(create_receipt.call_args[1]['flavour'], 'developer')
eq_(self.user.installed_set.all()[0].install_type,
apps.INSTALL_TYPE_DEVELOPER)
@mock.patch('mkt.receipts.views.create_receipt')
def test_unicode_name(self, create_receipt):
"""
Regression test to ensure that the CEF log works. Pass through the
app.pk instead of the full unicode name, until the CEF library is
fixed, or metlog is used.
"""
create_receipt.return_value = 'foo'
self.app.name = u'\u0627\u0644\u062a\u0637\u0628-news'
self.app.save()
self.client.login(username=self.reviewer.email, password='password')
res = self.client.post(self.url)
eq_(res.status_code, 200)
class TestReceiptCheck(amo.tests.TestCase):
fixtures = fixture('user_999', 'user_editor', 'user_editor_group',
'group_editor', 'webapp_337141')
def setUp(self):
super(TestReceiptCheck, self).setUp()
self.app = Webapp.objects.get(pk=337141)
self.app.update(status=amo.STATUS_PENDING)
self.url = reverse('receipt.check',
args=[self.app.guid])
self.reviewer = UserProfile.objects.get(pk=5497308)
self.user = UserProfile.objects.get(pk=999)
def test_anon(self):
eq_(self.client.get(self.url).status_code, 302)
def test_not_reviewer(self):
self.client.login(username=self.user.email, password='password')
eq_(self.client.get(self.url).status_code, 403)
def test_not_there(self):
self.client.login(username=self.reviewer.email, password='password')
res = self.client.get(self.url)
eq_(res.status_code, 200)
eq_(json.loads(res.content)['status'], False)
def test_there(self):
self.client.login(username=self.reviewer.email, password='password')
amo.log(amo.LOG.RECEIPT_CHECKED, self.app, user=self.reviewer)
res = self.client.get(self.url)
eq_(res.status_code, 200)
eq_(json.loads(res.content)['status'], True)
class RawRequestFactory(RequestFactory):
"""A request factory that does not encode the body."""
def _encode_data(self, data, content_type):
return data
# Ooof.
@mock.patch.object(verify_settings, 'WEBAPPS_RECEIPT_KEY',
amo.tests.AMOPaths.sample_key())
@mock.patch.object(settings, 'WEBAPPS_RECEIPT_KEY',
amo.tests.AMOPaths.sample_key())
@mock.patch.object(settings, 'SITE_URL', 'https://foo.com')
@mock.patch.object(verify_settings, 'DOMAIN', 'foo.com')
class TestDevhubReceipts(amo.tests.TestCase):
def setUp(self):
self.issue = reverse('receipt.test.issue')
def test_install_page(self):
eq_(self.client.get(reverse('receipt.test.install')).status_code, 200)
def test_details_page(self):
eq_(self.client.get(reverse('receipt.test.details')).status_code, 200)
def test_issue_get(self):
eq_(self.client.get(self.issue).status_code, 405)
def test_issue_none(self):
data = {'receipt_type': 'none', 'manifest_url': 'http://foo.com/'}
res = self.client.post(self.issue, data=data)
eq_(json.loads(res.content)['receipt'], '')
def test_bad_url(self):
data = {'receipt_type': 'none', 'manifest_url': ''}
res = self.client.post(self.issue, data=data)
ok_(json.loads(res.content)['error'], '')
def test_issue_expired(self):
data = {'receipt_type': 'expired', 'manifest_url': 'http://foo.com/'}
res = self.client.post(self.issue, data=data)
data = decode_receipt(json.loads(res.content)['receipt']
.encode('ascii'))
eq_(data['verify'], absolutify(reverse('receipt.test.verify',
kwargs={'status': 'expired'})))
ok_(data['exp'] > (calendar.timegm(time.gmtime()) +
(60 * 60 * 24) - TEST_LEEWAY))
def test_issue_other(self):
data = {'receipt_type': 'foo', 'manifest_url': ''}
res = self.client.post(self.issue, data=data)
ok_(json.loads(res.content)['error'])
def test_verify_fails(self):
req = RawRequestFactory().post('/', '')
res = devhub_verify(req, 'expired')
eq_(json.loads(res.content)['status'], 'invalid')
def test_verify(self):
url = absolutify(reverse('receipt.test.verify',
kwargs={'status': 'expired'}))
receipt = create_test_receipt('http://foo', 'expired')
req = RawRequestFactory().post(url, receipt)
res = devhub_verify(req, 'expired')
eq_(json.loads(res.content)['status'], 'expired')
```
#### File: mkt/regions/api.py
```python
from mkt.api.base import MarketplaceResource
class RegionResource(MarketplaceResource):
class Meta(MarketplaceResource.Meta):
allowed_methods = []
fields = ('name', 'slug', 'mcc', 'adolescent')
resource_name = 'region'
include_resource_uri = False
def full_dehydrate(self, bundle):
bundle.data = {}
for field in self._meta.fields:
bundle.data[field] = getattr(bundle.obj, field)
return bundle
```
#### File: mkt/regions/__init__.py
```python
from threading import local
from mkt.constants.regions import *
_local = local()
def get_region():
return getattr(_local, 'region', WORLDWIDE.slug)
def get_region_id():
return REGIONS_DICT[get_region()].id
def set_region(slug):
"""
Sets the slug of the region for the current request lifecycle.
"""
_local.region = slug
```
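The module above keeps the active region in a module-level thread-local, so concurrent requests each see only the slug they set themselves. A minimal standalone sketch of that pattern, assuming an illustrative 'worldwide' default in place of WORLDWIDE.slug:
```python
import threading

_local = threading.local()
DEFAULT_SLUG = 'worldwide'  # stand-in for WORLDWIDE.slug (illustrative)

def set_region(slug):
    _local.region = slug

def get_region():
    # Threads that never called set_region fall back to the default.
    return getattr(_local, 'region', DEFAULT_SLUG)

results = {}

def worker(slug):
    set_region(slug)
    results[slug] = get_region()

threads = [threading.Thread(target=worker, args=(slug,)) for slug in ('br', 'us')]
for t in threads:
    t.start()
for t in threads:
    t.join()

print(results)       # {'br': 'br', 'us': 'us'}
print(get_region())  # 'worldwide' -- the main thread never set a region
```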
#### File: regions/tests/test_utils_.py
```python
from nose.tools import eq_, assert_raises
from mkt.constants import regions
from mkt.regions.utils import parse_region
def test_parse_region():
eq_(parse_region('worldwide'), regions.WORLDWIDE)
eq_(parse_region('br'), regions.BR)
eq_(parse_region('7'), regions.BR)
eq_(parse_region(7), regions.BR)
eq_(parse_region(regions.BR), regions.BR)
assert_raises(KeyError, parse_region, '')
```
#### File: mkt/regions/utils.py
```python
from mkt.constants import regions
def parse_region(region):
"""
Returns a region class definition given a slug, id, or class definition.
"""
if isinstance(region, type) and issubclass(region, regions.REGION):
return region
if str(region).isdigit():
# Look up the region by ID.
return regions.REGIONS_CHOICES_ID_DICT[int(region)]
else:
# Look up the region by slug.
return regions.REGIONS_DICT[region]
```
#### File: mkt/stats/helpers.py
```python
from django.utils.http import urlquote
from jingo import register
import jinja2
from access import acl
@register.function
@jinja2.contextfunction
def check_contrib_stats_perms(context, addon):
request = context['request']
if addon.has_author(request.amo_user) or acl.action_allowed(request,
'RevenueStats', 'View'):
return True
@register.function
@jinja2.contextfunction
def stats_url(context, action, metric=None):
"""
Simplifies the templates a bit: there is no need to pass the addon
in as a parameter, since it is inferred from the context, and it
makes the function call shorter.
"""
addon = context['addon']
if metric:
action = '%s_%s' % (metric, action)
return addon.get_stats_url(action=action)
@register.function
def url_quote(url):
return urlquote(url)
```
#### File: mkt/stats/search.py
```python
from django.db.models import Count, Q, Sum
import amo
import amo.search
from amo.utils import create_es_index_if_missing
from stats.models import Contribution
from mkt.webapps.models import Installed
def get_finance_total(qs, addon, field=None, **kwargs):
"""
sales/revenue/refunds per app overall
field -- breakdown field name contained in kwargs
"""
q = Q()
if field:
kwargs_copy = {field: kwargs[field]}
q = handle_kwargs(q, field, kwargs)
revenue = (qs.values('addon').filter(q, refund=None, **kwargs).
annotate(revenue=Sum('price_tier__price')))
sales = (qs.values('addon').filter(q, refund=None, **kwargs).
annotate(sales=Count('id')))
refunds = (qs.filter(q, refund__isnull=False, **kwargs).
values('addon').annotate(refunds=Count('id')))
document = {
'addon': addon,
'count': sales[0]['sales'] if sales.count() else 0,
'revenue': revenue[0]['revenue'] if revenue.count() else 0,
'refunds': refunds[0]['refunds'] if refunds.count() else 0,
}
if field:
# Edge case, handle None values.
if kwargs_copy[field] is None:
kwargs_copy[field] = ''
document[field] = kwargs_copy[field]
# Non-USD-normalized revenue, calculated from currency's amount rather
# than price tier.
if field == 'currency':
document['revenue_non_normalized'] = (qs.values('addon')
.filter(q, refund=None, **kwargs)
.annotate(revenue=Sum('amount'))
[0]['revenue'] if revenue.count() else 0)
return document
def get_finance_daily(contribution):
"""
sales per day
revenue per day
refunds per day
"""
addon_id = contribution['addon']
date = contribution['created'].date()
return {
'date': date,
'addon': addon_id,
'count': Contribution.objects.filter(
addon__id=addon_id,
refund=None,
created__year=date.year,
created__month=date.month,
created__day=date.day).count() or 0,
# TODO: non-USD-normalized revenue (daily_by_currency)?
'revenue': Contribution.objects.filter(
addon__id=addon_id,
refund=None,
type=amo.CONTRIB_PURCHASE,
created__year=date.year,
created__month=date.month,
created__day=date.day)
.aggregate(revenue=Sum('price_tier__price'))['revenue']
or 0,
'refunds': Contribution.objects.filter(
addon__id=addon_id,
refund__isnull=False,
created__year=date.year,
created__month=date.month,
created__day=date.day).count() or 0,
}
def get_installed_daily(installed):
"""
installs per day
"""
addon_id = installed['addon']
date = installed['created'].date()
return {
'date': date,
'addon': addon_id,
'count': Installed.objects.filter(
addon__id=addon_id,
created__year=date.year,
created__month=date.month,
created__day=date.day).count()
}
def setup_mkt_indexes(index=None, aliased=True):
"""
Define explicit ES mappings for models. If a field is not explicitly
defined and a document containing it is inserted, ES will dynamically
guess its type and index it in a schemaless manner.
"""
es = amo.search.get_es()
for model in [Contribution]:
index_ = index or model._get_index()
index_ = create_es_index_if_missing(index_, aliased=aliased)
mapping = {
'properties': {
'id': {'type': 'long'},
'date': {'format': 'dateOptionalTime',
'type': 'date'},
'count': {'type': 'long'},
'revenue': {'type': 'double'},
# Try to tell ES not to 'analyze' the field so it can be queried
# with hyphens and lowercase letters.
'currency': {'type': 'string',
'index': 'not_analyzed'},
'source': {'type': 'string',
'index': 'not_analyzed'},
'inapp': {'type': 'string',
'index': 'not_analyzed'}
}
}
es.put_mapping(model._meta.db_table, mapping, index_)
def handle_kwargs(q, field, kwargs, join_field=None):
"""
Processes kwargs to combine '' and None values and make them ready for
filters. Returns a Q object to use in the filter.
"""
if join_field:
join_field += field
kwargs[join_field] = kwargs[field]
# Have '' and None have the same meaning.
if not kwargs[field]:
q = Q(**{field + '__in': ['', None]})
del(kwargs[field])
# We are using the join field to filter so get rid of the plain one.
if join_field and field in kwargs:
del(kwargs[field])
return q
```
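get_finance_daily and get_installed_daily above flatten ORM aggregates into one small per-day document that is then indexed into Elasticsearch. A standalone sketch of that document shape, computed from plain dicts instead of the Django ORM (the sample rows and field values are made up):
```python
import datetime

# Hypothetical contribution rows; in the module above these come from the ORM.
rows = [
    {'addon': 42, 'created': datetime.datetime(2012, 5, 1, 9, 0), 'refund': None, 'price': 0.99},
    {'addon': 42, 'created': datetime.datetime(2012, 5, 1, 17, 30), 'refund': None, 'price': 0.99},
    {'addon': 42, 'created': datetime.datetime(2012, 5, 1, 18, 0), 'refund': 'r1', 'price': 0.99},
]

def finance_daily(rows, addon_id, day):
    same_day = [r for r in rows if r['addon'] == addon_id and r['created'].date() == day]
    sales = [r for r in same_day if r['refund'] is None]
    refunds = [r for r in same_day if r['refund'] is not None]
    return {
        'date': day,
        'addon': addon_id,
        'count': len(sales),                        # sales per day
        'revenue': sum(r['price'] for r in sales),  # revenue per day
        'refunds': len(refunds),                    # refunds per day
    }

print(finance_daily(rows, 42, datetime.date(2012, 5, 1)))
# {'date': datetime.date(2012, 5, 1), 'addon': 42, 'count': 2, 'revenue': 1.98, 'refunds': 1}
```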
#### File: stats/tests/test_monolith_views.py
```python
import csv
import datetime
from decimal import Decimal
import json
import random
from django.conf import settings
import mock
from nose.tools import eq_
from test_utils import RequestFactory
from access.models import Group, GroupUser
from addons.models import Addon, AddonUser
import amo
import amo.tests
from amo.urlresolvers import reverse
from apps.stats.models import GlobalStat
from market.models import Price
from mkt.webapps.models import Installed
from mkt.site.fixtures import fixture
from mkt.stats import search, tasks, views
from mkt.stats.views import (FINANCE_SERIES, get_series_column,
get_series_line, pad_missing_stats)
from stats.models import Contribution
from users.models import UserProfile
class StatsTest(amo.tests.ESTestCase):
fixtures = fixture('user_999')
def setUp(self):
super(StatsTest, self).setUp()
self.create_switch('monolith-stats')
self.user = UserProfile.objects.get(username='regularuser')
self.public_app = amo.tests.app_factory(name='public',
app_slug='pub', type=1, status=4, public_stats=True)
self.private_app = amo.tests.app_factory(name='private',
app_slug='priv', type=1, status=4, public_stats=False)
self.url_args = {'start': '20090601', 'end': '20090930',
'app_slug': self.private_app.app_slug}
def login_as_visitor(self):
self.login(self.user)
def get_view_response(self, view, **kwargs):
view_args = self.url_args.copy()
head = kwargs.pop('head', False)
view_args.update(kwargs)
url = reverse(view, kwargs=view_args)
if head:
return self.client.head(url, follow=True)
return self.client.get(url, follow=True)
def views_gen(self, **kwargs):
# common set of views
for series in views.SERIES:
if series == 'my_apps':
# skip my_apps, as it has different routes
continue
for group in views.SERIES_GROUPS:
view = 'mkt.stats.%s_series' % series
args = kwargs.copy()
args['group'] = group
yield (view, args)
def public_views_gen(self, **kwargs):
# all views are potentially public, except for contributions
for view, args in self.views_gen(**kwargs):
if not view in ['mkt.stats.%s_series' % series for series in
FINANCE_SERIES]:
yield (view, args)
def private_views_gen(self, **kwargs):
# only contributions views are always private
for view, args in self.views_gen(**kwargs):
if view in ['mkt.stats.%s_series' % series for series in
FINANCE_SERIES]:
yield (view, args)
class TestStatsPermissions(StatsTest):
"""Tests to make sure all restricted data remains restricted."""
@amo.tests.mock_es # We're checking only headers, not content.
def _check_it(self, views, status):
for view, kwargs in views:
response = self.get_view_response(view, head=True, **kwargs)
eq_(response.status_code, status,
'unexpected http status for %s. got %s. expected %s' % (
view, response.status_code, status))
def test_private_app_no_groups(self):
# Logged in but no groups
self.login_as_visitor()
self._check_it(self.private_views_gen(format='json'), 403)
@mock.patch.object(settings, 'MONOLITH_SERVER', 'http://0.0.0.0:0')
@mock.patch('monolith.client.Client')
def test_private_app_stats_group(self, mocked_client):
# Logged in with stats group.
group = Group.objects.create(name='Stats', rules='Stats:View')
GroupUser.objects.create(user=self.user, group=group)
self.login_as_visitor()
self._check_it(self.public_views_gen(format='json'), 200)
self._check_it(self.private_views_gen(format='json'), 403)
@mock.patch.object(settings, 'MONOLITH_SERVER', 'http://0.0.0.0:0')
@mock.patch('monolith.client.Client')
def test_private_app_contrib_stats_group(self, mocked_client):
# Logged in with stats and contrib stats group.
group1 = Group.objects.create(name='Stats', rules='Stats:View')
GroupUser.objects.create(user=self.user, group=group1)
group2 = Group.objects.create(name='Revenue Stats',
rules='RevenueStats:View')
GroupUser.objects.create(user=self.user, group=group2)
self.login_as_visitor()
self._check_it(self.public_views_gen(format='json'), 200)
self._check_it(self.private_views_gen(format='json'), 200)
def test_private_app_anonymous(self):
# Not logged in
self.client.logout()
self._check_it(self.private_views_gen(format='json'), 403)
@mock.patch.object(settings, 'MONOLITH_SERVER', 'http://0.0.0.0:0')
@mock.patch('monolith.client.Client')
def test_public_app_no_groups(self, mocked_client):
# Logged in but no groups
self.login_as_visitor()
self._check_it(self.public_views_gen(
app_slug=self.public_app.app_slug, format='json'), 200)
self._check_it(self.private_views_gen(
app_slug=self.public_app.app_slug, format='json'), 403)
@mock.patch.object(settings, 'MONOLITH_SERVER', 'http://0.0.0.0:0')
@mock.patch('monolith.client.Client')
def test_public_app_stats_group(self, mocked_client):
# Logged in with stats group.
group = Group.objects.create(name='Stats', rules='Stats:View')
GroupUser.objects.create(user=self.user, group=group)
self.login_as_visitor()
self._check_it(self.public_views_gen(
app_slug=self.public_app.app_slug, format='json'), 200)
self._check_it(self.private_views_gen(
app_slug=self.public_app.app_slug, format='json'), 403)
@mock.patch.object(settings, 'MONOLITH_SERVER', 'http://0.0.0.0:0')
@mock.patch('monolith.client.Client')
def test_public_app_contrib_stats_group(self, mocked_client):
# Logged in with stats and contrib stats group.
group1 = Group.objects.create(name='Stats', rules='Stats:View')
GroupUser.objects.create(user=self.user, group=group1)
group2 = Group.objects.create(name='Revenue Stats',
rules='RevenueStats:View')
GroupUser.objects.create(user=self.user, group=group2)
self.login_as_visitor()
self._check_it(self.public_views_gen(
app_slug=self.public_app.app_slug, format='json'), 200)
self._check_it(self.private_views_gen(
app_slug=self.public_app.app_slug, format='json'), 200)
@mock.patch.object(settings, 'MONOLITH_SERVER', 'http://0.0.0.0:0')
@mock.patch('monolith.client.Client')
def test_public_app_anonymous(self, mocked_client):
# Not logged in
self.client.logout()
self._check_it(self.public_views_gen(app_slug=self.public_app.app_slug,
format='json'), 200)
self._check_it(self.private_views_gen(
app_slug=self.public_app.app_slug, format='json'), 403)
@mock.patch.object(settings, 'MONOLITH_SERVER', 'http://0.0.0.0:0')
@mock.patch('monolith.client.Client')
def test_non_public_app_redirect(self, mocked_client):
# Non-public status redirects to detail page.
app = amo.tests.app_factory(status=2, public_stats=True)
response = self.client.get(app.get_stats_url())
eq_(response.status_code, 302)
@mock.patch.object(settings, 'MONOLITH_SERVER', 'http://0.0.0.0:0')
@mock.patch('monolith.client.Client')
def test_non_public_app_owner_no_redirect(self, mocked_client):
# Non-public status, but owner of app, does not redirect to detail
# page.
self.login_as_visitor()
app = amo.tests.app_factory(status=2, public_stats=True)
AddonUser.objects.create(addon_id=app.id, user=self.user)
response = self.client.get(app.get_stats_url())
eq_(response.status_code, 200)
class TestMyApps(StatsTest):
def setUp(self):
super(TestMyApps, self).setUp()
self.req = RequestFactory().get('/')
self.req.amo_user = self.user
AddonUser.objects.create(addon=self.public_app, user=self.user)
Installed.objects.create(addon=self.public_app, user=self.user)
def test_anonymous(self):
del self.req.amo_user
eq_(views._my_apps(self.req), [])
def test_some(self):
eq_(views._my_apps(self.req), [self.public_app])
def test_deleted(self):
self.public_app.update(status=amo.STATUS_DELETED)
eq_(views._my_apps(self.req), [])
class TestInstalled(amo.tests.ESTestCase):
test_es = True
fixtures = fixture('user_admin', 'group_admin', 'user_admin_group',
'user_999', 'webapp_337141')
def setUp(self):
self.today = datetime.date.today()
self.webapp = Addon.objects.get(pk=337141)
self.user = UserProfile.objects.get(pk=999)
self.client.login(username='<EMAIL>', password='password')
self.in_ = Installed.objects.create(addon=self.webapp, user=self.user)
installed = {'addon': self.in_.addon.id, 'created': self.in_.created}
Installed.index(search.get_installed_daily(installed),
id=self.in_.pk)
self.refresh('users_install')
def get_url(self, start, end, fmt='json'):
return reverse('mkt.stats.installs_series',
args=[self.webapp.app_slug, 'day',
start.strftime('%Y%m%d'),
end.strftime('%Y%m%d'), fmt])
def get_multiple_url(self, start, end, fmt='json'):
return reverse('mkt.stats.my_apps_series',
args=['day',
start.strftime('%Y%m%d'),
end.strftime('%Y%m%d'), fmt])
def test_installed(self):
res = self.client.get(self.get_url(self.today, self.today))
data = json.loads(res.content)
eq_(data[0]['count'], 1)
def test_installed_anon(self):
self.client.logout()
res = self.client.get(self.get_url(self.today, self.today))
eq_(res.status_code, 403)
def test_installed_anon_public(self):
self.client.logout()
self.webapp.update(public_stats=True)
res = self.client.get(self.get_url(self.today, self.today))
eq_(res.status_code, 200)
def setup_multiple(self):
self.client.login(username=self.user.email, password='password')
AddonUser.objects.create(addon=self.webapp, user=self.user)
def test_multiple_json(self):
self.setup_multiple()
res = self.client.get(self.get_multiple_url(self.today, self.today))
eq_(json.loads(res.content)[0]['name'], self.webapp.name)
def test_multiple_csv(self):
self.setup_multiple()
res = self.client.get(self.get_multiple_url(self.today, self.today,
fmt='csv'))
rows = list(csv.reader(res.content.split('\n')))
eq_(rows[5][0], str(self.webapp.name))
def test_anonymous(self):
self.client.logout()
res = self.client.get(reverse('mkt.stats.my_apps_overview'))
self.assertLoginRequired(res)
class TestGetSeriesLine(amo.tests.ESTestCase):
fixtures = fixture('user_999')
def setUp(self):
# Create apps and contributions to index.
self.app = amo.tests.app_factory()
user = UserProfile.objects.get(username='regularuser')
price_tier = Price.objects.create(price='0.99')
# Create a sale for each day in the expected range.
self.expected_days = (1, 2, 3, 4, 5)
for day in self.expected_days:
# Create different amounts of contribs for each day.
for x in range(0, day):
c = Contribution.objects.create(addon_id=self.app.pk,
user=user,
amount='0.99',
price_tier=price_tier,
type=amo.CONTRIB_PURCHASE)
c.update(created=datetime.datetime(2012, 5, day, 0, 0, 0))
tasks.index_finance_daily(Contribution.objects.all())
self.refresh(timesleep=1)
def test_basic(self):
"""
Check a sale (count) is found for each day in the expected range.
"""
d_range = (datetime.date(2012, 05, 01), datetime.date(2012, 05, 15))
stats = list(get_series_line(Contribution, 'day', addon=self.app.pk,
date__range=d_range))
dates_with_sales = [c['date'] for c in stats if c['count'] > 0]
days = [d.day for d in dates_with_sales]
for day in self.expected_days:
eq_(day in days, True)
def test_desc_order(self):
"""
Check the returned data is in descending order by date.
"""
d_range = (datetime.date(2012, 05, 01), datetime.date(2012, 05, 15))
stats = list(get_series_line(Contribution, 'day', addon=self.app.pk,
date__range=d_range))
eq_(stats, sorted(stats, key=lambda x: x['date'], reverse=True))
def test_revenue(self):
"""
Check each day's revenue is correct.
"""
d_range = (datetime.date(2012, 05, 01), datetime.date(2012, 05, 05))
stats = list(get_series_line(Contribution, 'day',
primary_field='revenue',
addon=self.app.pk,
date__range=d_range))
for stat, day in zip(stats, sorted(self.expected_days, reverse=True)):
expected_revenue = day * .99
eq_(round(stat['count'], 2), round(expected_revenue, 2))
class TestGetSeriesColumn(amo.tests.ESTestCase):
fixtures = fixture('user_999')
def setUp(self):
super(TestGetSeriesColumn, self).setUp()
# Create apps and contributions to index.
self.app = amo.tests.app_factory()
self.user = UserProfile.objects.get(username='regularuser')
price_tier = Price.objects.create(price='0.99')
# Create some revenue for several different currencies.
self.expected = [
{'currency': 'CAD', 'count': 0},
{'currency': 'EUR', 'count': 0},
{'currency': 'USD', 'count': 0}
]
for expected in self.expected:
for x in range(random.randint(1, 4)):
# Amount doesn't matter for this stat since it is based on the
# price tier (USD normalized).
Contribution.objects.create(addon_id=self.app.pk,
user=self.user,
amount=random.randint(0, 10),
currency=expected['currency'],
price_tier=price_tier)
expected['count'] += Decimal(price_tier.price)
expected['count'] = int(expected['count'])
tasks.index_finance_total_by_currency([self.app.pk])
self.refresh(timesleep=1)
def test_basic_revenue(self):
stats = list(get_series_column(Contribution, addon=self.app.pk,
primary_field='revenue',
category_field='currency'))
for stat in stats:
stat['currency'] = stat['currency'].upper()
stat['count'] = int(stat['count'])
stats = sorted(stats, key=lambda stat: stat['currency'])
eq_(stats, self.expected)
def test_desc_order(self):
stats = list(get_series_column(Contribution, addon=self.app.pk,
primary_field='revenue',
category_field='currency'))
for stat in stats:
stat['count'] = int(stat['count'])
eq_(stats, sorted(stats, key=lambda stat: stat['count'], reverse=True))
class TestPadMissingStats(amo.tests.ESTestCase):
def test_basic(self):
days = [datetime.date(2012, 4, 29), datetime.date(2012, 5, 1),
datetime.date(2012, 5, 3), datetime.date(2012, 5, 5)]
expected_days = [datetime.date(2012, 4, 30), datetime.date(2012, 5, 2),
datetime.date(2012, 5, 4)]
dummies = pad_missing_stats(days, 'day')
days = [dummy['date'].date() for dummy in dummies]
for day in expected_days:
eq_(day in days, True)
def test_with_date_range(self):
date_range = (datetime.date(2012, 5, 1), datetime.date(2012, 5, 5))
days = [datetime.date(2012, 5, 3)]
expected_days = [datetime.date(2012, 5, 2), datetime.date(2012, 5, 4)]
dummies = pad_missing_stats(days, 'day', date_range=date_range)
days = [dummy['date'].date() for dummy in dummies]
for day in expected_days:
eq_(day in days, True)
def test_with_fields(self):
fields = ['test_field', 'fest_tield']
days = [datetime.date(2012, 5, 1), datetime.date(2012, 5, 3)]
dummies = pad_missing_stats(days, 'day', fields=fields)
for dummy in dummies:
for field in fields:
eq_(field in dummy, True)
def test_group_week(self):
days = [datetime.date(2012, 5, 1), datetime.date(2012, 5, 15)]
expected_days = [datetime.date(2012, 5, 8)]
dummies = pad_missing_stats(days, 'week')
days = [dummy['date'].date() for dummy in dummies]
for day in expected_days:
eq_(day in days, True)
def test_group_month(self):
days = [datetime.date(2012, 5, 1), datetime.date(2012, 7, 1)]
expected_days = [datetime.date(2012, 6, 1)]
dummies = pad_missing_stats(days, 'month')
days = [dummy['date'].date() for dummy in dummies]
for day in expected_days:
eq_(day in days, True)
class TestOverall(amo.tests.TestCase):
def setUp(self):
self.keys = ['apps_count_new', 'apps_count_installed',
'apps_review_count_new']
def test_url(self):
self.assert3xx(self.client.get(reverse('mkt.stats.overall')),
reverse('mkt.stats.apps_count_new'))
def get_url(self, name):
return (reverse('mkt.stats.%s' % name) +
'/%s-day-20090601-20090630.json' % name)
def test_stats(self):
for stat in self.keys:
GlobalStat.objects.create(name=stat, count=1,
date=datetime.date(2009, 06, 12))
for stat in self.keys:
res = self.client.get(self.get_url(stat))
content = json.loads(res.content)
eq_(content[0]['date'], '2009-06-12')
eq_(content[0]['count'], 1)
```
#### File: stats/tests/test_search.py
```python
from datetime import datetime
from nose.tools import eq_
import amo.tests
from mkt.constants import apps
from mkt.site.fixtures import fixture
from mkt.stats import search
from mkt.webapps.models import Installed
from users.models import UserProfile
class InstalledTests(amo.tests.TestCase):
fixtures = fixture('user_999')
def setUp(self):
self.user = UserProfile.objects.get(username='regularuser')
self.first_app = amo.tests.app_factory(name='public',
app_slug='pub', type=1, status=4, public_stats=True)
self.second_app = amo.tests.app_factory(name='private',
app_slug='priv', type=1, status=4, public_stats=False)
def test_no_installs(self):
data = {'created': datetime.now(),
'addon': self.first_app.id}
result = search.get_installed_daily(data)
eq_(result['date'], data['created'].date())
eq_(result['addon'], data['addon'])
eq_(result['count'], 0)
def test_only_one_app(self):
Installed.objects.create(addon=self.first_app, user=self.user,
install_type=apps.INSTALL_TYPE_USER)
data = {'created': datetime.now(),
'addon': self.first_app.id}
result = search.get_installed_daily(data)
eq_(result['date'], data['created'].date())
eq_(result['addon'], data['addon'])
eq_(result['count'], 1)
def test_multiple_installs(self):
# The model has a unique-together constraint, so we use different
# install types to work around it.
Installed.objects.create(addon=self.first_app, user=self.user,
install_type=apps.INSTALL_TYPE_USER)
Installed.objects.create(addon=self.first_app, user=self.user,
install_type=apps.INSTALL_TYPE_DEVELOPER)
data = {'created': datetime.now(),
'addon': self.first_app.id}
result = search.get_installed_daily(data)
eq_(result['date'], data['created'].date())
eq_(result['addon'], data['addon'])
eq_(result['count'], 2)
def test_two_apps(self):
Installed.objects.create(addon=self.first_app, user=self.user,
install_type=apps.INSTALL_TYPE_USER)
Installed.objects.create(addon=self.second_app, user=self.user,
install_type=apps.INSTALL_TYPE_USER)
data = {'created': datetime.now(),
'addon': self.first_app.id}
result = search.get_installed_daily(data)
eq_(result['date'], data['created'].date())
eq_(result['addon'], data['addon'])
eq_(result['count'], 1)
```
#### File: submit/tests/test_api.py
```python
import base64
import json
from nose import SkipTest
from nose.tools import eq_
from mock import patch
import amo.tests
from addons.models import Addon, AddonUser
from files.models import FileUpload
from users.models import UserProfile
from mkt.api.tests.test_oauth import BaseOAuth
from mkt.api.tests.test_handlers import CreateHandler
from mkt.site.fixtures import fixture
from mkt.webapps.models import Webapp
class ValidationHandler(BaseOAuth):
fixtures = fixture('user_2519', 'user_admin')
def setUp(self):
super(ValidationHandler, self).setUp()
self.list_url = ('api_dispatch_list', {'resource_name': 'validation'})
self.get_url = None
self.user = UserProfile.objects.get(pk=2519)
def test_has_cors(self):
self.assertCORS(self.client.get(self.list_url), 'post')
def create(self):
res = self.client.post(self.list_url,
data=json.dumps({'manifest':
'http://foo.com'}))
self.get_url = ('api_dispatch_detail',
{'resource_name': 'validation',
'pk': json.loads(res.content)['id']})
return res
def get(self):
return FileUpload.objects.all()[0]
def get_error(self, response):
return json.loads(response.content)['error_message']
class TestAddValidationHandler(ValidationHandler):
def test_verbs(self):
self._allowed_verbs(self.list_url, ['post'])
def test_good(self):
res = self.create()
eq_(res.status_code, 201) # Note! This should be a 202.
content = json.loads(res.content)
eq_(content['processed'], True)
obj = FileUpload.objects.get(uuid=content['id'])
eq_(obj.user, self.user)
@patch('mkt.api.resources.tasks.fetch_manifest')
def test_fetch(self, fetch):
self.create()
assert fetch.called
def test_missing(self):
res = self.client.post(self.list_url, data=json.dumps({}))
eq_(res.status_code, 400)
eq_(self.get_error(res)['manifest'], ['This field is required.'])
def test_bad(self):
res = self.client.post(self.list_url,
data=json.dumps({'manifest': 'blurgh'}))
eq_(res.status_code, 400)
eq_(self.get_error(res)['manifest'], ['Enter a valid URL.'])
def test_anon(self):
res = self.anon.post(self.list_url,
data=json.dumps({'manifest':
'http://foo.com'}))
eq_(res.status_code, 201)
class TestPackagedValidation(amo.tests.AMOPaths, ValidationHandler):
def setUp(self):
super(TestPackagedValidation, self).setUp()
name = 'mozball.zip'
path = self.packaged_app_path(name)
self.file = base64.b64encode(open(path).read())
self.data = {'data': self.file, 'name': name,
'type': 'application/zip'}
def create(self):
res = self.client.post(self.list_url,
data=json.dumps({'upload': self.data}))
if res.status_code < 400:
self.get_url = ('api_dispatch_detail',
{'resource_name': 'validation',
'pk': json.loads(res.content)['id']})
return res
def test_good(self):
raise SkipTest('Caused zipfile IOErrors')
res = self.create()
eq_(res.status_code, 201) # Note! This should be a 202.
content = json.loads(res.content)
eq_(content['processed'], True)
obj = FileUpload.objects.get(uuid=content['id'])
eq_(obj.user, self.user)
@patch('mkt.constants.MAX_PACKAGED_APP_SIZE', 2)
def test_too_big(self):
res = self.create()
eq_(res.status_code, 413)
eq_(json.loads(res.content)['reason'],
'Packaged app too large for submission by this method. '
'Packages must be smaller than 2 bytes.')
def form_errors(self, data, errors):
self.data = data
res = self.create()
eq_(res.status_code, 400)
eq_(self.get_error(res)['upload'], errors)
def test_missing(self):
self.form_errors({'data': self.file, 'name': 'mozball.zip'},
[u'Type and data are required.'])
def test_missing_name(self):
self.form_errors({'data': self.file, 'type': 'application/zip'},
[u'Name not specified.'])
def test_wrong(self):
self.form_errors({'data': self.file, 'name': 'mozball.zip',
'type': 'application/foo'},
[u'Type must be application/zip.'])
def test_invalid(self):
self.form_errors({'data': 'x', 'name': 'mozball.zip',
'type': 'application/foo'},
[u'File must be base64 encoded.'])
class TestGetValidationHandler(ValidationHandler):
def create(self):
res = FileUpload.objects.create(user=self.user, path='http://foo.com')
self.get_url = ('api_dispatch_detail',
{'resource_name': 'validation', 'pk': res.pk})
return res
def test_verbs(self):
self.create()
self._allowed_verbs(self.get_url, ['get'])
def test_check(self):
self.create()
res = self.client.get(self.get_url)
eq_(res.status_code, 200)
def test_anon(self):
self.create()
res = self.anon.get(self.get_url)
eq_(res.status_code, 200)
def test_not_found(self):
url = ('api_dispatch_detail',
{'resource_name': 'validation', 'pk': '123123123'})
res = self.client.get(url)
eq_(res.status_code, 404)
def test_not_run(self):
self.create()
res = self.client.get(self.get_url)
eq_(res.status_code, 200)
eq_(json.loads(res.content)['processed'], False)
def test_pass(self):
obj = self.create()
obj.update(valid=True)
res = self.client.get(self.get_url)
eq_(res.status_code, 200)
data = json.loads(res.content)
eq_(data['processed'], True)
eq_(data['valid'], True)
def test_failure(self):
obj = self.create()
error = '{"errors": 1, "messages": [{"tier": 1, "message": "nope"}]}'
obj.update(valid=False, validation=error)
res = self.client.get(self.get_url)
eq_(res.status_code, 200)
data = json.loads(res.content)
eq_(data['processed'], True)
eq_(data['valid'], False)
class TestAppStatusHandler(CreateHandler, amo.tests.AMOPaths):
fixtures = fixture('user_2519', 'platform_all')
def setUp(self):
super(TestAppStatusHandler, self).setUp()
self.list_url = ('api_dispatch_list', {'resource_name': 'status'})
def create_app(self):
obj = self.create()
res = self.client.post(('api_dispatch_list', {'resource_name': 'app'}),
data=json.dumps({'manifest': obj.uuid}))
pk = json.loads(res.content)['id']
self.get_url = ('api_dispatch_detail',
{'resource_name': 'status', 'pk': pk})
return Webapp.objects.get(pk=pk)
def test_verbs(self):
self._allowed_verbs(self.list_url, [])
def test_has_no_cors(self):
res = self.client.get(self.list_url)
assert 'access-control-allow-origin' not in res
def test_status(self):
self.create_app()
res = self.client.get(self.get_url)
eq_(res.status_code, 200)
data = json.loads(res.content)
eq_(data['disabled_by_user'], False)
eq_(data['status'], 'incomplete')
def test_disable(self):
app = self.create_app()
res = self.client.patch(self.get_url,
data=json.dumps({'disabled_by_user': True}))
eq_(res.status_code, 202, res.content)
app = app.__class__.objects.get(pk=app.pk)
eq_(app.disabled_by_user, True)
eq_(app.status, amo.STATUS_NULL)
def test_change_status_fails(self):
self.create_app()
res = self.client.patch(self.get_url,
data=json.dumps({'status': 'pending'}))
eq_(res.status_code, 400)
assert isinstance(self.get_error(res)['status'], list)
@patch('mkt.webapps.models.Webapp.is_complete')
def test_change_status_passes(self, is_complete):
is_complete.return_value = True, []
app = self.create_app()
res = self.client.patch(self.get_url,
data=json.dumps({'status': 'pending'}))
eq_(res.status_code, 202, res.content)
eq_(app.__class__.objects.get(pk=app.pk).status, amo.STATUS_PENDING)
@patch('mkt.webapps.models.Webapp.is_complete')
def test_cant_skip(self, is_complete):
is_complete.return_value = True, []
app = self.create_app()
res = self.client.patch(self.get_url,
data=json.dumps({'status': 'public'}))
eq_(res.status_code, 400)
assert 'available choices' in self.get_error(res)['status'][0]
eq_(Addon.objects.get(pk=app.pk).status, amo.STATUS_NULL)
def test_public_waiting(self):
app = self.create_app()
app.update(status=amo.STATUS_PUBLIC_WAITING)
res = self.client.patch(self.get_url,
data=json.dumps({'status': 'public'}))
eq_(res.status_code, 202)
eq_(app.__class__.objects.get(pk=app.pk).status, amo.STATUS_PUBLIC)
class TestPreviewHandler(BaseOAuth, amo.tests.AMOPaths):
fixtures = fixture('user_2519', 'webapp_337141')
def setUp(self):
super(TestPreviewHandler, self).setUp()
self.app = Webapp.objects.get(pk=337141)
self.user = UserProfile.objects.get(pk=2519)
AddonUser.objects.create(user=self.user, addon=self.app)
self.file = base64.b64encode(open(self.preview_image(), 'r').read())
self.list_url = ('api_dispatch_list', {'resource_name': 'preview'},
{'app': self.app.pk})
self.good = {'file': {'data': self.file, 'type': 'image/jpg'},
'position': 1}
def test_has_cors(self):
self.assertCORS(self.client.get(self.list_url), 'post')
def test_no_addon(self):
_list_url = ('api_dispatch_list', {'resource_name': 'preview'})
res = self.client.post(_list_url, data=json.dumps(self.good))
eq_(res.status_code, 404)
def test_post_preview(self):
res = self.client.post(self.list_url, data=json.dumps(self.good))
eq_(res.status_code, 201)
previews = self.app.previews
eq_(previews.count(), 1)
eq_(previews.all()[0].position, 1)
def test_wrong_url(self):
url = list(self.list_url)
url[-1]['app'] = 'booyah'
res = self.client.post(url, data=json.dumps(self.good))
eq_(res.status_code, 400)
eq_(self.get_error(res)['app'], [u'Enter a whole number.'])
def test_not_mine(self):
self.app.authors.clear()
res = self.client.post(self.list_url, data=json.dumps(self.good))
eq_(res.status_code, 403)
def test_position_missing(self):
data = {'file': {'data': self.file, 'type': 'image/jpg'}}
res = self.client.post(self.list_url, data=json.dumps(data))
eq_(res.status_code, 400)
eq_(self.get_error(res)['position'], ['This field is required.'])
def test_preview_missing(self):
res = self.client.post(self.list_url, data=json.dumps({}))
eq_(res.status_code, 400)
eq_(self.get_error(res)['position'], ['This field is required.'])
def create(self):
self.client.post(self.list_url, data=json.dumps(self.good))
self.preview = self.app.previews.all()[0]
self.get_url = ('api_dispatch_detail',
{'resource_name': 'preview', 'pk': self.preview.pk})
def test_delete(self):
self.create()
res = self.client.delete(self.get_url)
eq_(res.status_code, 204)
eq_(self.app.previews.count(), 0)
def test_delete_not_mine(self):
self.create()
self.app.authors.clear()
res = self.client.delete(self.get_url)
eq_(res.status_code, 403)
def test_delete_not_there(self):
self.get_url = ('api_dispatch_detail',
{'resource_name': 'preview', 'pk': 123})
res = self.client.delete(self.get_url)
eq_(res.status_code, 404)
def test_get(self):
self.create()
res = self.client.get(self.get_url)
eq_(res.status_code, 200)
def test_get_not_mine(self):
self.create()
self.app.authors.clear()
res = self.client.get(self.get_url)
eq_(res.status_code, 403)
def test_get_not_there(self):
self.get_url = ('api_dispatch_detail',
{'resource_name': 'preview', 'pk': 123})
res = self.client.get(self.get_url)
eq_(res.status_code, 404)
```
#### File: mkt/webapps/cron.py
```python
import datetime
import os
import shutil
import stat
import time
from django.conf import settings
from django.db.models import Count
import commonware.log
import cronjobs
from celery.task.sets import TaskSet
from lib.es.utils import raise_if_reindex_in_progress
import amo
from amo.utils import chunked
from .models import Installed, Webapp
from .tasks import update_trending, webapp_update_weekly_downloads
log = commonware.log.getLogger('z.cron')
@cronjobs.register
def update_weekly_downloads():
"""Update the weekly "downloads" from the users_install table."""
raise_if_reindex_in_progress()
interval = datetime.datetime.today() - datetime.timedelta(days=7)
counts = (Installed.objects.values('addon')
.filter(created__gte=interval,
addon__type=amo.ADDON_WEBAPP)
.annotate(count=Count('addon')))
ts = [webapp_update_weekly_downloads.subtask(args=[chunk])
for chunk in chunked(counts, 1000)]
TaskSet(ts).apply_async()
@cronjobs.register
def clean_old_signed(seconds=60 * 60):
"""Clean out apps signed for reviewers."""
log.info('Removing old apps signed for reviewers')
root = settings.SIGNED_APPS_REVIEWER_PATH
for path in os.listdir(root):
full = os.path.join(root, path)
age = time.time() - os.stat(full)[stat.ST_ATIME]
if age > seconds:
log.debug('Removing signed app: %s, %dsecs old.' % (full, age))
shutil.rmtree(full)
@cronjobs.register
def update_app_trending():
"""
Update trending for all apps.
In testing on the server, each calculation takes about 2.5s. A chunk size
of 50 means each task will take about 2 minutes.
"""
chunk_size = 50
all_ids = list(Webapp.objects.values_list('id', flat=True))
for ids in chunked(all_ids, chunk_size):
update_trending.delay(ids)
```
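update_app_trending above splits the full ID list into batches of 50 so that each Celery task stays short. A standalone sketch of that chunking step; the chunked helper below is illustrative and presumably mirrors what amo.utils.chunked does:
```python
def chunked(seq, n):
    """Yield successive slices of at most n items from seq."""
    for i in range(0, len(seq), n):
        yield seq[i:i + n]

all_ids = list(range(1, 121))     # stand-in for Webapp.objects.values_list('id', flat=True)
batches = list(chunked(all_ids, 50))

print([len(b) for b in batches])  # [50, 50, 20]
# Each batch would then be dispatched to its own task, e.g. update_trending.delay(batch).
```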
#### File: webpay/tests/test_resources.py
```python
import json
from decimal import Decimal
import jwt
from django.core import mail
from mock import patch
from nose.tools import eq_, ok_
from amo import CONTRIB_PENDING, CONTRIB_PURCHASE
from amo.tests import TestCase
from amo.urlresolvers import reverse
from constants.payments import PROVIDER_BANGO
from market.models import Price, PriceCurrency
from users.models import UserProfile, GroupUser
from mkt.api.base import get_url, list_url
from mkt.api.tests.test_oauth import BaseOAuth
from mkt.constants import regions
from mkt.purchase.tests.utils import PurchaseTest
from mkt.site.fixtures import fixture
from mkt.webpay.models import ProductIcon
from stats.models import Contribution
class TestPrepare(PurchaseTest, BaseOAuth):
fixtures = fixture('webapp_337141', 'user_2519', 'prices')
def setUp(self):
BaseOAuth.setUp(self, api_name='webpay')
self.create_switch('marketplace')
self.list_url = list_url('prepare')
self.user = UserProfile.objects.get(pk=2519)
def test_allowed(self):
self._allowed_verbs(self.list_url, ['post'])
def test_anon(self):
eq_(self.anon.post(self.list_url, data={}).status_code, 401)
def test_good(self):
self.setup_base()
self.setup_package()
res = self.client.post(self.list_url, data=json.dumps({'app': 337141}))
contribution = Contribution.objects.get()
eq_(res.status_code, 201)
eq_(res.json['contribStatusURL'], reverse('api_dispatch_detail',
kwargs={'api_name': 'webpay', 'resource_name': 'status',
'uuid': contribution.uuid}))
ok_(res.json['webpayJWT'])
@patch('mkt.webapps.models.Webapp.has_purchased')
def test_already_purchased(self, has_purchased):
has_purchased.return_value = True
self.setup_base()
self.setup_package()
res = self.client.post(self.list_url, data=json.dumps({'app': 337141}))
eq_(res.status_code, 409)
eq_(res.content, '{"reason": "Already purchased app."}')
def _post(self):
return self.client.post(self.list_url,
data=json.dumps({'app': 337141}))
def test_waffle_fallback(self):
self.setup_base()
self.setup_package()
flag = self.create_flag('override-app-purchase', everyone=None)
flag.users.add(self.user.user)
with self.settings(PURCHASE_LIMITED=True):
eq_(self._post().status_code, 201)
class TestStatus(BaseOAuth):
fixtures = fixture('webapp_337141', 'user_2519')
def setUp(self):
super(TestStatus, self).setUp(api_name='webpay')
self.contribution = Contribution.objects.create(
addon_id=337141, user_id=2519, type=CONTRIB_PURCHASE,
uuid='some:uid')
self.get_url = ('api_dispatch_detail', {
'api_name': 'webpay', 'resource_name': 'status',
'uuid': self.contribution.uuid})
def test_allowed(self):
self._allowed_verbs(self.get_url, ['get'])
def test_get(self):
res = self.client.get(self.get_url)
eq_(res.status_code, 200)
eq_(res.json['status'], 'complete')
def test_no_contribution(self):
self.contribution.delete()
res = self.client.get(self.get_url)
eq_(res.status_code, 200, res.content)
eq_(res.json['status'], 'incomplete', res.content)
def test_incomplete(self):
self.contribution.update(type=CONTRIB_PENDING)
res = self.client.get(self.get_url)
eq_(res.status_code, 200, res.content)
eq_(res.json['status'], 'incomplete', res.content)
def test_no_purchase(self):
self.contribution.addon.addonpurchase_set.get().delete()
res = self.client.get(self.get_url)
eq_(res.status_code, 200, res.content)
eq_(res.json['status'], 'incomplete', res.content)
class TestPrices(BaseOAuth):
def make_currency(self, amount, tier, currency, region):
return PriceCurrency.objects.create(price=Decimal(amount), tier=tier,
currency=currency, provider=PROVIDER_BANGO, region=region.id)
def setUp(self):
super(TestPrices, self).setUp(api_name='webpay')
self.price = Price.objects.create(name='1', price=Decimal(1))
self.currency = self.make_currency(3, self.price, 'DE', regions.DE)
self.us_currency = self.make_currency(3, self.price, 'USD', regions.US)
self.list_url = list_url('prices')
self.get_url = get_url('prices', self.price.pk)
# If regions change, this will blow up.
assert regions.BR.default_currency == 'BRL'
def get_currencies(self, data):
return [p['currency'] for p in data['prices']]
def test_list_allowed(self):
self._allowed_verbs(self.list_url, ['get'])
self._allowed_verbs(self.get_url, ['get'])
def test_single(self):
res = self.client.get(self.get_url)
eq_(res.status_code, 200)
eq_(res.json['pricePoint'], '1')
eq_(res.json['name'], 'Tier 1')
# Ensure that price is in the JSON since solitude depends upon it.
eq_(res.json['price'], '1.00')
def test_price_point(self):
res = self.client.get(self.list_url + ({'pricePoint': '1'},))
eq_(res.status_code, 200)
data = json.loads(res.content)
eq_(data['meta']['total_count'], 1)
eq_(data['objects'][0]['pricePoint'], '1')
def test_list(self):
res = self.client.get(self.list_url)
eq_(res.json['meta']['total_count'], 1)
self.assertSetEqual(self.get_currencies(res.json['objects'][0]),
['USD', 'DE'])
def test_list_filtered(self):
self.currency.update(provider=0)
res = self.client.get(self.list_url + ({'provider': 'bango'},))
eq_(self.get_currencies(res.json['objects'][0]), ['USD'])
def test_prices(self):
res = self.client.get(self.get_url)
eq_(res.status_code, 200)
self.assertSetEqual(self.get_currencies(res.json), ['USD', 'DE'])
def test_prices_filtered(self):
self.currency.update(provider=0)
res = self.client.get(self.get_url + ({'provider': 'bango'},))
eq_(res.status_code, 200)
self.assertSetEqual(self.get_currencies(res.json), ['USD'])
def test_has_cors(self):
self.assertCORS(self.client.get(self.get_url), 'get')
@patch('mkt.webpay.resources.PriceResource.dehydrate_prices')
def test_other_cors(self, prices):
prices.side_effect = ValueError
res = self.client.get(self.get_url)
eq_(res.status_code, 500)
self.assertCORS(res, 'get')
def test_locale(self):
self.make_currency(5, self.price, 'BRL', regions.BR)
res = self.client.get(self.get_url, HTTP_ACCEPT_LANGUAGE='pt-BR')
eq_(res.status_code, 200)
eq_(res.json['localized']['locale'], 'R$5,00')
def test_locale_list(self):
# Check that for each price tier a different localisation is
# returned.
self.make_currency(2, self.price, 'BRL', regions.BR)
price_two = Price.objects.create(name='2', price=Decimal(1))
self.make_currency(12, price_two, 'BRL', regions.BR)
res = self.client.get(self.list_url, HTTP_ACCEPT_LANGUAGE='pt-BR')
eq_(res.status_code, 200)
eq_(res.json['objects'][0]['localized']['locale'], 'R$2,00')
eq_(res.json['objects'][1]['localized']['locale'], 'R$12,00')
def test_no_locale(self):
# This results in a region of BR and a currency of BRL. But there
# isn't a price tier for that currency. So we don't know what to show.
res = self.client.get(self.get_url, HTTP_ACCEPT_LANGUAGE='pt-BR')
eq_(res.status_code, 200)
eq_(res.json['localized'], {})
class TestNotification(BaseOAuth):
fixtures = fixture('webapp_337141', 'user_2519')
def setUp(self):
super(TestNotification, self).setUp(api_name='webpay')
self.grant_permission(self.profile, 'Transaction:NotifyFailure')
self.contribution = Contribution.objects.create(addon_id=337141,
uuid='sample:uuid')
self.list_url = ('api_dispatch_list', {'resource_name': 'failure'})
self.get_url = ['api_dispatch_detail',
{'resource_name': 'failure',
'pk': self.contribution.pk}]
def test_list_allowed(self):
self._allowed_verbs(self.get_url, ['patch'])
def test_notify(self):
url = 'https://someserver.com'
res = self.client.patch(self.get_url,
data=json.dumps({'url': url, 'attempts': 5}))
eq_(res.status_code, 202)
eq_(len(mail.outbox), 1)
msg = mail.outbox[0]
assert url in msg.body
eq_(msg.recipients(), [u'<EMAIL>'])
def test_no_permission(self):
GroupUser.objects.filter(user=self.profile).delete()
res = self.client.patch(self.get_url, data=json.dumps({}))
eq_(res.status_code, 401)
def test_missing(self):
res = self.client.patch(self.get_url, data=json.dumps({}))
eq_(res.status_code, 400)
def test_not_there(self):
self.get_url[1]['pk'] += 1
res = self.client.patch(self.get_url, data=json.dumps({}))
eq_(res.status_code, 404)
def test_no_uuid(self):
self.contribution.update(uuid=None)
res = self.client.patch(self.get_url, data=json.dumps({}))
eq_(res.status_code, 404)
class TestProductIconResource(BaseOAuth):
fixtures = fixture('webapp_337141', 'user_2519')
def setUp(self):
super(TestProductIconResource, self).setUp(api_name='webpay')
self.list_url = list_url('product/icon')
p = patch('mkt.webpay.resources.tasks.fetch_product_icon')
self.fetch_product_icon = p.start()
self.addCleanup(p.stop)
self.data = {
'ext_size': 128,
'ext_url': 'http://someappnoreally.com/icons/icon_128.png',
'size': 64
}
def post(self, data, with_perms=True):
if with_perms:
self.grant_permission(self.profile, 'ProductIcon:Create')
return self.client.post(self.list_url, data=json.dumps(data))
def test_list_allowed(self):
self._allowed_verbs(self.list_url, ['get', 'post'])
def test_missing_fields(self):
res = self.post({'ext_size': 1})
eq_(res.status_code, 400)
def test_post(self):
res = self.post(self.data)
eq_(res.status_code, 202)
self.fetch_product_icon.delay.assert_called_with(self.data['ext_url'],
self.data['ext_size'],
self.data['size'])
def test_post_without_perms(self):
res = self.post(self.data, with_perms=False)
eq_(res.status_code, 401)
def test_anon_get(self):
data = {
'ext_size': 128,
'ext_url': 'http://someappnoreally.com/icons/icon_128.png',
'size': 64,
'format': 'png'
}
icon = ProductIcon.objects.create(**data)
# We don't need to filter by these:
data.pop('size')
data.pop('format')
res = self.anon.get(self.list_url, data=data)
eq_(res.status_code, 200)
ob = json.loads(res.content)['objects'][0]
eq_(ob['url'], icon.url())
class TestSigCheck(TestCase):
def test(self):
key = 'marketplace'
aud = 'webpay'
secret = 'third door on the right'
with self.settings(APP_PURCHASE_SECRET=secret,
APP_PURCHASE_KEY=key,
APP_PURCHASE_AUD=aud):
res = self.client.post(reverse('webpay.sig_check'))
eq_(res.status_code, 201, res)
data = json.loads(res.content)
req = jwt.decode(data['sig_check_jwt'].encode('ascii'), secret)
eq_(req['iss'], key)
eq_(req['aud'], aud)
eq_(req['typ'], 'mozilla/payments/sigcheck/v1')
```
#### File: zadmin/tests/test_views.py
```python
import json
from django.conf import settings
from nose.tools import eq_
from pyquery import PyQuery as pq
import amo
import amo.tests
from amo.urlresolvers import reverse
from editors.models import RereviewQueue
from users.models import UserProfile
class TestGenerateError(amo.tests.TestCase):
fixtures = ['base/users']
def setUp(self):
self.client.login(username='<EMAIL>', password='password')
metlog = settings.METLOG
METLOG_CONF = {
'logger': 'zamboni',
'plugins': {'cef': ('metlog_cef.cef_plugin:config_plugin',
{'override': True})},
'sender': {'class': 'metlog.senders.DebugCaptureSender'},
}
from metlog.config import client_from_dict_config
self.metlog = client_from_dict_config(METLOG_CONF, metlog)
self.metlog.sender.msgs.clear()
def test_metlog_statsd(self):
self.url = reverse('zadmin.generate-error')
self.client.post(self.url,
{'error': 'metlog_statsd'})
eq_(len(self.metlog.sender.msgs), 1)
msg = json.loads(self.metlog.sender.msgs[0])
eq_(msg['severity'], 6)
eq_(msg['logger'], 'zamboni')
eq_(msg['payload'], '1')
eq_(msg['type'], 'counter')
eq_(msg['fields']['rate'], 1.0)
eq_(msg['fields']['name'], 'z.zadmin')
def test_metlog_json(self):
self.url = reverse('zadmin.generate-error')
self.client.post(self.url,
{'error': 'metlog_json'})
eq_(len(self.metlog.sender.msgs), 1)
msg = json.loads(self.metlog.sender.msgs[0])
eq_(msg['type'], 'metlog_json')
eq_(msg['logger'], 'zamboni')
eq_(msg['fields']['foo'], 'bar')
eq_(msg['fields']['secret'], 42)
def test_metlog_cef(self):
self.url = reverse('zadmin.generate-error')
self.client.post(self.url,
{'error': 'metlog_cef'})
eq_(len(self.metlog.sender.msgs), 1)
msg = json.loads(self.metlog.sender.msgs[0])
eq_(msg['type'], 'cef')
eq_(msg['logger'], 'zamboni')
def test_metlog_sentry(self):
self.url = reverse('zadmin.generate-error')
self.client.post(self.url,
{'error': 'metlog_sentry'})
msgs = [json.loads(m) for m in self.metlog.sender.msgs]
eq_(len(msgs), 1)
msg = msgs[0]
eq_(msg['type'], 'sentry')
class TestAddonAdmin(amo.tests.TestCase):
fixtures = ['base/users', 'base/337141-steamcube', 'base/addon_3615']
def setUp(self):
self.login('<EMAIL>')
self.url = reverse('admin:addons_addon_changelist')
def test_no_webapps(self):
res = self.client.get(self.url, follow=True)
eq_(res.status_code, 200)
doc = pq(res.content)
rows = doc('#result_list tbody tr')
eq_(rows.length, 1)
eq_(rows.find('a').attr('href'), '337141/')
class TestManifestRevalidation(amo.tests.TestCase):
fixtures = ['webapps/337141-steamcube', 'base/users']
def setUp(self):
self.url = reverse('zadmin.manifest_revalidation')
def _test_revalidation(self):
current_count = RereviewQueue.objects.count()
response = self.client.post(self.url)
eq_(response.status_code, 200)
self.assertTrue('Manifest revalidation queued' in response.content)
eq_(RereviewQueue.objects.count(), current_count + 1)
def test_revalidation_by_reviewers(self):
        # Senior reviewer users should be able to use the feature.
user = UserProfile.objects.get(email='<EMAIL>')
self.grant_permission(user, 'ReviewerAdminTools:View')
assert self.client.login(username='<EMAIL>',
password='password')
self._test_revalidation()
def test_revalidation_by_admin(self):
# Admin users should be able to use the feature.
assert self.client.login(username='<EMAIL>',
password='password')
self._test_revalidation()
    def test_unprivileged_user(self):
# Unprivileged user should not be able to reach the feature.
assert self.client.login(username='<EMAIL>',
password='password')
eq_(self.client.post(self.url).status_code, 403)
``` |
{
"source": "joergeschmann/counselor",
"score": 2
} |
#### File: counselor/endpoint/check_endpoint.py
```python
from typing import List
from counselor.endpoint.common import Response
from counselor.endpoint.http_endpoint import HttpEndpoint, EndpointConfig
class CheckEndpoint(HttpEndpoint):
"""
At the moment checks are implemented as periodic http requests with our own watcher implementation.
The next step is to involve the consul agent.
TODO: implement
"""
def __init__(self, endpoint_config: EndpointConfig, url_parts: List[str]):
if url_parts is None:
url_parts = ["agent", "check"]
super().__init__(endpoint_config, url_parts)
def register(self, name, script=None, check_id=None, interval=None, ttl=None, notes=None, http=None):
response = self.put_response(url_parts=['register'], query=None, payload={
'ID': check_id,
'Name': name,
'Notes': notes,
'Script': script,
'HTTP': http,
'Interval': interval,
'TTL': ttl
})
return Response.create_from_http_response(response)
def deregister(self, check_id):
response = self.put_response(url_parts=['deregister', check_id])
return Response.create_from_http_response(response)
def ttl_pass(self, check_id):
response = self.put_response(url_parts=['pass', check_id])
return Response.create_from_http_response(response)
def ttl_warn(self, check_id):
response = self.put_response(url_parts=['warn', check_id])
return Response.create_from_http_response(response)
def ttl_fail(self, check_id):
response = self.put_response(url_parts=['fail', check_id])
return Response.create_from_http_response(response)
```
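A minimal usage sketch for the check endpoint above, assuming the counselor package is installed and a Consul agent is reachable with a default `EndpointConfig` (as constructed in the test files later in this entry); the check name, id, and TTL value are hypothetical.
```python
from counselor.endpoint.check_endpoint import CheckEndpoint
from counselor.endpoint.http_endpoint import EndpointConfig

# Defaults to the ["agent", "check"] URL parts when url_parts is None.
checks = CheckEndpoint(EndpointConfig(), url_parts=None)

# Register a TTL check, mark it as passing once, then remove it again.
response = checks.register(name="demo-check", check_id="demo-check-1", ttl="30s")
if response.successful:
    checks.ttl_pass("demo-check-1")
    checks.deregister("demo-check-1")
```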
#### File: counselor/endpoint/kv_endpoint.py
```python
import logging
from typing import List
from counselor.endpoint.common import Response
from counselor.endpoint.decoder import JsonDecoder, ConsulKVDecoder, ConsulKVListDecoder
from counselor.endpoint.entity import ConsulKeyValue
from counselor.endpoint.http_client import HttpResponse
from counselor.endpoint.http_endpoint import HttpEndpoint, EndpointConfig
LOGGER = logging.getLogger(__name__)
class KVPath:
def __init__(self, project: str, domain: str, service: str, detail: str = "config", env: str = "dev"):
self.project = project
self.domain = domain
self.service = service
self.detail = detail
self.env = env
@staticmethod
def parse_from_path(path: str) -> 'KVPath':
splitted_path = path.split('/')
if len(splitted_path) != 5:
raise ValueError("Path should have 5 parts")
return KVPath(project=splitted_path[0],
domain=splitted_path[2],
service=splitted_path[3],
detail=splitted_path[4],
env=splitted_path[1])
def compose_path(self) -> str:
return "{}/{}/{}/{}/{}".format(self.project, self.env, self.domain, self.service, self.detail)
class KVEndpoint(HttpEndpoint):
"""Key value store interface to consul. This class is meant to store dicts as values.
TODO: use StatusResponse as returned value
"""
def __init__(self, endpoint_config: EndpointConfig, url_parts: List[str]):
if url_parts is None:
url_parts = ["kv"]
super().__init__(endpoint_config, url_parts)
def get_raw(self, path) -> (Response, dict):
"""Return the raw config as dict, without the Consul specific fields."""
query_params = {'raw': True}
response = self._get(path=path, query_params=query_params)
endpoint_response = Response.create_from_http_response(response)
if not endpoint_response.successful:
return endpoint_response, None
decoder = JsonDecoder()
result = decoder.decode(response.payload)
if not decoder.successful:
endpoint_response.update_by_decode_result(decoder)
return endpoint_response, result
def get(self, path) -> (Response, ConsulKeyValue):
"""Get a value.
Raw means without the Consul metadata like CreateIndex and ModifyIndex.
"""
response = self._get(path=path)
endpoint_response = Response.create_from_http_response(response)
if not endpoint_response.successful:
return endpoint_response, None
decoder = ConsulKVDecoder()
consul_kv = decoder.decode(response.payload)
if not decoder.successful:
endpoint_response.update_by_decode_result(decoder)
return endpoint_response, consul_kv
def get_recursive(self, path) -> (Response, List[ConsulKeyValue]):
"""Return an array of all the entries from the path downwards"""
query_params = {'recurse': True}
response = self._get(path=path, query_params=query_params)
endpoint_response = Response.create_from_http_response(response)
if not endpoint_response.successful:
return endpoint_response, None
decoder = ConsulKVListDecoder()
result_list = decoder.decode(response.payload)
if not decoder.successful:
endpoint_response.update_by_decode_result(decoder)
return endpoint_response, result_list
def _get(self, path: str, query_params=None) -> HttpResponse:
if path is None or path == "":
return HttpResponse(status_code=500, body="Path can not be empty", headers=None)
if query_params is None:
query_params = {}
path = path.lstrip('/')
return self.get_response(url_parts=[path], query=query_params)
def set(self, path: str, value, flags=None) -> Response:
"""Set a value.
"""
path = path.rstrip('/')
query_params = {}
if flags is not None:
query_params['flags'] = flags
response = self.put_response(url_parts=[path], query=query_params, payload=value)
return Response.create_from_http_response(response)
def merge(self, path: str, updates: dict) -> Response:
"""Try to fetch an existing config. If successful, overwrite the values with the updates.
Otherwise assume that there is no config yet and try to store it."""
response, config = self.get_raw(path)
if not response.successful:
return self.set(path, updates)
if not isinstance(config, dict):
return Response.create_error_result_with_message_only("Current config is not a dict")
for key in updates.keys():
config[key] = updates[key]
return self.set(path, config)
def delete(self, path, recurse=False) -> Response:
"""Remove an item.
"""
query_params = {'recurse': True} if recurse else {}
response = self.delete_response(url_parts=[path], query=query_params)
return Response.create_from_http_response(response)
def acquire_lock(self, path, session) -> Response:
"""Set a lock.
"""
response = self.put_response(url_parts=[path], query=None, payload={'acquire': session})
return Response.create_from_http_response(response)
def release_lock(self, path, session) -> Response:
"""Release a lock.
"""
response = self.put_response(url_parts=[path], query=None, payload={'release': session})
return Response.create_from_http_response(response)
```
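A short sketch of the path convention and the `merge` helper defined above, assuming a reachable Consul agent and a default `EndpointConfig`; the project, domain, and service names are hypothetical.
```python
from counselor.endpoint.http_endpoint import EndpointConfig
from counselor.endpoint.kv_endpoint import KVEndpoint, KVPath

# compose_path() orders the parts as project/env/domain/service/detail.
path = KVPath(project="demo", domain="billing", service="api").compose_path()
print(path)  # -> "demo/dev/billing/api/config"

kv = KVEndpoint(EndpointConfig(), url_parts=None)  # defaults to the ["kv"] URL part
kv.merge(path, {"rate_limit": 100})        # creates the dict or overwrites single keys
response, config = kv.get_raw(path)        # plain dict, without Consul metadata
```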
#### File: counselor/endpoint/service_endpoint.py
```python
import logging
from typing import List
from counselor.endpoint.common import Response
from counselor.endpoint.decoder import ServiceDefinitionDecoder, ServiceDefinitionListDecoder
from counselor.endpoint.encoder import Encoder
from counselor.endpoint.entity import ServiceDefinition
from counselor.endpoint.http_endpoint import HttpEndpoint, EndpointConfig
LOGGER = logging.getLogger(__name__)
class ServiceEndpoint(HttpEndpoint):
"""
Service endpoint for Consul.
    It uses the agent service endpoint, which is easier than the low-level service endpoint.
"""
def __init__(self, endpoint_config: EndpointConfig, url_parts: List[str] = None):
if url_parts is None:
url_parts = ["agent"]
super().__init__(endpoint_config, url_parts)
def search(self, query: List[tuple] = None) -> (Response, List[ServiceDefinition]):
"""Return all the services that are registered with the local agent.
"""
response = self.get_response(url_parts=['services'], query=query)
endpoint_response = Response.create_from_http_response(response)
if not endpoint_response.successful:
return endpoint_response, None
decoder = ServiceDefinitionListDecoder()
found_services = decoder.decode(response.payload)
        if not decoder.successful:
            endpoint_response.update_by_decode_result(decoder)
            return endpoint_response, None
        return endpoint_response, found_services
def register(self, service_definition: ServiceDefinition) -> Response:
"""Register a service.
"""
service_definition.validate()
payload = Encoder.service_definition_to_consul_dict(service_definition)
response = self.put_response(url_parts=['service', 'register'], query=None, payload=payload)
return Response.create_from_http_response(response)
def get_details(self, service_key) -> (Response, ServiceDefinition):
"""Get the details of the service.
"""
response = self.get_response(url_parts=['service', service_key])
endpoint_response = Response.create_from_http_response(response)
if not endpoint_response.successful:
return endpoint_response, None
decoder = ServiceDefinitionDecoder()
service_definition = decoder.decode(response.payload)
        if not decoder.successful:
            endpoint_response.update_by_decode_result(decoder)
            return endpoint_response, None
        return endpoint_response, service_definition
def update(self, service_definition: ServiceDefinition) -> Response:
"""Update is the same as registering - the values are simply overwritten
"""
return self.register(service_definition)
def deregister(self, service_key) -> Response:
"""Deregister a service.
"""
response = self.put_response(url_parts=['service', 'deregister', service_key])
return Response.create_from_http_response(response)
```
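A minimal sketch of listing and deregistering services through the endpoint above, again assuming a local Consul agent and a default `EndpointConfig`; the service key is hypothetical.
```python
from counselor.endpoint.http_endpoint import EndpointConfig
from counselor.endpoint.service_endpoint import ServiceEndpoint

services = ServiceEndpoint(EndpointConfig())  # uses the ["agent"] URL part by default

response, found = services.search()
if response.successful:
    for definition in found:
        print(definition)

# Deregistration only needs the service key.
services.deregister("demo-service")
```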
#### File: counselor/counselor/trigger.py
```python
import logging
from threading import Thread, Event
LOGGER = logging.getLogger(__name__)
class Trigger(Thread):
"""Periodically execute registered tasks.
"""
def __init__(self):
Thread.__init__(self)
self.tasks = []
self.running = False
def clear(self):
self.stop_tasks()
self.tasks.clear()
def add_task(self, task: Thread):
self.tasks.append(task)
def run_nonblocking(self):
self.run()
self.running = True
def start_blocking(self, close_event: Event):
self.run_nonblocking()
close_event.wait()
self.stop_tasks()
def get_number_of_active_tasks(self) -> int:
active = 0
for t in self.tasks:
if t.is_alive():
active += 1
return active
def run(self):
LOGGER.info("Starting tasks...")
for t in self.tasks:
t.start()
LOGGER.info("Active task {}".format(t.name))
LOGGER.info("Trigger is active.")
def stop_tasks(self):
for t in self.tasks:
LOGGER.info("Stopping task {}".format(t.name))
t.stop()
self.running = False
LOGGER.info("Trigger exited.")
```
#### File: counselor/counselor/watcher.py
```python
import logging
import time
from datetime import timedelta
from threading import Event, Thread
LOGGER = logging.getLogger(__name__)
class Task(Thread):
"""Base class to represent a Task that is executed by a trigger.
"""
def __init__(self, name: str, interval: timedelta, stop_event: Event, log_interval_seconds=3 * 60 * 60,
daemon=True):
Thread.__init__(self, daemon=daemon)
self.interval = interval
self.stop_event = stop_event
self.name = name
self.last_log_time = 0
self.info_log_interval_seconds = log_interval_seconds
def log_with_interval(self, message):
current_timestamp = int(time.time())
if (current_timestamp - self.last_log_time) > self.info_log_interval_seconds:
LOGGER.info(message)
self.last_log_time = current_timestamp
def get_name(self):
"""Return a unique name to identify the task
"""
return self.name
def check(self):
"""Method to implement the check that is periodically executed
"""
pass
def stop(self):
self.stop_event.set()
self.join()
def run(self):
while not self.stop_event.wait(self.interval.total_seconds()):
self.check()
```
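A sketch of how the `Trigger` and `Task` classes above are meant to work together: subclass `Task`, implement `check()`, and hand the instance to a `Trigger`. The task name and interval are arbitrary.
```python
from datetime import timedelta
from threading import Event

from counselor.trigger import Trigger
from counselor.watcher import Task

class HeartbeatTask(Task):
    def check(self):
        # check() is called every `interval` until the stop event is set.
        self.log_with_interval("heartbeat")

trigger = Trigger()
trigger.add_task(HeartbeatTask(name="heartbeat", interval=timedelta(seconds=5), stop_event=Event()))
trigger.run_nonblocking()   # starts every registered task thread
# ... do other work ...
trigger.stop_tasks()        # sets each task's stop event and joins it
```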
#### File: tests/manual/kv_endpoint_test.py
```python
import logging
import unittest
from counselor import client
from counselor.endpoint.http_endpoint import EndpointConfig
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
class KeyValueTests(unittest.TestCase):
def setUp(self):
LOGGER.info("Setting up")
self.test_key_prefix = "test"
self.consul_config = EndpointConfig(token="")
self.consul = client.ConsulClient(config=self.consul_config)
def tearDown(self):
LOGGER.info("Cleaning up")
response = self.consul.kv.delete(self.test_key_prefix, recurse=True)
if not response.successful:
LOGGER.info("Cound not delete key: {}".format(response.as_string()))
def test_kv_raw_entry(self):
test_config = {
"key": "value",
"active": True,
"pairs": ["btc", "etc", "ren"],
"strategy": {
"goal": 42,
"risk": 3.1415,
}
}
key = self.test_key_prefix + "/raw"
set_response = self.consul.kv.set(key, test_config)
self.assertTrue(set_response.successful)
get_response, found_entry = self.consul.kv.get_raw(key)
self.assertTrue(get_response.successful)
self.assertEqual(test_config, found_entry, "Configs do not match")
def test_kv_consul_entry(self):
test_config = {
"key": "value",
"active": True,
"pairs": ["btc", "etc", "ren"],
"strategy": {
"goal": 42,
"risk": 3.1415,
}
}
key = self.test_key_prefix + "/config"
set_response = self.consul.kv.set(key, test_config)
self.assertTrue(set_response.successful)
updates = {"active": False, "strategy": {"goal": "none"}}
merge_response = self.consul.kv.merge(key, updates)
        self.assertTrue(merge_response.successful)
get_response, found_entry = self.consul.kv.get_raw(key)
self.assertTrue(get_response.successful)
self.assertNotEqual(test_config, found_entry, "Configs should diverge")
self.assertEqual(test_config.get("pairs"), found_entry.get("pairs"))
self.assertEqual(updates.get("active"), found_entry.get("active"))
def test_recursive_kv(self):
service_config_path = self.test_key_prefix + "/service"
service_config = {
"env": "test",
"pairs": ["btc", "etc", "ren"],
"strategy": {
"goal": 42,
"risk": 3.1415,
}
}
response = self.consul.kv.set(service_config_path, service_config)
self.assertTrue(response.successful, response.as_string())
s1_config_path = service_config_path + "/s1"
s1_config = {
"name": "service-1",
"current_rate": 1.8
}
response = self.consul.kv.set(s1_config_path, s1_config)
self.assertTrue(response.successful, response.as_string())
s2_config_path = service_config_path + "/s2"
s2_config = {
"name": "service-2",
"current_rate": 1.4
}
response = self.consul.kv.set(s2_config_path, s2_config)
self.assertTrue(response.successful, response.as_string())
response, kv_list = self.consul.kv.get_recursive(service_config_path)
self.assertTrue(response.successful)
self.assertEqual(3, len(kv_list))
if __name__ == '__main__':
unittest.main()
```
#### File: tests/manual/kv_watcher_test.py
```python
import logging
import unittest
from datetime import timedelta
from threading import Event
from counselor.client import ConsulClient
from counselor.endpoint.http_endpoint import EndpointConfig
from counselor.endpoint.kv_endpoint import KVPath
from counselor.kv_watcher import ConfigUpdateListener, KVWatcherTask
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
class TestListener(ConfigUpdateListener):
def __init__(self, kv_path: KVPath):
self.kv_path = kv_path
self.initialized = False
self.updated = False
self.current_config = None
def get_path(self) -> str:
return self.kv_path.compose_path()
def on_init(self, config: dict) -> bool:
self.current_config = config
self.initialized = True
return True
def on_update(self, new_config: dict) -> bool:
self.current_config = new_config
self.updated = True
return True
class KVWatcherTests(unittest.TestCase):
def setUp(self):
LOGGER.info("Setting up")
self.consul_config = EndpointConfig()
self.consul_client = ConsulClient(config=self.consul_config)
self.kv_config_path = KVPath(project="project", domain="feature", service="service",
detail="config", env="dev")
def tearDown(self):
LOGGER.info("Cleaning up")
response = self.consul_client.kv.delete(self.kv_config_path.compose_path(), recurse=True)
if not response.successful:
LOGGER.info("Cound not delete key: {}".format(response.as_string()))
def test_config_listener(self):
test_config = {
"a": 1,
"b": "x",
"c": True
}
set_response = self.consul_client.kv.merge(self.kv_config_path.compose_path(), test_config)
self.assertTrue(set_response.successful)
test_listener = TestListener(self.kv_config_path)
interval = timedelta(seconds=1)
stop_event = Event()
watcher_task = KVWatcherTask(test_listener, self.consul_client, interval, stop_event)
watcher_task.check()
self.assertTrue(test_listener.initialized)
self.assertFalse(test_listener.updated)
self.assertEqual(test_config, test_listener.current_config)
self.assertTrue(watcher_task.last_modify_index > 0)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joergfranke/recnet",
"score": 3
} |
#### File: examples/numbers_recognition/make_data_set.py
```python
import os
import klepto
import numpy as np
numbers_ = [
[
[0,1,1,0],
[1,0,0,1],
[1,0,0,1],
[0,1,1,0],
],
[
[1,1,0],
[0,1,0],
[0,1,0],
[1,1,1],
],
[
[0,1,1,0],
[1,0,0,1],
[0,0,1,0],
[0,1,0,0],
[1,1,1,1],
],
[
[0,1,1,0],
[1,0,0,1],
[0,0,1,0],
[1,0,0,1],
[0,1,1,0],
],
[
[0,0,1,0,0,1],
[0,1,0,0,1,0],
[1,1,1,1,0,0],
[0,0,1,0,0,0],
[0,1,0,0,0,0],
],
[
[1,1,1],
[1,0,0],
[1,1,1],
[0,0,1],
[1,0,1],
[1,1,1],
],
[
[0,1,1,0],
[1,0,0,0],
[1,1,1,0],
[1,0,0,1],
[0,1,1,0],
],
[
[1,1,1,1,1],
[0,0,0,1,0],
[0,0,1,0,0],
[0,1,1,1,0],
[0,0,1,0,0],
[0,0,1,0,0],
],
[
[0,1,1,0],
[1,0,0,1],
[0,1,1,0],
[1,0,0,1],
[0,1,1,0],
],
[
[0,1,1,0],
[1,0,0,1],
[0,1,1,1],
[0,0,0,1],
[0,0,1,0],
]
]
numbers = [np.asarray(num) for num in numbers_]
class Alphabet():
def __init__(self, chars=None):
self.bitmaps = [np.asarray(num) for num in numbers_] #bitmaps
self.n = len(self.bitmaps)
self.chars = chars if chars else [str(i) for i in range(len(self.bitmaps))]
self.maxHt = max([bitmap.shape[0] for bitmap in self.bitmaps])
self.maxWd = max([bitmap.shape[1] for bitmap in self.bitmaps])
def get_char(self, index=None):
if index is None:
index = np.random.choice(self.n)
bitmap = self.bitmaps[index]
char = self.chars[index]
return index, char, bitmap
def __str__(self):
ret = ''
for c, b in zip(self.chars, self.bitmaps):
slab = '\n'.join((''.join('# '[p] for p in r) for r in b))
ret += '\n{}:\n{}'.format(c, slab)
return ret
class Scribe():
def __init__(self, parameter):
self.alphabet = Alphabet() #parameter["alphabet"]
self.noise = parameter["noise"]
self.hbuffer = parameter["hbuffer"]
self.vbuffer = parameter["vbuffer"]
self.avg_len = parameter["avg_seq_len"]
self.varying_len = parameter["varying_len"]
self.nchars_per_sample = parameter["nchars_per_sample"]
self.nDims = self.alphabet.maxHt + self.vbuffer
self.shuffled_char_indices = np.arange(self.alphabet.n)
self.shuffled_char_pointer = 0
self.nClasses = len(self.alphabet.chars)
self.len_range = (3*self.avg_len//4, 5*self.avg_len//4)
def get_sample_of_random_len(self):
length = np.random.randint(*self.len_range)
return self.get_sample_of_len(length)
def get_next_char(self):
if self.shuffled_char_pointer == len(self.shuffled_char_indices):
np.random.shuffle(self.shuffled_char_indices)
self.shuffled_char_pointer = 0
self.shuffled_char_pointer += 1
return self.alphabet.get_char(self.shuffled_char_indices[
self.shuffled_char_pointer-1])
def get_sample_of_len(self, length):
image = np.zeros((self.nDims, length), dtype=float)
labels = []
at_wd = np.random.exponential(self.hbuffer) + self.hbuffer
while at_wd < length - self.hbuffer - self.alphabet.maxWd:
index, char, bitmap = self.get_next_char()
ht, wd = bitmap.shape
at_ht = np.random.randint(self.vbuffer + self.alphabet.maxHt - ht + 1)
image[at_ht:at_ht+ht, at_wd:at_wd+wd] += bitmap
at_wd += wd + np.random.randint(self.hbuffer)
labels.append(index)
image += self.noise * np.random.normal(size=image.shape,)
image = np.clip(image, 0, 1)
return image, labels
def get_sample_of_n_chars(self, n):
gaps = np.random.randint(self.hbuffer, size=n+1)
labels_bitmaps = [self.get_next_char() for _ in range(n)]
labels, _, bitmaps = zip(*labels_bitmaps)
length = sum(gaps) + sum(b.shape[1] for b in bitmaps)
image = np.zeros((self.nDims, length), dtype=float)
at_wd = gaps[-1]
for i, bitmap in enumerate(bitmaps):
ht, wd = bitmap.shape
at_ht = np.random.randint(self.vbuffer + self.alphabet.maxHt - ht + 1)
image[at_ht:at_ht+ht, at_wd:at_wd+wd] += bitmap
at_wd += wd + gaps[i]
image += self.noise * np.random.normal(size=image.shape,)
image = np.clip(image, 0, 1)
return image, labels
def get_sample(self):
if self.nchars_per_sample:
return self.get_sample_of_n_chars(self.nchars_per_sample)
if self.varying_len:
return self.get_sample_of_random_len()
else:
return self.get_sample_of_len(self.avg_len)
def __repr__(self):
if self.nchars_per_sample:
cps = self.nchars_per_sample
            len = 'Varies (to fit the {} chars per sample)'.format(cps)
else:
cps = 'Depends on the random length'
len = 'Avg:{} Range:{}'.format(self.avg_len, self.len_range)
ret = ('Scribe:'
'\n Alphabet: {}'
'\n Noise: {}'
'\n Buffers (vert, horz): {}, {}'
'\n Characters per sample: {}'
'\n Length: {}'
'\n Height: {}'
'\n'.format(
''.join(self.alphabet.chars),
self.noise,
self.hbuffer,
self.vbuffer,
cps, len,
self.nDims,
))
return ret
###### Main
########################################
if __name__ == "__main__":
# create data set folder
if "data_set" not in os.listdir(os.getcwd()):
os.mkdir("data_set")
out_file_name = "data_name"
if not out_file_name.endswith('.pkl'):
out_file_name += '.pkl'
scribe_args = {
'noise': .05,
'vbuffer': 3,
'hbuffer': 5,
'avg_seq_len': 5,
'varying_len': True,
'nchars_per_sample': 10,
'num_samples':2000,
}
scriber = Scribe(scribe_args)
alphabet_chars = scriber.alphabet.chars
xs = []
ys = []
for i in range(scribe_args['num_samples']):
x, y = scriber.get_sample()
xs.append(np.transpose(x))
ys.append(np.asarray(y))
print(y, ''.join(alphabet_chars[i] for i in y))
print("write train set")
print("train set length: " + str(ys.__len__()))
file_name = "data_set/numbers_image_train.klepto"
print("train set name: " + file_name)
d = klepto.archives.file_archive(file_name, cached=True,serialized=True)
d['x'] = xs
d['y'] = ys
d.dump()
d.clear()
xs = []
ys = []
for i in range(int(scribe_args['num_samples'] * 0.1)):
x, y = scriber.get_sample()
xs.append(np.transpose(x))
ys.append(np.asarray(y))
print(y, ''.join(alphabet_chars[i] for i in y))
print("write valid set")
print("valid set length: " + str(ys.__len__()))
file_name = "data_set/numbers_image_valid.klepto"
print("valid set name: " + file_name)
d = klepto.archives.file_archive(file_name, cached=True,serialized=True)
d['x'] = xs
d['y'] = ys
d.dump()
d.clear()
xs = []
ys = []
for i in range(int(scribe_args['num_samples'] * 0.3)):
x, y = scriber.get_sample()
xs.append(np.transpose(x))
ys.append(np.asarray(y))
print(y, ''.join(alphabet_chars[i] for i in y))
print("write test set")
print("test set length: " + str(ys.__len__()))
file_name = "data_set/numbers_image_test.klepto"
print("test set name: " + file_name)
d = klepto.archives.file_archive(file_name, cached=True,serialized=True)
d['x'] = xs
d['y'] = ys
d.dump()
d.clear()
```
#### File: recnet/layer_pool/recurrent_layer.py
```python
from __future__ import absolute_import, print_function, division
import numpy as np
import theano
import theano.tensor as T
from collections import OrderedDict
from .layer_master import LayerMaster
###### Conventional recurrent layer
########################################
class conv(LayerMaster):
"""
Hyperbolic tangent or rectified linear unit layer
"""
def __init__(self, rng, trng, n_in, n_out, n_batches, activation, old_weights=None,go_backwards=False): # , prm_structure, layer_no ):
# Parameters
self.go_backwards = go_backwards
self.activation = activation
# Random
self.rng = rng
self.trng = trng
        if old_weights is None:
np_weights = OrderedDict()
# np_weights['w_in_hidden'] = self.rec_uniform_sqrt(rng, n_in, n_out)
# np_weights['w_hidden_hidden'] = self.sqr_ortho(rng, n_out)
# np_weights['b_act'] = np.zeros(n_out)
np_weights['w_in_hidden'] = 1 * (np.random.rand(n_in, n_out) - 0.5) #self.rec_uniform_sqrt(rng, n_in, n_out)
np_weights['w_hidden_hidden'] = 1 * (np.random.rand(n_out, n_out) - 0.5) #self.sqr_ortho(rng, n_out)
np_weights['b_act'] = 1 * (np.random.rand(n_out) - 0.5) #np.zeros(n_out)
self.weights = []
for kk, pp in np_weights.items():
self.weights.append(theano.shared(name=kk, value=pp.astype(T.config.floatX)))
# load old weights
else:
self.weights = []
for pp in old_weights:
self.weights.append(theano.shared(value=pp.astype(T.config.floatX)))
        # Init last output and cell state  # TODO: make initialization of hidden state learnable
#init_hidden = 1 * (np.random.rand(n_batches, n_out) - 0.5)
#init_hidden = init_hidden.astype(dtype=theano.config.floatX)
init_hidden = np.zeros([n_batches, n_out]).astype(dtype=theano.config.floatX)
self.t_init_hidden = theano.shared(name='init_hidden', value=init_hidden.astype(T.config.floatX))
def t_forward_step(self, mask, cur_w_in_sig, pre_out_sig, w_hidden_hidden, b_act):
pre_w_sig = T.dot(pre_out_sig, w_hidden_hidden)
inner_act = self.activation
out_sig = inner_act(T.add(cur_w_in_sig, pre_w_sig, b_act))
mask = T.addbroadcast(mask, 1)
out_sig_m = mask * out_sig + (1. - mask) * pre_out_sig
return [out_sig_m]
def sequence_iteration(self, in_seq, mask, use_dropout, dropout_value=1):
in_seq_d = T.switch(use_dropout,
(in_seq *
self.trng.binomial(in_seq.shape,
p=dropout_value, n=1,
dtype=in_seq.dtype)),
in_seq)
w_in_seq = T.dot(in_seq_d, self.weights[0])
out_seq, updates = theano.scan(
fn=self.t_forward_step,
sequences=[mask, w_in_seq],
outputs_info=[self.t_init_hidden],
non_sequences=self.weights[1:],
go_backwards=self.go_backwards,
truncate_gradient=-1,
# n_steps=50,
strict=True,
allow_gc=False,
)
return out_seq
###### LSTM Layer with peepholes
########################################
class LSTMp(LayerMaster):
"""
Long short term memory layer
key ideas of implementation:
- peepholes at input gate and forget gate but not at output gate
- calculate dot product of input and input weights before scan function
    - calculate dot product of previous output and weights only once per sequence
- weights and biases separate
- one function for each step, one for each sequence
"""
def __init__(self, rng, trng, n_in, n_out, n_batches, activation, old_weights=None,
go_backwards=False): # , prm_structure, layer_no ):
# Parameters
self.go_backwards = go_backwards
self.activation = activation
# Random
self.rng = rng
self.trng = trng
self.t_n_out = theano.shared(name='t_n_out', value=n_out)
        if old_weights is None:
np_weights = OrderedDict()
# Peephole weights (input- forget- output- gate)
# np_weights['w_ig_c'] = self.vec_uniform_sqrt(self.rng, n_out)
# np_weights['w_fg_c'] = self.vec_uniform_sqrt(self.rng, n_out) + 2 # Forgot gate with +2 initialized for keeping sequences right from begin
# np_weights['w_og_c'] = self.vec_uniform_sqrt(self.rng, n_out)
# # Previous output weights
# np_weights['w_ifco'] = self.rec_ortho(rng, n_out, 4)
# np_weights['b_ifco'] = np.zeros(4 * n_out)
# # Input weights
# np_weights['w_ifco_x'] = self.rec_uniform_sqrt(rng, n_in, 4 * n_out)
# np_weights['b_ifco_x'] = np.zeros(4 * n_out)
# todo initialization
np_weights['w_ig_c'] = rng.uniform(-np.sqrt(1./n_out), np.sqrt(1./n_out), n_out)
            np_weights['w_fg_c'] = rng.uniform(-np.sqrt(1./n_out), np.sqrt(1./n_out), n_out)+2  # Forget gate initialized with +2 to keep sequences right from the beginning
np_weights['w_og_c'] = rng.uniform(-np.sqrt(1./n_out), np.sqrt(1./n_out), n_out)
# Previous output weights
np_weights['w_ifco'] = rng.uniform(-np.sqrt(1./n_out), np.sqrt(1./n_out), (n_out, 4*n_out))
np_weights['b_ifco'] = np.zeros(4*n_out)
# Input weights
np_weights['w_ifco_x'] = rng.uniform(-np.sqrt(1./n_out), np.sqrt(1./n_out), (n_in, 4*n_out))
np_weights['b_ifco_x'] = np.zeros(4*n_out)
self.weights = []
for kk, pp in np_weights.items():
self.weights.append(theano.shared(name=kk, value=pp.astype(T.config.floatX)))
# load old weights
else:
self.weights = []
for pp in old_weights:
self.weights.append(theano.shared(value=pp.astype(T.config.floatX)))
# Init last output and cell state
ol_t00_np1 = np.zeros([n_batches, n_out]).astype(dtype=theano.config.floatX)
cs_t00_np1 = np.zeros([n_batches, n_out]).astype(dtype=theano.config.floatX)
self.t_ol_t00 = theano.shared(name='ol_b_t00', value=ol_t00_np1.astype(T.config.floatX))
self.t_cs_t00 = theano.shared(name='cs_b_t00', value=cs_t00_np1.astype(T.config.floatX))
def t_forward_step(self, mask, cur_w_in_sig, pre_out_sig, pre_cell_sig, w_ig_c, w_fg_c, w_og_c, w_ifco, b_ifco,
t_n_out):
ifco = T.add(T.dot(pre_out_sig, w_ifco), b_ifco)
inner_act = self.activation
gate_act = self.sigmoid()
# Input Gate
ig_t1 = gate_act(T.add(ifco[:, 0:t_n_out], T.mul(pre_cell_sig, w_ig_c), cur_w_in_sig[:, 0:t_n_out]))
# Forget Gate
fg_t1 = gate_act(T.add(ifco[:, 1 * t_n_out:2 * t_n_out], T.mul(pre_cell_sig, w_fg_c),
cur_w_in_sig[:, 1 * t_n_out:2 * t_n_out]))
# Cell State
cs_t1 = T.add(T.mul(fg_t1, pre_cell_sig), T.mul(ig_t1, inner_act(
T.add(ifco[:, 2 * t_n_out:3 * t_n_out], cur_w_in_sig[:, 2 * t_n_out:3 * t_n_out]))))
mask = T.addbroadcast(mask, 1)
cs_t1 = mask * cs_t1 + (1. - mask) * pre_cell_sig
# functionality: cs_t1 = T.switch(mask , cs_t1, pre_cell_sig)
# Output Gate
og_t1 = gate_act(
T.add(ifco[:, 3 * t_n_out:4 * t_n_out], T.mul(cs_t1, w_og_c), cur_w_in_sig[:, 3 * t_n_out:4 * t_n_out]))
# Output LSTM
out_sig = T.mul(og_t1, inner_act(cs_t1))
out_sig = mask * out_sig + (1. - mask) * pre_out_sig
return [out_sig, cs_t1]
def sequence_iteration(self, in_seq, mask, use_dropout, dropout_value=1):
in_seq_d = T.switch(use_dropout,
(in_seq *
self.trng.binomial(in_seq.shape,
p=dropout_value, n=1,
dtype=in_seq.dtype)),
in_seq)
w_in_seq = T.add(T.dot(in_seq_d, self.weights[5]), self.weights[6])
[out_seq, cell_seq], updates = theano.scan(
fn=self.t_forward_step,
sequences=[mask, w_in_seq],
outputs_info=[self.t_ol_t00, self.t_cs_t00],
non_sequences=self.weights[:5] + [self.t_n_out],
go_backwards=self.go_backwards,
truncate_gradient=-1,
# n_steps=50,
strict=True,
allow_gc=False,
)
return out_seq
###### LSTM without peepholes Layer
########################################
class LSTM(LayerMaster):
"""
Long short term memory layer without peepholes
key ideas of implementation:
- calculate dot product of input and input weights before scan function
    - calculate dot product of previous output and weights only once per sequence
- weights and biases separate
- one function for each step, one for each sequence
"""
def __init__(self, rng, trng, n_in, n_out, n_batches, activation, old_weights=None,
go_backwards=False): # , prm_structure, layer_no ):
# Parameters
self.go_backwards = go_backwards
self.activation = activation
# Random
self.rng = rng
self.trng = trng
self.t_n_out = theano.shared(name='t_n_out', value=n_out)
        if old_weights is None:
np_weights = OrderedDict()
# Previous output weights
np_weights['w_ifco'] = self.rec_ortho(rng, n_out, 4)
np_weights['b_ifco'] = np.zeros(4 * n_out)
# Input weights
np_weights['w_ifco_x'] = self.rec_uniform_sqrt(rng, n_in, 4 * n_out)
np_weights['b_ifco_x'] = np.zeros(4 * n_out)
self.weights = []
for kk, pp in np_weights.items():
self.weights.append(theano.shared(name=kk, value=pp.astype(T.config.floatX)))
# load old weights
else:
self.weights = []
for pp in old_weights:
self.weights.append(theano.shared(value=pp.astype(T.config.floatX)))
# Init last output and cell state
ol_t00_np1 = np.zeros([n_batches, n_out]).astype(dtype=theano.config.floatX)
cs_t00_np1 = np.zeros([n_batches, n_out]).astype(dtype=theano.config.floatX)
self.t_ol_t00 = theano.shared(name='ol_b_t00', value=ol_t00_np1.astype(T.config.floatX))
self.t_cs_t00 = theano.shared(name='cs_b_t00', value=cs_t00_np1.astype(T.config.floatX))
# Outputs & cell states
self.t_o = T.matrix('ol', dtype=theano.config.floatX)
self.t_cs = T.vector('cs', dtype=theano.config.floatX)
def t_forward_step(self, mask, cur_w_in_sig, pre_out_sig, pre_cell_sig, w_ifco, b_ifco,
t_n_out):
ifco = T.add(T.dot(pre_out_sig, w_ifco), b_ifco)
inner_act = self.activation
gate_act = self.sigmoid()
# Input Gate
ig_t1 = gate_act(T.add(ifco[:, 0:t_n_out], cur_w_in_sig[:, 0:t_n_out]))
# Forget Gate
fg_t1 = gate_act(T.add(ifco[:, 1 * t_n_out:2 * t_n_out],
cur_w_in_sig[:, 1 * t_n_out:2 * t_n_out]))
# Cell State
cs_t1 = T.add(T.mul(fg_t1, pre_cell_sig), T.mul(ig_t1, inner_act(
T.add(ifco[:, 2 * t_n_out:3 * t_n_out], cur_w_in_sig[:, 2 * t_n_out:3 * t_n_out]))))
mask = T.addbroadcast(mask, 1)
cs_t1 = mask * cs_t1 + (1. - mask) * pre_cell_sig
# functionality: cs_t1 = T.switch(mask , cs_t1, pre_cell_sig)
# Output Gate
og_t1 = gate_act(
T.add(ifco[:, 3 * t_n_out:4 * t_n_out], cur_w_in_sig[:, 3 * t_n_out:4 * t_n_out]))
# Output LSTM
out_sig = T.mul(og_t1, inner_act(cs_t1))
out_sig = mask * out_sig + (1. - mask) * pre_out_sig
return [out_sig, cs_t1]
def sequence_iteration(self, in_seq, mask, use_dropout, dropout_value=1):
in_seq_d = T.switch(use_dropout,
(in_seq *
self.trng.binomial(in_seq.shape,
p=dropout_value, n=1,
dtype=in_seq.dtype)),
in_seq)
w_in_seq = T.add(T.dot(in_seq_d, self.weights[2]), self.weights[3])
[out_seq, cell_seq], updates = theano.scan(
fn=self.t_forward_step,
sequences=[mask, w_in_seq],
outputs_info=[self.t_ol_t00, self.t_cs_t00],
non_sequences=self.weights[:2] + [self.t_n_out],
go_backwards=self.go_backwards,
truncate_gradient=-1,
# n_steps=50,
strict=True,
allow_gc=False,
)
return out_seq
###### GRU Layer
########################################
class GRU(LayerMaster):
"""
Gated recurrent unit layer
key ideas of implementation:
"""
def __init__(self, rng, trng, n_in, n_out, n_batches, activation, old_weights=None,go_backwards=False): #, prm_structure, layer_no ):
# Parameters
self.go_backwards = go_backwards
self.activation = activation
# Random
self.rng = rng
self.trng = trng
        if old_weights is None:
np_weights = OrderedDict()
# Input weights for reset/update gate and update weights
np_weights['w_rzup'] = self.rec_uniform_sqrt(rng,n_in, 3 * n_out ) # rng.uniform(-0.1, 0.1,(n_in, 3 * n_out))
np_weights['b_rzup'] = np.zeros( 3 * n_out )
# reset and update gate
np_weights['u_rz'] = self.rec_ortho(rng, n_out, 2) #self.uniform(-0.1, 0.1, (n_out, n_out))
# reset gate
#np_weights['u_r'] = self.sqr_ortho(rng, n_out) #self.uniform(-0.1, 0.1, (n_out, n_out))
# update gate
#np_weights['u_z'] = self.sqr_ortho(rng, n_out) #rng.uniform(-0.1, 0.1, (n_out, n_out))
# update weights
np_weights['u_up'] = self.sqr_ortho(rng, n_out) #rng.uniform(-0.1, 0.1, (n_out, n_out))
self.weights = []
for kk, pp in np_weights.items():
self.weights.append(theano.shared(name=kk, value=pp.astype(T.config.floatX)))
# load old weights
else:
self.weights = []
for pp in old_weights:
self.weights.append(theano.shared(value=pp.astype(T.config.floatX)))
self.t_n_out = theano.shared(name='t_n_out', value=n_out)
#Init last output and cell state
ol_t00_np1 = np.zeros([n_batches,n_out]).astype(dtype=theano.config.floatX)
self.t_ol_t00 = theano.shared(name='ol_b_t00', value=ol_t00_np1.astype(T.config.floatX))
def t_forward_step(self,mask, rzup_in_sig, h_pre, u_rz, u_up, t_n_out): #u_r, u_z,
signal_act = self.activation
gate_act = self.sigmoid()
preact = T.dot( h_pre, u_rz)
r = gate_act( T.add( rzup_in_sig[:, 0:t_n_out] , preact[:, 0:t_n_out] )) #T.dot( h_pre, u_r) ) )
z = gate_act( T.add( rzup_in_sig[:, t_n_out:2 * t_n_out] , preact[:, t_n_out:2 * t_n_out] )) #T.dot(h_pre, u_z) ))
h_update = signal_act( T.add( rzup_in_sig[:, 2*t_n_out:3*t_n_out] , T.dot( T.mul( h_pre, r), u_up) ))
h_new = T.add( (1.-z) * h_update , z * h_pre )
mask = T.addbroadcast(mask, 1)
out_sig = T.add( mask * h_new , (1. - mask) * h_pre )
return out_sig
def sequence_iteration(self, in_seq, mask, use_dropout,dropout_value=1):
in_seq_d = T.switch(use_dropout,
(in_seq *
self.trng.binomial(in_seq.shape,
p=dropout_value, n=1,
dtype=in_seq.dtype)),
in_seq)
rz_in_seq = T.add( T.dot(in_seq_d, self.weights[0]) , self.weights[1] )
out_seq, updates = theano.scan(
fn=self.t_forward_step,
sequences=[mask, rz_in_seq], # in_seq_d],
outputs_info=[self.t_ol_t00],
non_sequences=[i for i in self.weights][2:] + [self.t_n_out],
go_backwards = self.go_backwards,
truncate_gradient=-1,
#n_steps=50,
strict=True,
allow_gc=False,
)
return out_seq
``` |
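The layers above fuse all gate weights into a single matrix of width `4*n_out` (LSTM) or `3*n_out` (GRU) and slice the pre-activations per gate inside the step function. The following NumPy sketch of one plain LSTM step (no peepholes, masking, dropout, or Theano scan) only illustrates that slicing; it assumes tanh as the inner activation and is not part of the library.
```python
import numpy as np

def lstm_step(x_t, h_prev, c_prev, w_ifco_x, b_ifco_x, w_ifco, b_ifco, n_out):
    """One LSTM step mirroring the fused i/f/c/o weight layout used above (illustration only)."""
    sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
    # In the Theano code the input projection is computed once before scan;
    # here both projections are summed and then sliced per gate.
    pre = x_t @ w_ifco_x + b_ifco_x + h_prev @ w_ifco + b_ifco      # (batch, 4*n_out)
    i = sigmoid(pre[:, 0 * n_out:1 * n_out])                        # input gate
    f = sigmoid(pre[:, 1 * n_out:2 * n_out])                        # forget gate
    c = f * c_prev + i * np.tanh(pre[:, 2 * n_out:3 * n_out])       # new cell state
    o = sigmoid(pre[:, 3 * n_out:4 * n_out])                        # output gate
    return o * np.tanh(c), c
```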
{
"source": "joerghall/cbt",
"score": 2
} |
#### File: cbt/buildpy/upload-artifactory.py
```python
from __future__ import print_function
import argparse
import os
import os.path
import re
import requests
import sys
SCRIPT_PATH = os.path.realpath(__file__)
def upload(package_path, url, apiuser, apikey):
print("Starting upload {0} to {1}".format(package_path, url))
parameters = {"publish": "1", "override": "1"}
with open(package_path, "rb") as fp:
response = requests.put(url, auth=(apiuser, apikey), params=parameters, data=fp)
if response.status_code != 201:
raise RuntimeError("Uploading package {0} to {1} - code: {2} msg: {3}".format(package_path, url, response.status_code, response.text))
else:
print("Completed uploading package {0} to {1}".format(package_path, url))
def main_inner(args):
if args.apiuser:
print("API key provided")
elif args.apiuser is None and "apiuser" in os.environ:
args.apiuser = os.environ["apiuser"]
else:
raise RuntimeError("No apiuser provided")
if args.apipwd:
print("API pwd provided")
elif args.apipwd is None and "apipwd" in os.environ:
args.apipwd = os.environ["apipwd"]
else:
raise RuntimeError("No apipwd provided")
file = args.file
url = args.url
upload(file, url, args.apiuser, args.apipwd)
def main():
parser = argparse.ArgumentParser(description="Upload to artifactory")
parser.add_argument("--file", help="package path to upload", type=os.path.abspath)
parser.add_argument("--url", help="url to upload")
parser.add_argument("--apiuser", default=None, help="API user")
parser.add_argument("--apipwd", default=None, help="API pwd")
args = parser.parse_args()
result = main_inner(args)
if result is None:
sys.exit(0)
else:
sys.stderr.write("{} validation errors were detected.\n".format(len(result)))
for error in result:
sys.stderr.write(" {}\n".format(error))
sys.exit(1)
if __name__ == "__main__":
main()
``` |
{
"source": "joergkiesewetter/eth-market-analysis",
"score": 3
} |
#### File: joergkiesewetter/eth-market-analysis/calculate_realized_market_capitalization.py
```python
import json
import os
from datetime import date, datetime, timedelta
import config
import manage_realized_market_capitalization
from exchange_rates import util
from exchange_rates.util import get_first_market_price_date
from manage_realized_market_capitalization import BASE_DIRECTORY, get_first_data_timestamp
from util import logging
STORE_DIRECTORY = '/market-data/final/realized_market_cap/'
log = logging.get_custom_logger(__name__, config.LOG_LEVEL)
def calculate_realized_market_capitalization(token):
symbol = token['symbol']
symbol_file = STORE_DIRECTORY + symbol
os.makedirs(STORE_DIRECTORY, exist_ok=True)
max_time = datetime.utcnow()
max_time = max_time.replace(hour=0, minute=0, second=0, microsecond=0)
stop_processing = False
date_to_process = get_first_data_timestamp(symbol)
if not date_to_process:
log.debug('no date to calculate realized market cap')
return
date_last_processed = _get_last_processed_date(symbol)
date_to_process = max(date_to_process, date_last_processed + timedelta(days=1))
log.debug('calculate_realized_market_cap for ' + symbol)
if date_to_process >= max_time:
return
with open(symbol_file, 'a') as file:
while not stop_processing:
data = _get_data_to_process(symbol, date_to_process)
result = _analyse_data(token, data, date_to_process)
date_string = date_to_process.strftime('%Y-%m-%d')
result_string = date_string + ',' + \
str(result['num_coins']) + ',' + \
str(result['circulating_supply']) + ',' + \
str(result['not_moved_coins']) + ',' + \
str(result['market_cap']) + ',' + \
str(result['realized_market_cap']) + ',' + \
str(result['mvrv']) + ',' + \
str(result['coins_older_1y']) + ',' + \
str(result['num_transactions']) + ',' + \
str(result['transaction_volume']) + ',' + \
str(result['num_holder']) + ',' + \
str(result['exchange_rate'])
file.write(result_string + '\n')
file.flush()
log.debug('calculate_realized_market_cap for ' + date_string)
date_to_process += timedelta(days=1)
if date_to_process >= max_time:
stop_processing = True
def _get_last_processed_date(symbol):
symbol_file = STORE_DIRECTORY + symbol
last_file_timestamp = '1970-01-01'
if not os.path.exists(symbol_file):
return datetime.utcfromtimestamp(0)
with open(symbol_file, 'r') as file:
for line in file:
line_parts = line.split(',')
last_file_timestamp = line_parts[0]
return datetime.strptime(last_file_timestamp, '%Y-%m-%d')
##
# return [[<holder_address>, <balance>, <token data>], ...]
#
def _get_data_to_process(symbol, date):
try:
with open(os.path.join(BASE_DIRECTORY, symbol, date.strftime('%Y-%m-%d') + '.csv'), 'rt') as file:
return_data = []
for line in file:
return_data.append(line.split(';'))
return return_data
except:
return []
def _analyse_data(token, data, date_to_process) :
symbol = token['symbol']
return_data = {
'num_coins': 0,
'circulating_supply': 0,
'not_moved_coins': 0,
'market_cap': 0,
'realized_market_cap': 0,
'mvrv': 0,
'coins_older_1y': 0,
'num_transactions': 0,
'transaction_volume': 0,
'num_holder': 0,
'exchange_rate': 0,
}
market_entry_date = get_first_market_price_date(symbol)
date_1y = _add_years(date_to_process, -1)
exchange_rate = util.get_local_exchange_rate(symbol, date_to_process)
if not exchange_rate:
exchange_rate = token['init_price']
for line in data:
for coin_data in json.loads(line[2]):
# calculate total amount of coins
return_data['num_coins'] += coin_data[1]
# calculate number of coins never traded after market entry
if int(coin_data[0]) < market_entry_date.timestamp():
return_data['not_moved_coins'] += coin_data[1]
# calculate total realized market cap
amount_coins = coin_data[1] / pow(10, 18)
if line[0] not in token['token_contracts'] and \
line[0] not in token['lending_contracts'] and \
line[0] not in token['team_accounts']:
return_data['circulating_supply'] += amount_coins
return_data['realized_market_cap'] += amount_coins * coin_data[2]
# calculate number of coins not moved for more than a year
if int(coin_data[0]) < date_1y.timestamp():
return_data['coins_older_1y'] += coin_data[1]
# calculate num holder
if (int(line[1]) / 1e18) * exchange_rate > 0.01:
# if coin_data[1] > 1e20:
return_data['num_holder'] += 1
transactions = manage_realized_market_capitalization.get_transaction_data(symbol, date_to_process)
for transaction in transactions:
# calculate num trades
return_data['num_transactions'] += 1
# calculate trade volume
return_data['transaction_volume'] += int(transaction[7])
return_data['num_coins'] /= pow(10, 18)
return_data['not_moved_coins'] /= pow(10, 18)
return_data['coins_older_1y'] /= pow(10, 18)
return_data['transaction_volume'] /= pow(10, 18)
return_data['exchange_rate'] = exchange_rate
return_data['market_cap'] = return_data['exchange_rate'] * return_data['circulating_supply']
if return_data['realized_market_cap'] > 0:
return_data['mvrv'] = return_data['market_cap'] / return_data['realized_market_cap']
else:
return_data['mvrv'] = 0
return return_data
def _add_years(d, years):
"""Return a date that's `years` years after the date (or datetime)
object `d`. Return the same calendar date (month and day) in the
destination year, if it exists, otherwise use the following day
(thus changing February 29 to March 1).
"""
try:
return d.replace(year = d.year + years)
except ValueError:
return d + (date(d.year + years, 1, 1) - date(d.year, 1, 1))
```
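A quick illustration of the leap-year handling described in the `_add_years` docstring above, assuming the helper from this file is in scope.
```python
from datetime import date

print(_add_years(date(2020, 2, 29), 1))   # 2021-03-01 (Feb 29 does not exist in 2021)
print(_add_years(date(2020, 2, 29), 4))   # 2024-02-29 (the calendar date exists again)
print(_add_years(date(2019, 6, 15), -1))  # 2018-06-15
```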
#### File: eth-market-analysis/exchange_rates/source_coin_gecko.py
```python
import os
import time
from datetime import datetime, timedelta
import config
from manage_transactions import get_first_transaction_timestamp
from provider.coingecko import CoinGecko
from util import logging
BASE_DIRECTORY = '/market-data/raw/exchange_rates/'
log = logging.get_custom_logger(__name__, config.LOG_LEVEL)
def update_exchange_rates(symbol: str):
os.makedirs(BASE_DIRECTORY, exist_ok=True)
coin_gecko_id = CoinGecko.get_id_by_symbol(symbol)
if not coin_gecko_id:
return
max_time = datetime.utcnow()
max_time = max_time.replace(hour=0, minute=0, second=0, microsecond=0)
path = os.path.join(BASE_DIRECTORY, symbol + '.csv')
if not os.path.isfile(path):
act_date = get_first_transaction_timestamp(symbol)
else:
act_date = _get_last_timestamp(path) + timedelta(days=1)
log.debug('updating token exchange rates')
with open(path, 'a') as file:
while act_date < max_time:
price = CoinGecko.get_exchange_rate(coin_gecko_id, act_date)
file.write(','.join([act_date.strftime('%Y-%m-%d'), str(price)]) + '\n')
file.flush()
time.sleep(1)
act_date += timedelta(days=1)
def _get_last_timestamp(path):
with open(path, 'rt') as file:
try:
last_line = file.readlines()[-1]
last_line = last_line.split(',')
return datetime.strptime(last_line[0], '%Y-%m-%d')
except:
return datetime(year=2018, month=1, day=1)
```
#### File: joergkiesewetter/eth-market-analysis/manage_balances.py
```python
import json
import os
from datetime import datetime, timedelta
import config
from exchange_rates.util import get_local_exchange_rate, get_first_market_price_date
from manage_transactions import get_first_transaction_timestamp, get_transaction_data
from util import logging
BASE_DIRECTORY = '/market-data/raw/balance_data/'
log = logging.get_custom_logger(__name__, config.LOG_LEVEL)
def update_balances(token):
symbol = token['symbol']
symbol_dir = BASE_DIRECTORY + symbol
os.makedirs(symbol_dir, exist_ok=True)
max_time = datetime.utcnow()
max_time = max_time.replace(hour=0, minute=0, second=0, microsecond=0)
stop_processing = False
date_to_process = _get_next_time_to_process(symbol, symbol_dir)
if date_to_process >= max_time:
stop_processing = True
state = _load_state(symbol_dir, date_to_process)
log.debug('manage balances for ' + symbol)
while not stop_processing:
transactions = get_transaction_data(symbol, date_to_process)
log.debug('managing balances for ' + str(date_to_process))
for transaction in transactions:
block_number = transaction[0]
timestamp = transaction[1]
hash = transaction[2]
nonce = transaction[3]
block_hash = transaction[4]
from_address = transaction[5]
to_address = transaction[6]
value = int(transaction[7])
token_decimal = transaction[8]
transaction_index = transaction[9]
gas = transaction[10]
gas_price = transaction[11]
gas_used = transaction[12]
cumulative_gas_used = transaction[13]
input = transaction[14]
confirmations = transaction[15]
if from_address in state.keys():
from_account = state[from_address]
else:
from_account = None
if to_address in state.keys():
to_account = state[to_address]
else:
to_account = {
'balance': 0,
'balance_normalized': 0,
}
state[to_address] = to_account
# change balances
if from_account:
from_account['balance'] -= value
from_account['balance'] = max(from_account['balance'], 0)
to_account['balance'] += value
# change normalized balances
if from_address not in token['lending_contracts'] and to_address not in token['lending_contracts']:
if from_account:
from_account['balance_normalized'] -= value
from_account['balance_normalized'] = max(from_account['balance_normalized'], 0)
to_account['balance_normalized'] += value
else:
# when the transaction contains a lending address, the transaction should be visible from the point of view of the lending address
# for the user address, it should not be visible
if from_account and from_address in token['lending_contracts']:
from_account['balance_normalized'] -= value
from_account['balance_normalized'] = max(from_account['balance_normalized'], 0)
if to_address in token['lending_contracts']:
to_account['balance_normalized'] += value
# all transactions are processed, saving state to a file
_save_state(symbol_dir, date_to_process, state)
date_to_process = date_to_process + timedelta(days=1)
if date_to_process >= max_time:
stop_processing = True
def _get_next_time_to_process(symbol, symbol_dir):
last_file_timestamp = None
last_file = None
files = [f for f in os.listdir(symbol_dir) if os.path.isfile(os.path.join(symbol_dir, f))]
# get the file with the highest timestamp
for file in files:
filename = file.split('.')[0]
timestamp = datetime.strptime(filename, '%Y-%m-%d')
if not last_file_timestamp or timestamp > last_file_timestamp:
last_file_timestamp = timestamp
last_file = file
if last_file_timestamp:
return last_file_timestamp + timedelta(days=1)
else:
return get_first_transaction_timestamp(symbol)
def _load_state(symbol_dir, date_to_process):
date_to_load = date_to_process - timedelta(days=1)
path = os.path.join(symbol_dir, date_to_load.strftime('%Y-%m-%d') + '.csv')
if not os.path.isfile(path):
return {}
return_data = {}
with open(path, 'rt') as file:
for line in file:
line_parts = line.split(';')
return_data[line_parts[0]] = {
'balance': int(line_parts[1]),
'balance_normalized': int(line_parts[2]),
}
return return_data
def _save_state(symbol_dir, date_to_process, state):
path = os.path.join(symbol_dir, date_to_process.strftime('%Y-%m-%d') + '.csv')
if os.path.isfile(path):
os.remove(path)
with open(path, 'at') as file:
for key, value in state.items():
if value['balance_normalized'] > 0 or value['balance'] > 0:
file.write(key + ';' + str(value['balance']) + ';' + str(value['balance_normalized']) + '\n')
def get_first_data_timestamp(symbol):
symbol_dir = BASE_DIRECTORY + symbol
last_file_timestamp = None
files = [f for f in os.listdir(symbol_dir) if os.path.isfile(os.path.join(symbol_dir, f))]
# get the file with the highest timestamp
for file in files:
filename = file.split('.')[0]
timestamp = datetime.strptime(filename, '%Y-%m-%d')
if not last_file_timestamp or timestamp < last_file_timestamp:
last_file_timestamp = timestamp
return last_file_timestamp
def get_last_data_timestamp(symbol):
symbol_dir = BASE_DIRECTORY + symbol
last_file_timestamp = None
files = [f for f in os.listdir(symbol_dir) if os.path.isfile(os.path.join(symbol_dir, f))]
# get the file with the highest timestamp
for file in files:
filename = file.split('.')[0]
timestamp = datetime.strptime(filename, '%Y-%m-%d')
if not last_file_timestamp or timestamp > last_file_timestamp:
last_file_timestamp = timestamp
return last_file_timestamp
```
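A toy illustration of the normalization rule described in the comments of `update_balances`: a deposit into a lending contract moves both raw balances and counts toward the contract's normalized balance, but leaves the depositor's normalized balance untouched. Addresses and amounts are hypothetical.
```python
lending_contracts = {"0xlend"}
state = {"0xuser": {"balance": 100, "balance_normalized": 100},
         "0xlend": {"balance": 0, "balance_normalized": 0}}

value, from_address, to_address = 40, "0xuser", "0xlend"

# Raw balances always move with the transfer.
state[from_address]["balance"] -= value
state[to_address]["balance"] += value

# Normalized balances: the lending contract records the deposit,
# while the depositing user's normalized balance is left untouched.
if to_address in lending_contracts:
    state[to_address]["balance_normalized"] += value

print(state["0xuser"])  # {'balance': 60, 'balance_normalized': 100}
print(state["0xlend"])  # {'balance': 40, 'balance_normalized': 40}
```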
#### File: eth-market-analysis/provider/coingecko.py
```python
import time
from datetime import datetime
import requests
import config
from util import logging
COIN_GECKO_BASE_URL = 'https://api.coingecko.com/api/v3/'
log = logging.get_custom_logger(__name__, config.LOG_LEVEL)
class CoinGecko:
@staticmethod
def get_id_by_symbol(symbol):
url = COIN_GECKO_BASE_URL + 'coins/list'
response = requests.get(url)
json = response.json()
symbol_lower = symbol.lower()
for token in json:
if token['symbol'].lower() == symbol_lower:
return token['id']
return None
@staticmethod
def get_exchange_rate(coin_gecko_id, timestamp):
url = COIN_GECKO_BASE_URL + 'coins/' + coin_gecko_id + '/history?date=' + timestamp.strftime('%d-%m-%Y')
response = requests.get(url)
while response.status_code != 200:
log.warning(response.status_code)
response = requests.get(url)
time.sleep(1)
json = response.json()
if 'market_data' in json:
return json['market_data']['current_price']['usd']
return None
@staticmethod
def get_market_cap_by_date(coingecko_id, date, currency):
date_string = date.strftime('%d-%m-%Y')
url = COIN_GECKO_BASE_URL + 'coins/' + coingecko_id + '/history?date=' + date_string
response = requests.get(url)
while response.status_code != 200:
log.warning(response.status_code)
response = requests.get(url)
time.sleep(1)
json = response.json()
if 'market_data' in json:
return json['market_data']['market_cap'][currency]
return None
```
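A small usage sketch of the provider above; it performs live HTTP calls against the public CoinGecko API, and the symbol and date are only examples.
```python
from datetime import datetime

from provider.coingecko import CoinGecko

coin_id = CoinGecko.get_id_by_symbol("LUNA")  # resolve the CoinGecko id once
if coin_id:
    day = datetime(2021, 1, 1)
    price = CoinGecko.get_exchange_rate(coin_id, day)
    market_cap = CoinGecko.get_market_cap_by_date(coin_id, day, "usd")
    print(coin_id, price, market_cap)
```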
#### File: eth-market-analysis/provider/nexustracker.py
```python
import time
import requests
import config
from util import logging
log = logging.get_custom_logger(__name__, config.LOG_LEVEL)
class NexusTracker:
@staticmethod
def get_exchange_rates():
url = 'https://nexustracker.io/nxm_price'
response = requests.get(url)
while response.status_code != 200:
log.warning(response.status_code)
response = requests.get(url)
time.sleep(1)
json = response.json()
if 'ETH' in json:
return json['ETH']
return None
``` |
{
"source": "joergkiesewetter/terra-analytics",
"score": 2
} |
#### File: joergkiesewetter/terra-analytics/calculate_daily_retention_data.py
```python
import json
import os
from datetime import timedelta, datetime
import pytz
import calculate_daily_transaction_data
import calculate_total_user_data
import config
from manage_transactions import get_first_transaction_timestamp
from util import logging
STORE_DAILY_RETENTION_DATA = '/terra-data/v2/raw/stats_daily_retention_data'
log = logging.get_custom_logger(__name__, config.LOG_LEVEL)
def calculate_daily_retention_data():
os.makedirs(STORE_DAILY_RETENTION_DATA, exist_ok=True)
max_time = datetime.utcnow()
max_time = max_time.replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=pytz.UTC)
stop_processing = False
date_to_process = get_first_transaction_timestamp()
date_last_processed = _get_last_processed_date()
date_to_process = max(date_to_process, date_last_processed - timedelta(days=31))
log.debug('calculate: retention')
if date_to_process >= max_time:
return
while not stop_processing:
log.debug('creating retention data for ' + date_to_process.strftime('%Y-%m-%d'))
final_data = _calculate_retention_data(date_to_process)
for currency in final_data.keys():
file_path = os.path.join(STORE_DAILY_RETENTION_DATA, currency, date_to_process.strftime('%Y-%m-%d') + '.json')
os.makedirs(os.path.join(STORE_DAILY_RETENTION_DATA, currency), exist_ok=True)
with open(file_path, 'w') as file:
file.write(json.dumps(final_data[currency]))
date_to_process += timedelta(days=1)
if date_to_process >= max_time:
stop_processing = True
def _calculate_retention_data(start_date):
new_user_data = calculate_total_user_data.get_new_user_for_day(start_date)
retention_data = {}
date_7d = start_date + timedelta(days=7)
date_14d = start_date + timedelta(days=14)
date_30d = start_date + timedelta(days=30)
user_data_7d = calculate_daily_transaction_data.get_user(date_7d)
user_data_14d = calculate_daily_transaction_data.get_user(date_14d)
user_data_30d = calculate_daily_transaction_data.get_user(date_30d)
for currency in new_user_data:
if currency not in retention_data.keys():
retention_data[currency] = {}
new_user_list = [key for (key, value) in new_user_data[currency].items()]
if len(new_user_list) <= 0:
retention_data[currency]['7d'] = 0
retention_data[currency]['14d'] = 0
retention_data[currency]['30d'] = 0
continue
if currency in user_data_7d:
user_list_7d = [value['address'] for value in user_data_7d[currency]]
user_list_7d_intersection = _intersection(new_user_list, user_list_7d)
retention_data[currency]['7d'] = len(user_list_7d_intersection) / len(new_user_list)
else:
retention_data[currency]['7d'] = 0
if currency in user_data_14d:
user_list_14d = [value['address'] for value in user_data_14d[currency]]
user_list_14d_intersection = _intersection(new_user_list, user_list_14d)
retention_data[currency]['14d'] = len(user_list_14d_intersection) / len(new_user_list)
else:
retention_data[currency]['14d'] = 0
if currency in user_data_30d:
user_list_30d = [value['address'] for value in user_data_30d[currency]]
user_list_30d_intersection = _intersection(new_user_list, user_list_30d)
retention_data[currency]['30d'] = len(user_list_30d_intersection) / len(new_user_list)
else:
retention_data[currency]['30d'] = 0
return retention_data
def _intersection(lst1, lst2):
return set(lst1).intersection(lst2)
def _get_last_processed_date():
directories = [f for f in os.listdir(STORE_DAILY_RETENTION_DATA) if
os.path.isdir(os.path.join(STORE_DAILY_RETENTION_DATA, f))]
last_file_timestamp = datetime.strptime('1970-01-01', '%Y-%m-%d')
last_file_timestamp = last_file_timestamp.replace(tzinfo=pytz.UTC)
for directory in directories:
target_dir = os.path.join(STORE_DAILY_RETENTION_DATA, directory)
files = [f for f in os.listdir(target_dir) if os.path.isfile(os.path.join(target_dir, f))]
# get the file with the highest timestamp
for file in files:
if file.startswith('.'):
continue
line_parts = file.split('.')
this_timestamp = datetime.strptime(line_parts[0], '%Y-%m-%d')
this_timestamp = this_timestamp.replace(tzinfo=pytz.UTC)
last_file_timestamp = max(last_file_timestamp, this_timestamp)
return last_file_timestamp
def get_retention_for_date(day, currency):
day_string = day.strftime('%Y-%m-%d')
file_path = os.path.join(STORE_DAILY_RETENTION_DATA, currency, day_string + '.json')
if not os.path.isfile(file_path):
return {}
with open(file_path, 'r') as file:
content = json.load(file)
return content
```
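The retention figures written above boil down to set intersections: the users that were new on day D are intersected with the users active on D+7, D+14 and D+30, and the size of the overlap divided by the number of new users is stored per currency. A minimal self-contained sketch of that calculation, with made-up user lists:

```python
# Minimal sketch of the retention calculation used above (hypothetical data).
new_users = ['terra1aaa', 'terra1bbb', 'terra1ccc', 'terra1ddd']   # new on day D
active_7d = ['terra1bbb', 'terra1ddd', 'terra1eee']                # active on day D+7

def retention(new_user_list, active_user_list):
    """Share of new users that are still active on the later day."""
    if not new_user_list:
        return 0
    overlap = set(new_user_list).intersection(active_user_list)
    return len(overlap) / len(new_user_list)

print(retention(new_users, active_7d))  # 0.5 -> 50% 7-day retention
```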
#### File: joergkiesewetter/terra-analytics/final_data_general.py
```python
import json
import os
from datetime import datetime, timedelta
import pytz
import calculate_daily_payment_data
import calculate_market_data
import config
from manage_transactions import get_first_transaction_timestamp
from util import logging
STORE_FINAL_DATA_GENERAL = '/terra-data/v2/final/general'
log = logging.get_custom_logger(__name__, config.LOG_LEVEL)
def final_data_general():
os.makedirs(STORE_FINAL_DATA_GENERAL, exist_ok=True)
max_time = datetime.utcnow()
max_time = max_time.replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=pytz.UTC)
stop_processing = False
date_to_process = get_first_transaction_timestamp()
# date_last_processed = _get_last_processed_date()
# date_to_process = max(date_to_process, date_last_processed + timedelta(days=1))
log.debug('generate final data: general')
if date_to_process >= max_time:
return
while not stop_processing:
final_data = {}
payment_data = calculate_daily_payment_data.get_data_for_date(date_to_process)
file_path = os.path.join(STORE_FINAL_DATA_GENERAL, date_to_process.strftime('%Y-%m-%d') + '.json')
if not os.path.isfile(file_path):
for symbol in payment_data.keys():
final_data[symbol] = {}
log.debug('creating final general data for ' + date_to_process.strftime('%Y-%m-%d'))
# Amount of Coins
# Velocity
market_data = calculate_market_data.get_data(symbol, date_to_process)
if not market_data:
return
final_data[symbol]['amount_of_coins'] = market_data['circulating_supply']
final_data[symbol]['velocity_m1'] = payment_data[symbol]['total_amount'] / market_data['circulating_supply']
if len(final_data.keys()) > 0:
with open(file_path, 'w') as file:
file.write(json.dumps(final_data))
date_to_process += timedelta(days=1)
if date_to_process >= max_time:
stop_processing = True
```
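The `velocity_m1` value stored above is simply the day's total payment amount divided by the circulating supply of the same denomination. A tiny worked example with made-up numbers:

```python
# Worked example for the velocity figure written to the general final data
# (numbers are made up for illustration).
payment_data = {'uluna': {'total_amount': 1_250_000.0}}
market_data = {'circulating_supply': 25_000_000.0}

velocity_m1 = payment_data['uluna']['total_amount'] / market_data['circulating_supply']
print(round(velocity_m1, 4))  # 0.05 -> 5% of the supply moved that day
```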
#### File: joergkiesewetter/terra-analytics/manage_transactions.py
```python
import base64
import os
import traceback
from datetime import datetime, timezone, timedelta
import pytz
import config
from provider.terra import Terra
from util import logging
# structure: /terra-data/raw/transactions/<type>/<date>.csv
BASE_DIRECTORY = '/terra-data/v2/raw/transactions'
log = logging.get_custom_logger(__name__, config.LOG_LEVEL)
token = dict()
def update_token_transactions():
"""
fetches all transactions.
:return:
Nothing
"""
os.makedirs(BASE_DIRECTORY, exist_ok=True)
# symbol_dir = BASE_DIRECTORY + symbol
#
# os.makedirs(symbol_dir, exist_ok=True)
max_time = datetime.utcnow()
max_time = max_time.replace(hour=0, minute=0, second=0, microsecond=0)
last_timestamp, last_block, last_hash = _get_last_transaction()
log.debug('starting update from block: ' + str(last_block))
if last_hash:
log.debug('with hash: ' + last_hash)
log.debug('with timestamp: ' + str(last_timestamp))
transactions = Terra.get_transaction(last_block)
_clear_last_block(last_block)
max_time_exceeded = False
while not max_time_exceeded:
log.debug('storing block ' + str(last_block))
# TODO add correct tracking for gas price and taxes
for transaction in transactions:
# last_batch_block = last_block
# last_batch_timestamp = last_timestamp
# last_batch_hash = last_hash
block_number = transaction['block']
timestamp = datetime.utcfromtimestamp(int(transaction['timestamp']))
hash = transaction['txhash']
type = transaction['type']
if timestamp > max_time:
max_time_exceeded = True
break
if type not in token.keys():
token[type] = {
'directory': os.path.join(BASE_DIRECTORY, type.replace('/', '_')),
'file': None,
'filename': None
}
os.makedirs(token[type]['directory'], exist_ok=True)
act_filename = timestamp.strftime('%Y-%m-%d') + '.csv'
if not token[type]['file'] or act_filename != token[type]['filename']:
token[type]['filename'] = act_filename
if token[type]['file']:
token[type]['file'].close()
token[type]['file'] = open(os.path.join(token[type]['directory'], token[type]['filename']), 'a')
# TODO message type cosmos/MsgUnjail (see block 806047)
# TODO message type staking/MsgBeginRedelegate (see block 2846428)
if type == 'distribution/MsgWithdrawDelegationReward':
new_line = ','.join([str(transaction['block']),
str(transaction['timestamp']),
transaction['txhash'],
transaction['delegator'],
transaction['validator'],
transaction['reward_from'],
transaction['delegation_reward'],
str(transaction['tax_amount']),
transaction['tax_currency'],
])
elif type == 'distribution/MsgWithdrawValidatorCommission':
new_line = ','.join([str(transaction['block']),
str(transaction['timestamp']),
transaction['txhash'],
transaction['validator'],
transaction['commission_from'],
transaction['commission'],
])
elif type == 'gov/MsgSubmitProposal':
title_encoded = base64.b64encode(transaction['proposal_title'].encode('utf-8'))
text_encoded = base64.b64encode(transaction['proposal_text'].encode('utf-8'))
new_line = ','.join([str(transaction['block']),
str(transaction['timestamp']),
transaction['txhash'],
transaction['proposer'],
str(transaction['init_deposit_amount']),
transaction['init_deposit_currency'],
transaction['proposal_id'],
title_encoded.decode('utf-8'),
text_encoded.decode('utf-8'),
str(transaction['tax_amount']),
transaction['tax_currency'],
])
elif type == 'gov/MsgDeposit':
new_line = ','.join([str(transaction['block']),
str(transaction['timestamp']),
transaction['txhash'],
transaction['depositor'],
transaction['proposal_id'],
transaction['amount'],
transaction['currency'],
str(transaction['tax_amount']),
transaction['tax_currency'],
])
elif type == 'staking/MsgDelegate':
new_line = ','.join([str(transaction['block']),
str(transaction['timestamp']),
transaction['txhash'],
transaction['delegator'],
transaction['validator'],
transaction['amount'],
transaction['currency'],
str(transaction['tax_amount']),
transaction['tax_currency'],
])
elif type == 'staking/MsgUndelegate':
new_line = ','.join([str(transaction['block']),
str(transaction['timestamp']),
transaction['txhash'],
transaction['delegator'],
transaction['validator'],
transaction['amount'],
transaction['currency'],
str(transaction['tax_amount']),
transaction['tax_currency'],
])
elif type == 'market/MsgSwap':
new_line = ','.join([str(transaction['block']),
str(transaction['timestamp']),
transaction['txhash'],
transaction['ask_address'],
transaction['ask_amount'],
transaction['ask_currency'],
transaction['bid_address'],
transaction['bid_amount'],
transaction['bid_currency'],
str(transaction['swap_fee_amount']),
transaction['swap_fee_currency'],
str(transaction['tax_amount']),
transaction['tax_currency'],
])
elif type == 'staking/MsgEditValidator':
details_encoded = base64.b64encode(transaction['details'].encode('utf-8'))
new_line = ','.join([str(transaction['block']),
str(transaction['timestamp']),
transaction['txhash'],
transaction['address'],
details_encoded.decode('utf-8'),
transaction['moniker'],
transaction['website'],
transaction['identity'],
transaction['commission_rate'] or '-1',
transaction.get('min_self_delegation') or '',
str(transaction['tax_amount']),
transaction['tax_currency'],
])
elif type == 'staking/MsgCreateValidator':
details_encoded = base64.b64encode(transaction['details'].encode('utf-8'))
new_line = ','.join([str(transaction['block']),
str(transaction['timestamp']),
transaction['txhash'],
transaction['pubkey'],
transaction['amount'],
transaction['currency'],
transaction['commission_rate'],
transaction['commission_max_rate'],
transaction['commission_max_change_rate'],
details_encoded.decode('utf-8'),
transaction['moniker'],
transaction['website'],
transaction['identity'],
transaction['min_self_delegation'],
transaction['delegator'],
transaction['validator'],
str(transaction['tax_amount']),
transaction['tax_currency'],
])
elif type == 'oracle/MsgExchangeRateVote':
new_line = ','.join([str(transaction['block']),
str(transaction['timestamp']),
transaction['txhash'],
str(transaction['exchange_rate']),
transaction['currency'],
transaction['feeder'],
str(transaction['tax_amount']),
transaction['tax_currency'],
])
elif type == 'oracle/MsgExchangeRatePrevote':
new_line = ','.join([str(transaction['block']),
str(transaction['timestamp']),
transaction['txhash'],
transaction['feeder'],
transaction['currency'],
str(transaction['tax_amount']),
transaction['tax_currency'],
])
elif type == 'oracle/MsgDelegateFeedConsent':
new_line = ','.join([str(transaction['block']),
str(transaction['timestamp']),
transaction['txhash'],
])
elif type == 'bank/MsgMultiSend':
new_line = ','.join([str(transaction['block']),
str(transaction['timestamp']),
transaction['txhash'],
str(transaction['amount']),
transaction['currency'],
transaction['from_address'],
transaction['to_address'],
str(transaction['tax_amount']),
transaction['tax_currency'],
])
elif type == 'bank/MsgSend':
new_line = ','.join([str(transaction['block']),
str(transaction['timestamp']),
transaction['txhash'],
str(transaction['amount']),
transaction['currency'],
transaction['from_address'],
transaction['to_address'],
str(transaction['tax_amount']),
transaction['tax_currency'],
])
else:
new_line = ''
log.warning('transaction type not known: ' + type)
token[type]['file'].write(new_line + '\n')
last_timestamp = timestamp
# last_block = block_number
last_hash = hash
# log.debug('last block: ' + str(last_batch_block))
# log.debug('last timestamp: ' + str(last_batch_timestamp))
last_block += 1
transactions = Terra.get_transaction(last_block)
# if last_timestamp == last_batch_timestamp and last_block == last_batch_block and last_hash == last_batch_hash:
# break
# last_timestamp = last_batch_timestamp
# last_block = last_batch_block
# last_hash = last_batch_hash
for key in token.keys():
if token[key]['file']:
token[key]['file'].flush()
os.fsync(token[key]['file'].fileno())
token[key]['file'].close()
token[key]['file'] = None
def _clear_last_block(block_number):
directories = [f for f in os.listdir(BASE_DIRECTORY) if os.path.isdir(os.path.join(BASE_DIRECTORY, f))]
for directory in directories:
path = os.path.join(BASE_DIRECTORY, directory)
last_file_timestamp = None
last_file = None
files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
# get the file with the highest timestamp
for file in files:
if file.startswith('.'):
continue
filename = file.split('.')[0]
timestamp = datetime.strptime(filename, '%Y-%m-%d')
if not last_file_timestamp or timestamp > last_file_timestamp:
last_file_timestamp = timestamp
last_file = file
if not last_file:
return
log.debug('removing data from the last block')
log.debug('scanning for block number: ' + str(block_number) + ' in directory \'' + directory + '\'')
removed_lines = 0
new_lines = []
with open(os.path.join(path, last_file), 'rt') as file:
for line in file:
line_split = line.split(',')
if str(line_split[0]) != str(block_number):
new_lines.append(line)
else:
removed_lines += 1
file.flush()
file.close()
log.debug('removing number of lines: ' + str(removed_lines))
with open(os.path.join(path, last_file), 'w') as file:
for line in new_lines:
file.write(line)
file.flush()
file.close()
def _get_last_transaction():
last_timestamp = 0
# TODO change back to 0
last_block = 0
last_hash = None
directories = [f for f in os.listdir(BASE_DIRECTORY) if os.path.isdir(os.path.join(BASE_DIRECTORY, f))]
for directory in directories:
path = os.path.join(BASE_DIRECTORY, directory)
last_file_timestamp = None
last_file = None
files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
# get the file with the highest timestamp
for file in files:
if file.startswith('.'):
continue
filename = file.split('.')[0]
timestamp = datetime.strptime(filename, '%Y-%m-%d')
if not last_file_timestamp or timestamp > last_file_timestamp:
last_file_timestamp = timestamp
last_file = file
# if we don't have stored data for the given symbol
if not last_file:
return 0, 0, None
# if the file exists, but is empty
if os.stat(os.path.join(path, last_file)).st_size <= 0:
continue
# getting the last line of the file and extracting the timestamp
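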
with open(os.path.join(path, last_file), 'rt') as file:
last_line = file.readlines()[-1]
last_line = last_line.split(',')
if last_block is None or last_timestamp < int(last_line[1]):
last_timestamp = int(last_line[1])
last_block = int(last_line[0])
last_hash = last_line[2]
return last_timestamp, last_block, last_hash
def get_first_transaction_timestamp():
last_file_timestamp = None
directories = [f for f in os.listdir(BASE_DIRECTORY) if os.path.isdir(os.path.join(BASE_DIRECTORY, f))]
for dir in directories:
dir = os.path.join(BASE_DIRECTORY, dir)
files = [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]
# get the file with the highest timestamp
for file in files:
if file.startswith('.'):
continue
filename = file.split('.')[0]
timestamp = datetime.strptime(filename, '%Y-%m-%d')
timestamp = timestamp.replace(tzinfo=pytz.UTC)
if not last_file_timestamp or timestamp < last_file_timestamp:
last_file_timestamp = timestamp
print(last_file_timestamp)
return last_file_timestamp
def get_transaction_data(date, type_filter=None):
return_data = []
directories = [f for f in os.listdir(BASE_DIRECTORY) if os.path.isdir(os.path.join(BASE_DIRECTORY, f))]
for dir in directories:
if type_filter and dir not in type_filter:
continue
try:
filename = os.path.join(BASE_DIRECTORY, dir, date.strftime('%Y-%m-%d') + '.csv')
if not os.path.isfile(filename):
continue
with open(filename, 'rt') as file:
for line in file:
data = line.strip().split(',')
data.insert(0, dir)
return_data.append(data)
except:
traceback.print_exc()
return return_data
``` |
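The files written by `update_token_transactions` are plain CSV lines, grouped into one directory per message type (with `/` replaced by `_`) and one file per day; block number, timestamp and transaction hash always come first. A hedged sketch of reading one day of `bank/MsgSend` records back, following the column order used above (the date and base path are placeholders):

```python
import csv
import os
from datetime import datetime, timezone

BASE_DIRECTORY = '/terra-data/v2/raw/transactions'

def read_msgsend_day(day_str):
    """Yield dicts for one day of bank/MsgSend records (sketch, not original code)."""
    path = os.path.join(BASE_DIRECTORY, 'bank_MsgSend', day_str + '.csv')
    if not os.path.isfile(path):
        return
    with open(path, newline='') as fh:
        for row in csv.reader(fh):
            if not row:
                continue
            yield {
                'block': int(row[0]),
                'timestamp': datetime.fromtimestamp(int(row[1]), tz=timezone.utc),
                'txhash': row[2],
                'amount': row[3],        # kept as the raw string written above
                'currency': row[4],
                'from_address': row[5],
                'to_address': row[6],
            }

for tx in read_msgsend_day('2020-01-01'):
    print(tx['txhash'], tx['amount'], tx['currency'])
```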
{
"source": "joergklausen/gaw-mkn-daq",
"score": 2
} |
#### File: mkndaq/inst/g2401.py
```python
import os
import socket
import time
import logging
import shutil
import zipfile
import colorama
class G2401:
"""
Instrument of type Picarro G2401 with methods and attributes for interaction.
"""
_source = None
_socksleep = None
_sockaddr = None
_socktout = None
_data_storage = None
_log = None
_zip = None
_staging = None
_netshare = None
_datadir = None
_name = None
_logger = None
_get_data = None
_socket_port = None
_socket_host = None
@classmethod
def __init__(cls, name: str, config: dict) -> None:
"""
Constructor
Parameters
----------
name : str
name of instrument as defined in config file
config : dict
dictionary of attributes defining the instrument and port
"""
colorama.init(autoreset=True)
print("# Initialize G2401")
try:
# setup logging
logdir = os.path.expanduser(config['logs'])
os.makedirs(logdir, exist_ok=True)
logfile = '%s.log' % time.strftime('%Y%m%d')
logfile = os.path.join(logdir, logfile)
cls._logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%y-%m-%d %H:%M:%S',
filename=str(logfile),
filemode='a')
# configure tcp/ip
cls._sockaddr = (config[name]['socket']['host'],
config[name]['socket']['port'])
cls._socktout = config[name]['socket']['timeout']
cls._socksleep = config[name]['socket']['sleep']
# read instrument control properties for later use
cls._name = name
cls._type = config[name]['type']
cls._serial_number = config[name]['serial_number']
cls._get_data = config[name]['get_data']
# setup data directory
datadir = os.path.expanduser(config['data'])
cls._datadir = os.path.join(datadir, name)
os.makedirs(cls._datadir, exist_ok=True)
# source of data files
cls._source = config[name]['source']
# interval to fetch and stage data files
cls._staging_interval = config[name]['staging_interval']
# reporting/storage
cls._reporting_interval = config[name]['reporting_interval']
cls._data_storage = config[name]['data_storage']
# netshare of user data files
cls._netshare = os.path.expanduser(config[name]['netshare'])
# staging area for files to be transferred
cls._staging = os.path.expanduser(config['staging']['path'])
cls._zip = config['staging']['zip']
except Exception as err:
if cls._log:
cls._logger.error(err)
print(err)
@classmethod
def tcpip_comm(cls, cmd: str, tidy=True) -> str:
"""
Send a command and retrieve the response. Assumes an open connection.
:param cmd: command sent to instrument
:param tidy: remove cmd echo, \n and *\r\x00 from result string, terminate with \n
:return: response of instrument, decoded
"""
rcvd = b''
try:
# open socket connection as a client
with socket.socket(socket.AF_INET, socket.SOCK_STREAM, ) as s:
# connect to the server
s.settimeout(cls._socktout)
s.connect(cls._sockaddr)
# send data
s.sendall((cmd + chr(13) + chr(10)).encode())
time.sleep(cls._socksleep)
# receive response
while True:
data = s.recv(1024)
rcvd = rcvd + data
if chr(13).encode() in data:
break
# decode response, tidy
rcvd = rcvd.decode()
if tidy:
if "\n" in rcvd:
rcvd = rcvd.split("\n")[0]
return rcvd
except Exception as err:
if cls._log:
cls._logger.error(err)
print(err)
@classmethod
def store_and_stage_latest_file(cls):
try:
# get data file from netshare
if cls._data_storage == 'hourly':
path = os.path.join(cls._netshare, time.strftime("/%Y/%m/%d"))
elif cls._data_storage == 'daily':
path = os.path.join(cls._netshare, time.strftime("/%Y/%m"))
else:
raise ValueError("Configuration 'data_storage' of %s must be <hourly|daily>." % cls._name)
file = max(os.listdir(path))
# store data file
shutil.copyfile(os.path.join(path, file), os.path.join(cls._datadir, file))
# stage data for transfer
stage = os.path.join(cls._staging, cls._name)
os.makedirs(stage, exist_ok=True)
if cls._zip:
# create zip file
archive = os.path.join(stage, "".join([file[:-4], ".zip"]))
with zipfile.ZipFile(archive, "w", compression=zipfile.ZIP_DEFLATED) as fh:
fh.write(os.path.join(path, file), file)
else:
shutil.copyfile(os.path.join(path, file), os.path.join(stage, file))
print("%s .store_and_stage_latest_file (name=%s)" % (time.strftime('%Y-%m-%d %H:%M:%S'), cls._name))
except Exception as err:
if cls._log:
cls._logger.error(err)
print(err)
@classmethod
def store_and_stage_files(cls):
"""
Fetch data files from local source and move to datadir. Zip files and place in staging area.
:return: None
"""
try:
print("%s .store_and_stage_files (name=%s)" % (time.strftime('%Y-%m-%d %H:%M:%S'), cls._name))
# get data file from local source
files = os.listdir(cls._source)
if files:
# staging location for transfer
stage = os.path.join(cls._staging, cls._name)
os.makedirs(stage, exist_ok=True)
# store and stage data files
for file in files:
# stage file
if cls._zip:
# create zip file
archive = os.path.join(stage, "".join([file[:-4], ".zip"]))
with zipfile.ZipFile(archive, "w", compression=zipfile.ZIP_DEFLATED) as fh:
fh.write(os.path.join(cls._source, file), file)
else:
shutil.copyfile(os.path.join(cls._source, file), os.path.join(stage, file))
# move to data storage location
shutil.move(os.path.join(cls._source, file), os.path.join(cls._datadir, file))
except Exception as err:
if cls._log:
cls._logger.error(err)
print(err)
@classmethod
def get_meas_getconc(cls) -> str:
"""
Retrieve instantaneous data from instrument
:return:
"""
try:
return cls.tcpip_comm('_Meas_GetConc')
except Exception as err:
if cls._log:
cls._logger.error(err)
print(err)
@classmethod
def get_co2_ch4_co(cls) -> list:
"""
Get instantaneous cleaned response to '_Meas_GetConc' from instrument.
:return: list: concentration values from instrument
"""
try:
return cls.tcpip_comm("_Meas_GetConc").split(';')[0:3]
except Exception as err:
if cls._log:
cls._logger.error(err)
print(err)
@classmethod
def print_co2_ch4_co(cls) -> None:
try:
conc = cls.tcpip_comm("_Meas_GetConc").split(';')[0:3]
print(colorama.Fore.GREEN + "%s [%s] CO2 %s ppm CH4 %s ppm CO %s ppm" % \
(time.strftime("%Y-%m-%d %H:%M:%S"), cls._name, *conc))
except Exception as err:
if cls._log:
cls._logger.error(err)
print(err)
def read_user_file(self, file, log=False):
"""
Read user file to Pandas data.frame
Parameters
----------
file : str
Full path to file
log : str, optional
DESCRIPTION. The default is False.
Returns
-------
Pandas data.frame
"""
```
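`G2401.__init__` expects a nested configuration dictionary with global keys (`logs`, `data`, `staging`) plus per-instrument settings under the instrument name. A hedged sketch of such a configuration and its use; every value below is a placeholder, not a real station setting:

```python
# Placeholder configuration for a G2401 instance (all values are illustrative).
config = {
    'logs': '~/mkndaq/logs',
    'data': '~/mkndaq/data',
    'staging': {'path': '~/mkndaq/staging', 'zip': True},
    'g2401': {
        'type': 'G2401',
        'serial_number': '12345',
        'get_data': '_Meas_GetConc',
        'socket': {'host': '192.168.0.10', 'port': 51020, 'timeout': 5, 'sleep': 0.5},
        'source': 'C:/UserData/DataLog_User_Sync',
        'staging_interval': 60,
        'reporting_interval': 60,
        'data_storage': 'hourly',
        'netshare': '//picarro/DataLog_User',
    },
}

# g2401 = G2401(name='g2401', config=config)
# print(g2401.get_co2_ch4_co())
```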
#### File: mkndaq/inst/tei49i.py
```python
import logging
import os
import shutil
import socket
import time
import zipfile
import colorama
from mkndaq.utils import datetimebin
class TEI49I:
"""
Instrument of type Thermo TEI 49I with methods, attributes for interaction.
"""
_datadir = None
_datafile = None
_data_header = None
_get_config = None
_get_data = None
_id = None
_log = None
_logger = None
_name = None
_reporting_interval = None
_set_config = None
_simulate = None
_sockaddr = None
_socksleep = None
_socktout = None
_staging = None
_zip = False
@classmethod
def __init__(cls, name: str, config: dict, simulate=False) -> None:
"""
Initialize instrument class.
:param name: name of instrument
:param config: dictionary of attributes defining the instrument, serial port and other information
- config[name]['type']
- config[name]['id']
- config[name]['serial_number']
- config[name]['get_config']
- config[name]['set_config']
- config[name]['get_data']
- config[name]['data_header']
- config['logs']
- config[name]['socket']['host']
- config[name]['socket']['port']
- config[name]['socket']['timeout']
- config[name]['socket']['sleep']
- config[name]['sampling_interval']
- config['data']
- config[name]['logs']: default=True, write information to logfile
- config['staging']['path']
- config['staging']['zip']
:param simulate: default=True, simulate instrument behavior. Assumes a serial loopback connector.
"""
colorama.init(autoreset=True)
print("# Initialize TEI49I")
try:
cls._simulate = simulate
# setup logging
if config['logs']:
cls._log = True
logs = os.path.expanduser(config['logs'])
os.makedirs(logs, exist_ok=True)
logfile = '%s.log' % time.strftime('%Y%m%d')
cls._logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%y-%m-%d %H:%M:%S',
filename=str(os.path.join(logs, logfile)),
filemode='a')
# read instrument control properties for later use
cls._name = name
cls._id = config[name]['id'] + 128
cls._type = config[name]['type']
cls._serial_number = config[name]['serial_number']
cls._get_config = config[name]['get_config']
cls._set_config = config[name]['set_config']
cls._get_data = config[name]['get_data']
cls._data_header = config[name]['data_header']
# configure tcp/ip
cls._sockaddr = (config[name]['socket']['host'],
config[name]['socket']['port'])
cls._socktout = config[name]['socket']['timeout']
cls._socksleep = config[name]['socket']['sleep']
# sampling, aggregation, reporting/storage
cls._sampling_interval = config[name]['sampling_interval']
cls._reporting_interval = config['reporting_interval']
# setup data directory
datadir = os.path.expanduser(config['data'])
cls._datadir = os.path.join(datadir, name)
os.makedirs(cls._datadir, exist_ok=True)
# staging area for files to be transferred
cls._staging = os.path.expanduser(config['staging']['path'])
cls._zip = config['staging']['zip']
# # query instrument to see if communication is possible, set date and time
# if not cls._simulate:
# dte = cls.get_data('date', save=False)
# if dte:
# tme = cls.get_data('time', save=False)
# msg = "Instrument '%s' initialized. Instrument datetime is %s %s." % (cls._name, dte, tme)
# cls._logger.info(msg)
# cls.set_datetime()
# else:
# msg = "Instrument '%s' did not respond as expected." % cls._name
# cls._logger.error(msg)
# print(colorama.Fore.RED + "%s %s" % (time.strftime('%Y-%m-%d %H:%M:%S'), msg))
except Exception as err:
if cls._log:
cls._logger.error(err)
print(err)
@classmethod
def tcpip_comm(cls, cmd: str, tidy=True) -> str:
"""
Send a command and retrieve the response. Assumes an open connection.
:param cmd: command sent to instrument
:param tidy: remove cmd echo, \n and *\r\x00 from result string, terminate with \n
:return: response of instrument, decoded
"""
_id = bytes([cls._id])
rcvd = b''
try:
# open socket connection as a client
if cls._simulate:
rcvd = cls.simulate_get_data(cmd).encode()
else:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM, ) as s:
# connect to the server
s.settimeout(cls._socktout)
s.connect(cls._sockaddr)
if cls._simulate:
_id = b''
# send data
s.sendall(_id + ('%s\x0D' % cmd).encode())
time.sleep(cls._socksleep)
# receive response
while True:
data = s.recv(1024)
rcvd = rcvd + data
if b'\x0D' in data:
break
# decode response, tidy
rcvd = rcvd.decode()
if tidy:
# - remove checksum after and including the '*'
rcvd = rcvd.split("*")[0]
# - remove echo before and including '\n'
if "\n" in rcvd:
rcvd = rcvd.split("\n")[1]
return rcvd
except Exception as err:
if cls._log:
cls._logger.error(err)
print(err)
@classmethod
def get_config(cls) -> list:
"""
Read current configuration of instrument and optionally write to log.
:return (err, cfg) configuration or errors, if any.
"""
print("%s .get_config (name=%s)" % (time.strftime('%Y-%m-%d %H:%M:%S'), cls._name))
cfg = []
try:
for cmd in cls._get_config:
cfg.append(cls.tcpip_comm(cmd))
if cls._log:
cls._logger.info("Current configuration of '%s': %s" % (cls._name, cfg))
return cfg
except Exception as err:
if cls._log:
cls._logger.error(err)
print(err)
@classmethod
def set_datetime(cls) -> None:
"""
Synchronize date and time of instrument with computer time.
:return:
"""
try:
dte = cls.tcpip_comm("set date %s" % time.strftime('%m-%d-%y'))
msg = "Date of instrument %s set to: %s" % (cls._name, dte)
print("%s %s" % (time.strftime('%Y-%m-%d %H:%M:%S'), msg))
cls._logger.info(msg)
tme = cls.tcpip_comm("set time %s" % time.strftime('%H:%M:%S'))
msg = "Time of instrument %s set to: %s" % (cls._name, tme)
print("%s %s" % (time.strftime('%Y-%m-%d %H:%M:%S'), msg))
cls._logger.info(msg)
except Exception as err:
if cls._log:
cls._logger.error(err)
print(err)
@classmethod
def set_config(cls) -> list:
"""
Set configuration of instrument and optionally write to log.
:return (err, cfg) configuration set or errors, if any.
"""
print("%s .set_config (name=%s)" % (time.strftime('%Y-%m-%d %H:%M:%S'), cls._name))
cfg = []
try:
for cmd in cls._set_config:
cfg.append(cls.tcpip_comm(cmd))
time.sleep(1)
if cls._log:
cls._logger.info("Configuration of '%s' set to: %s" % (cls._name, cfg))
return cfg
except Exception as err:
if cls._log:
cls._logger.error(err)
print(err)
@classmethod
def get_data(cls, cmd=None, save=True) -> str:
"""
Retrieve long record from instrument and optionally write to log.
:param str cmd: command sent to instrument
:param bln save: Should data be saved to file? default=True
:return str response as decoded string
"""
try:
dtm = time.strftime('%Y-%m-%d %H:%M:%S')
if cls._simulate:
print("%s .get_data (name=%s, save=%s, simulate=%s)" % (dtm, cls._name, save, cls._simulate))
else:
print("%s .get_data (name=%s, save=%s)" % (dtm, cls._name, save))
if cmd is None:
cmd = cls._get_data
data = cls.tcpip_comm(cmd)
if cls._simulate:
data = cls.simulate_get_data(cmd)
if save:
# generate the datafile name
cls._datafile = os.path.join(cls._datadir,
"".join([cls._name, "-",
datetimebin.dtbin(cls._reporting_interval), ".dat"]))
if not (os.path.exists(cls._datafile)):
# if file doesn't exist, create and write header
with open(cls._datafile, "at") as fh:
fh.write("%s\n" % cls._data_header)
fh.close()
with open(cls._datafile, "at") as fh:
fh.write("%s %s\n" % (dtm, data))
fh.close()
# stage data for transfer
root = os.path.join(cls._staging, os.path.basename(cls._datadir))
os.makedirs(root, exist_ok=True)
if cls._zip:
# create zip file
archive = os.path.join(root, "".join([os.path.basename(cls._datafile[:-4]), ".zip"]))
with zipfile.ZipFile(archive, "w", compression=zipfile.ZIP_DEFLATED) as fh:
fh.write(cls._datafile, os.path.basename(cls._datafile))
else:
shutil.copyfile(cls._datafile, os.path.join(root, os.path.basename(cls._datafile)))
return data
except Exception as err:
if cls._log:
cls._logger.error(err)
print(err)
@classmethod
def get_o3(cls) -> str:
try:
return cls.tcpip_comm('o3')
except Exception as err:
if cls._log:
cls._logger.error(err)
print(err)
@classmethod
def print_o3(cls) -> None:
try:
o3 = cls.tcpip_comm('O3').split()
print(colorama.Fore.GREEN + "%s [%s] %s %s %s" % (time.strftime("%Y-%m-%d %H:%M:%S"),
cls._name,
o3[0], str(float(o3[1])), o3[2]))
except Exception as err:
if cls._log:
cls._logger.error(err)
print(err)
@classmethod
def simulate_get_data(cls, cmd=None) -> str:
"""
:param cmd:
:return:
"""
if cmd is None:
cmd = 'lrec'
dtm = time.strftime("%H:%M %m-%d-%y", time.gmtime())
if cmd == 'lrec':
data = "(simulated) %s flags D800500 o3 0.394 cellai 123853.000 cellbi 94558.000 bncht 31.220 lmpt " \
"53.754 o3lt 68.363 flowa 0.000 flowb 0.000 pres 724.798" % dtm
else:
data = "(simulated) %s Sorry, I can only simulate lrec. " % dtm
return data
if __name__ == "__main__":
pass
```
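The `tidy` branch of `TEI49I.tcpip_comm` strips the `*`-prefixed checksum and then the command echo before the first newline. A stand-alone illustration of that string handling; the raw response below is made up to match the format the code expects, not a captured instrument reply:

```python
# Illustration of the tidy step in TEI49I.tcpip_comm (made-up raw response).
raw = 'o3\n0.123E+02 ppb *1a2b\r\x00'

tidied = raw.split('*')[0]          # drop the '*'-prefixed checksum
if '\n' in tidied:
    tidied = tidied.split('\n')[1]  # drop the echoed command before the newline
print(repr(tidied))                 # '0.123E+02 ppb '
```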
#### File: mkndaq/tests/test_serial_loopback.py
```python
import sys
import glob
import serial
import time
def find_serial_ports():
""" Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
found = []
for port in ports:
try:
s = serial.Serial(port)
print("Found %s: %s" % (port, s.getSettingsDict()))
s.close()
found.append(port)
except (OSError, serial.SerialException):
pass
return found
def test_serial_loopback(port='COM1', cfg=None, sleep=0.5, cmd="Hello, World"):
if cfg is None:
cfg = [9600, 8, 'N', 1, 1]
err = None
try:
# configure serial port
ser = serial.Serial()
ser.port = port
ser.baudrate = cfg[0]
ser.bytesize = cfg[1]
ser.parity = cfg[2]
ser.stopbits = cfg[3]
ser.timeout = cfg[4]
ser.open()
rcvd = b''
if ser.is_open:
print('%s successfully opened.' % port)
msg = ('%s\x0D' % cmd).encode()
print('sent (encoded): ', msg)
ser.write(msg)
time.sleep(sleep)
while ser.in_waiting > 0:
rcvd = rcvd + ser.read(1024)
time.sleep(0.1)
rcvd = rcvd.decode()
print('response (decoded): ', rcvd)
ser.close()
if not ser.is_open:
print("%s correctly closed." % port)
else:
raise serial.SerialException('%s could not be closed.' % port)
return rcvd
except Exception as err:
print(err)
if __name__ == '__main__':
serial_ports = find_serial_ports()
print("Serial ports found: %s" % serial_ports)
for port in serial_ports:
print(port, test_serial_loopback(port))
``` |
{
"source": "joergklausen/mkndaq",
"score": 2
} |
#### File: mkndaq/mkndaq/mkndaq.py
```python
import colorama
import os
import logging
import time
import argparse
import schedule
import threading
from mkndaq.utils.configparser import config
from mkndaq.utils.filetransfer import SFTPClient
from mkndaq.inst.tei49c import TEI49C
from mkndaq.inst.tei49i import TEI49I
from mkndaq.inst.g2401 import G2401
from mkndaq.inst.meteo import METEO
from mkndaq.inst.aerosol import AEROSOL
def run_threaded(job_func):
"""Set up threading and start job.
Args:
job_func ([type]): [description]
"""
job_thread = threading.Thread(target=job_func)
job_thread.start()
def main():
"""Read config file, set up instruments, and launch data acquisition."""
logs = None
logger = None
try:
colorama.init(autoreset=True)
version = 'v0.4.6'
print("### MKNDAQ (%s) started on %s" % (version, time.strftime("%Y-%m-%d %H:%M")))
# collect and interpret CLI arguments
parser = argparse.ArgumentParser(
description='Data acquisition and transfer for MKN Global GAW Station.',
usage='mkndaq[.exe] [-s] -c')
parser.add_argument('-s', '--simulate', action='store_true',
help='simulate communication with instruments', required=False)
parser.add_argument('-c', '--configuration', type=str, help='path to configuration file', required=True)
parser.add_argument('-f', '--fetch', type=int, default=20,
help='interval in seconds to fetch and display current instrument data',
required=False)
args = parser.parse_args()
simulate = args.simulate
fetch = args.fetch
config_file = args.configuration
# read config file
cfg = config(config_file)
# setup logging
logs = os.path.expanduser(cfg['logs'])
os.makedirs(logs, exist_ok=True)
logfile = os.path.join(logs,
'%s.log' % time.strftime('%Y%m%d'))
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%y-%m-%d %H:%M:%S',
filename=str(logfile),
filemode='a')
logging.getLogger('schedule').setLevel(level=logging.ERROR)
logging.getLogger('paramiko.transport').setLevel(level=logging.ERROR)
logger.info("=== mkndaq (%s) started ===" % version)
# initialize data transfer, set up remote folders
sftp = SFTPClient(config=cfg)
sftp.setup_remote_folders()
# stage most recent config file
print("%s Staging current config file ..." % time.strftime('%Y-%m-%d %H:%M:%S'))
sftp.stage_current_config_file(config_file)
# initialize instruments, get and set configurations and define schedules
# NB: In case, more instruments should be handled, the relevant calls need to be included here below.
try:
if cfg.get('tei49c', None):
tei49c = TEI49C(name='tei49c', config=cfg, simulate=simulate)
tei49c.get_config()
tei49c.set_config()
schedule.every(cfg['tei49c']['sampling_interval']).minutes.at(':00').do(tei49c.get_data)
schedule.every(6).hours.at(':00').do(tei49c.set_datetime)
schedule.every(fetch).seconds.do(tei49c.print_o3)
if cfg.get('tei49i', None):
tei49i = TEI49I(name='tei49i', config=cfg, simulate=simulate)
tei49i.get_config()
tei49i.set_config()
schedule.every(cfg['tei49i']['sampling_interval']).minutes.at(':00').do(tei49i.get_data)
schedule.every().day.at('00:00').do(tei49i.set_datetime)
schedule.every(fetch).seconds.do(tei49i.print_o3)
if cfg.get('g2401', None):
g2401 = G2401('g2401', config=cfg)
g2401.store_and_stage_latest_file()
schedule.every(cfg['g2401']['reporting_interval']).minutes.at(':00').do(
g2401.store_and_stage_files)
schedule.every(fetch).seconds.do(g2401.print_co2_ch4_co)
if cfg.get('meteo', None):
meteo = METEO('meteo', config=cfg)
meteo.store_and_stage_files()
schedule.every(cfg['meteo']['staging_interval']).minutes.do(meteo.store_and_stage_files)
schedule.every(cfg['meteo']['staging_interval']).minutes.do(meteo.print_meteo)
if cfg.get('aerosol', None):
aerosol = AEROSOL('aerosol', config=cfg)
aerosol.store_and_stage_files()
schedule.every(cfg['aerosol']['staging_interval']).minutes.do(aerosol.store_and_stage_files)
schedule.every(cfg['aerosol']['staging_interval']).minutes.do(aerosol.print_aerosol)
except Exception as err:
if logs:
logger.error(err)
print(err)
# stage most recent log file and define schedule
print("%s Staging current log file ..." % time.strftime('%Y-%m-%d %H:%M:%S'))
sftp.stage_current_log_file()
schedule.every().day.at('00:00').do(sftp.stage_current_log_file)
# transfer any existing staged files and define schedule for data transfer
print("%s Transfering existing staged files ..." % time.strftime('%Y-%m-%d %H:%M:%S'))
sftp.xfer_r()
schedule.every(cfg['reporting_interval']).minutes.at(':20').do(run_threaded, sftp.xfer_r)
print("# Begin data acquisition and file transfer")
while True:
schedule.run_pending()
time.sleep(1)
except Exception as err:
if logs:
logger.error(err)
print(err)
if __name__ == '__main__':
main()
``` |
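The acquisition loop above leans on the `schedule` package: jobs are registered once, `run_pending()` is polled every second, and `run_threaded` wraps slow jobs such as the SFTP transfer in their own thread so they cannot block data acquisition. A stripped-down sketch of that pattern with placeholder job functions:

```python
import threading
import time

import schedule  # pip install schedule


def run_threaded(job_func):
    """Run a job in its own thread so slow jobs do not block the scheduler."""
    threading.Thread(target=job_func).start()


def poll_instrument():       # placeholder for e.g. tei49i.get_data
    print('polling instrument', time.strftime('%H:%M:%S'))


def transfer_files():        # placeholder for e.g. sftp.xfer_r
    time.sleep(2)            # pretend the transfer takes a while
    print('transfer done', time.strftime('%H:%M:%S'))


schedule.every(10).seconds.do(poll_instrument)
schedule.every(1).minutes.do(run_threaded, transfer_files)

# while True:
#     schedule.run_pending()
#     time.sleep(1)
```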
{
"source": "joergmeyer-kit/hydrobox",
"score": 3
} |
#### File: hydrobox/signal/optimize.py
```python
import numpy as np
import pandas as pd
from hydrobox.utils.decorators import accept
@accept(
x=(np.ndarray, pd.Series, pd.DataFrame),
flatten=bool,
threshold=(int, float)
)
def simplify(x, flatten=True, threshold=0):
"""Simplify signal
An given input is simplified by reducing the amount of nodes representing
the signal. Whenever node[n+1] - node[n] <= threshold, no information
gain is assumed between the two nodes. Thus, node[n+1] will be removed.
In case flatten is True, noise in the signal will be flattened as well.
This is done by removing node[n + 1] in case node[n] and node[n + 1] hold
the same value. In case the underlying frequency in the noise is higher
than one time step or the amplitude is higher than the sensor precision,
this method will not treat the value change as noise. In these cases a
filter needs to be applied first.
Parameters
----------
x : numpy.ndarray, pandas.Series, pandas.DataFrame
numpy.array of signal
flatten : bool
Specify whether a single-step, single-amplitude change in the signal
should be flattened out as assumed noise.
threshold : int, float
value threshold at which a difference in signal is assumed
Returns
-------
numpy.ndarray
"""
# Turn Series and DataFrame instances to a numpy array
if isinstance(x, (pd.Series, pd.DataFrame)):
arr = x.values
else:
arr = x
if arr.ndim > 1:
raise NotImplementedError
# remove the nodes without a gain of information
simple = arr[np.where(np.abs(np.diff(arr)) > threshold)]
# build the remove mask for noise, if not flatten do not remove anything
if flatten:
# The first and last element are never removed (False)
remove_mask = np.concatenate((
[False],
np.fromiter((float(simple[i]) == float(simple[i - 2]) for i in range(2, len(simple))), dtype=bool),
[False]
))
else:
remove_mask = np.zeros(simple.shape, dtype=bool)
return simple[~remove_mask]
```
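A short usage sketch for `simplify`: the first stage keeps only the nodes whose change exceeds the threshold, which can be reproduced with plain NumPy; the input series below is made up and the commented import path follows this repository's module layout:

```python
import numpy as np

# from hydrobox.signal.optimize import simplify   # import path as in this repository

signal = np.array([1.0, 1.0, 1.2, 5.0, 5.0, 5.1, 9.0])

# first stage of simplify: drop nodes whose change stays within the threshold
kept = signal[np.where(np.abs(np.diff(signal)) > 0.15)]
print(kept)

# the toolbox function additionally flattens single-step noise when flatten=True:
# print(simplify(signal, flatten=True, threshold=0.15))
```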
#### File: hydrobox/tests/gstat.py
```python
import unittest
import numpy as np
from numpy.testing import assert_array_almost_equal
from hydrobox.toolbox import variogram
class TestVariogram(unittest.TestCase):
def setUp(self):
np.random.seed(42)
self.c = np.random.gamma(20, 10, (30, 2))
np.random.seed(42)
self.v = np.random.normal(10, 5, 30)
# results
self.b = [3.82, 7.64, 11.46, 15.29, 19.11, 22.93, 26.75, 30.57, 34.39,
38.21, 42.03, 45.86, 49.68, 53.5, 57.32]
self.e = [10.16, 12.3, 0.58, 18.52, 21.74, 41.21, 13.74, 12.25, 20.49,
18.87, 22.72, 16.34, 20.74, 28.57, 0.]
def test_default_no_plot(self):
b, e, y = variogram(self.c, self.v, effective_range=37., sill=21.,
plot=False)
# test
assert_array_almost_equal(self.b, b, decimal=2)
assert_array_almost_equal(self.e, e, decimal=2)
def test_matern_function(self):
b, e, y = variogram(self.c, self.v, effective_range=37., sill=21.,
nugget=3., plot=False, model='matern', s=15.)
# test
# experimental part should not have changed
assert_array_almost_equal(self.b, b, decimal=2)
assert_array_almost_equal(self.e, e, decimal=2)
# check function
assert_array_almost_equal(
y[[1, 5, 8, 13, 40, 50, 60, 80, 90]],
[4.21, 5.61, 6.91, 9.4, 20.84, 22.61, 23.45, 23.94, 23.98],
decimal=2
)
def test_variogram_plot(self):
# run without plot first:
b, e, y = variogram(self.c, self.v, effective_range=37., sill=21.,
nugget=3., plot=False, model='matern', s=15.)
# run with plot
fig = variogram(self.c, self.v, effective_range=37., sill=21.,
nugget=3., plot=True, model='matern', s=15.)
# dig out the arrays
ax = fig.axes[0]
exp_line, mod_line = ax.get_lines()
# test experimental scatter plot
assert_array_almost_equal(b, exp_line.get_data()[0], decimal=6)
assert_array_almost_equal(e, exp_line.get_data()[1], decimal=6)
# test model function
assert_array_almost_equal(y, mod_line.get_data()[1], decimal=6)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/preprocessing/union.py
```python
import unittest
import pandas as pd
import numpy as np
from numpy.testing import assert_almost_equal
from pandas.testing import assert_series_equal, assert_index_equal
from hydrobox.toolbox import merge
class TestMerge(unittest.TestCase):
def setUp(self):
self.s1 = pd.Series(data=[1, 2, 3])
self.s2 = pd.Series(data=[4, 5.5])
self.s3 = pd.Series(data=[7, 9.5, 4.5, 2])
self.dtindex = pd.date_range('201309241100', freq='2H', periods=4)
def test_default(self):
assert_almost_equal(
merge(self.s1, self.s2, self.s3).values,
np.array(
[[1., 4., 7.], [2., 5.5, 9.5],
[3., np.nan, 4.5], [np.nan, np.nan, 2.]])
, decimal=1
)
def test_dropna(self):
assert_almost_equal(
merge(self.s1, self.s2, self.s3, dropna=True).values,
np.array([[1., 4., 7.], [2., 5.5, 9.5]]),
decimal=1
)
def test_index_mismatch(self):
with self.assertRaises(ValueError):
s = self.s3.copy()
s.index = self.dtindex
merge(self.s1, self.s2, s)
def test_pass_no_series(self):
self.assertIsNone(merge())
def test_pass_only_one_series(self):
assert_series_equal(self.s1, merge(self.s1))
def test_datetimeindex(self):
s1 = self.s1.copy()
s2 = self.s2.copy()
s3 = self.s3.copy()
s1.index = self.dtindex[1:]
s2.index = self.dtindex[[1,3]]
s3.index = self.dtindex
# correct amount of elements
self.assertEqual(merge(s1, s2, s3).size, 12)
self.assertEqual(merge(s1, s2, s3, dropna=True).size, 6)
# check index
assert_index_equal(merge(s1, s2, s3, dropna=True).index,
pd.DatetimeIndex(
['2013-09-24 13:00:00', '2013-09-24 17:00:00'],
dtype='datetime64[ns]', freq='4H')
)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joergmlpts/usb-camera",
"score": 3
} |
#### File: joergmlpts/usb-camera/usb-camera.py
```python
import cv2 # install on Ubuntu with 'sudo apt install python3-opencv'
# if missing, install on Ubuntu with 'sudo apt install python3-pyqt5'
from PyQt5.QtCore import Qt, QDateTime, QSize, QTimer
from PyQt5.QtGui import QKeySequence, QImage, QPixmap, QIcon
from PyQt5.QtWidgets import (QApplication, QMainWindow, QAction, QWidget,
QVBoxLayout, QHBoxLayout, QFrame, QLabel, qApp,
QFileDialog, QGroupBox, QPushButton, QComboBox)
import os, queue, sys, threading, time
class MainWindow(QMainWindow):
WINDOW_NAME = 'USB-Camera'
MSG_DURATION = 5000 # show messages for 5 seconds
def __init__(self, widget, app):
QMainWindow.__init__(self)
self.app = app
self.widget = widget
self.setWindowTitle(self.WINDOW_NAME)
self.setCentralWidget(widget)
# Menu
self.menu = self.menuBar()
self.file_menu = self.menu.addMenu("File")
exit_action = QAction("Exit", self)
exit_action.setShortcut(QKeySequence.Quit)
exit_action.triggered.connect(self.end_window)
self.file_menu.addAction(exit_action)
# Status Bar
self.status = self.statusBar()
def end_window(self):
self.widget.end_widget()
self.close()
# show text in status message
def show_message(self, msg):
self.status.showMessage(msg, self.MSG_DURATION)
self.app.processEvents()
class CameraPicture(QLabel):
def __init__(self, camera_widget):
super().__init__()
self.camera_widget = camera_widget
def sizeHint(self):
return QSize(self.camera_widget.cam_width + 2 * self.frameWidth(),
self.camera_widget.cam_height + 2 * self.frameWidth())
# display frame in GUI, called by update_frame
def display_frame(self, frame):
image = QImage(frame, frame.shape[1], frame.shape[0],
frame.strides[0], QImage.Format_RGB888)
size = QSize(self.size().width() - 2 * self.frameWidth(),
self.size().height() - 2 * self.frameWidth())
self.setPixmap(QPixmap.fromImage(image).scaled(size,
Qt.KeepAspectRatio))
class CameraWidget(QWidget):
# messages
NO_CAMERAS_MSG = 'No cameras found.'
CANNOT_OPEN_MSG = 'Cannot open camera %s.'
CANNOT_READ_MSG = 'Cannot get picture from camera.'
TAKING_PICTURE = "Taking picture '%s'."
CANNOT_WRITE_PICTURE = "Cannot write picture '%s'."
RECORDING_VIDEO = "Recording video '%s'."
CANNOT_WRITE_VIDEO = "Cannot write video '%s'."
PICTURE_TAKEN = 'Picture %s taken.'
# record button
RECORD_VIDEO = 'Record Video'
RECORD_ICON = 'rodentia-icons_media-record.svg'
STOP_RECORDING = 'Stop Recording'
STOP_ICON = 'rodentia-icons_media-playback-stop.svg'
CAMERA_ICON = 'mono-camera-mount.svg'
MIN_TIMER_DELAY = 20 # in milliseconds
# commands sent from main thread to video thread
VIDEO_OPEN = 0
VIDEO_FRAME = 1
VIDEO_CLOSE = 2
VIDEO_EXIT = 3
def __init__(self):
QWidget.__init__(self)
self.main_window = None
self.cam = None
self.cam_width = 640
self.cam_height = 480
self.cam_fps = 25.0
self.cameras = {}
self.combo_cams2camera = {}
self.cameras_scanned = False
self.flip = None
self.timer = None
inst_dir = os.path.dirname(sys.argv[0])
if os.path.islink(sys.argv[0]):
inst_dir = os.path.join(inst_dir, os.path.dirname(
os.readlink(sys.argv[0])))
self.icon_dir = os.path.join(inst_dir, 'icons')
# create queue of requests by threads for main thread and gui
self.requests_queue = queue.Queue(5)
# create queue and thread to record videos
self.video_queue = queue.Queue(100)
self.thread_videos = threading.Thread(target=self.thread_write_videos)
self.thread_videos.start()
# create queue and thread to take photos
self.picture_queue = queue.Queue(5)
self.thread_pictures = threading.Thread(target=self.
thread_write_pictures)
self.thread_pictures.start()
self.picture = None # next filename when taking picture
self.video = False # video recording in progress
#######
# GUI #
#######
# camera videostream
self.pixmap_view = CameraPicture(self)
self.pixmap_view.setFrameShape(QFrame.Box)
self.pixmap_view.setBaseSize(640, 480)
# save photos and videos
self.layout_save = QHBoxLayout()
self.btn_directory = QPushButton("Output Directory")
self.btn_directory.clicked.connect(self.change_output_directory)
self.layout_save.addWidget(self.btn_directory)
self.label_directory = QLabel('')
self.layout_save.addWidget(self.label_directory, stretch=1)
self.set_output_directory(os.getcwd())
self.btn_take_photo = QPushButton("Take Photo")
self.btn_take_photo.setIcon(QIcon(os.path.join(self.icon_dir,
self.CAMERA_ICON)))
self.btn_take_photo.clicked.connect(self.take_picture)
self.btn_record_video = QPushButton(self.RECORD_VIDEO)
self.btn_record_video.setIcon(QIcon(os.path.join(self.icon_dir,
self.RECORD_ICON)))
self.btn_record_video.clicked.connect(self.record_video)
self.layout_save.addWidget(self.btn_take_photo)
self.layout_save.addWidget(self.btn_record_video)
# select camera
self.layout_group_cam = QHBoxLayout()
self.btn_update_cameras = QPushButton('Re-Scan Cameras')
self.btn_update_cameras.clicked.connect(self.update_cameras)
self.combo_cams = QComboBox()
self.combo_cams.currentIndexChanged.connect(self.change_camera)
self.combo_flip = QComboBox()
self.combo_flip.currentIndexChanged.connect(self.change_flip)
for item in ['No Flip', 'Flip Horizontally', 'Flip Vertically',
'Flip Both']: self.combo_flip.addItem(item)
self.layout_group_cam.addWidget(QLabel('Camera:'))
self.layout_group_cam.addWidget(self.combo_cams)
self.layout_group_cam.addWidget(self.combo_flip)
self.layout_group_cam.addWidget(self.btn_update_cameras)
# main layout
self.main_layout = QVBoxLayout()
self.main_layout.addLayout(self.layout_group_cam)
self.main_layout.addWidget(self.pixmap_view)
self.main_layout.addLayout(self.layout_save)
self.setLayout(self.main_layout)
self.enable_buttons(False)
# called upon exit, stops timer and shuts down threads
def end_widget(self):
self.end_camera()
self.picture_queue.put(None)
self.video_queue.put((self.VIDEO_EXIT, None))
self.thread_pictures.join()
self.thread_videos.join()
# show status message
def show_message(self, msg):
if self.main_window:
self.main_window.show_message(msg)
# clear status message
def clear_message(self):
self.show_message('')
# enable or disable buttons, only enabled when a camera selected
def enable_buttons(self, enable=True):
for btn in [self.btn_take_photo, self.btn_record_video,
self.combo_flip]: btn.setEnabled(enable)
# display file dialog to select an output directory
def change_output_directory(self):
directory = QFileDialog.getExistingDirectory(None,
"choose output directory", self.output_path,
QFileDialog.ShowDirsOnly)
if directory:
self.set_output_directory(directory)
self.clear_message()
else:
self.show_message('Output directory not changed.')
# set output directory
def set_output_directory(self, directory):
self.output_path = directory
self.label_directory.setText(directory)
# test if camera can be opened
def camera_exists(self, idx):
cam = cv2.VideoCapture(idx)
rsl = False
if cam.isOpened():
self.cameras[idx] = ('%dx%d %.1ffps' %
(cam.get(cv2.CAP_PROP_FRAME_WIDTH),
cam.get(cv2.CAP_PROP_FRAME_HEIGHT),
cam.get(cv2.CAP_PROP_FPS)))
rsl = True
cam.release()
return rsl
# query available cameras, populate combo box
def update_cameras(self):
QApplication.setOverrideCursor(Qt.WaitCursor)
self.end_camera()
self.cameras = {}
if not self.cameras_scanned and len(sys.argv) > 1:
for arg in sys.argv[1:]:
if arg.isdigit():
arg = int(arg)
self.camera_exists(arg)
else:
idx = 0
no_failures = 0
MAX_FAILURES = 2
while True:
if self.camera_exists(idx):
no_failures = 0
else:
no_failures += 1
if no_failures > MAX_FAILURES:
break
idx += 1
self.cameras_scanned = True
self.combo_cams.clear()
self.combo_cams2camera = {}
for idx, info in self.cameras.items():
item = f'{idx}: {info}'
self.combo_cams2camera[item] = idx
self.combo_cams.addItem(item)
QApplication.restoreOverrideCursor()
# called upon selection of another camera
def change_camera(self, i):
QApplication.setOverrideCursor(Qt.WaitCursor)
self.end_camera()
if i >= 0:
self.begin_camera(self.combo_cams2camera[self.combo_cams.
itemText(i)])
QApplication.restoreOverrideCursor()
# called upon selection of another flip
def change_flip(self, i):
idx2flip = [None, 1, 0, -1]
self.flip = idx2flip[i]
# initialize GUI, query available cameras, select first camera
def initialize(self, main_window):
self.main_window = main_window
self.update_cameras()
if not self.cameras:
self.show_message(self.NO_CAMERAS_MSG)
return
self.begin_camera(self.combo_cams2camera[self.combo_cams.
currentText()])
# open camera and set up timer to process frames
def begin_camera(self, camera):
self.clear_message()
assert camera in self.cameras
self.cam = cv2.VideoCapture(camera)
if not self.cam.isOpened():
no_retries = 0
MAX_RETRIES = 5
while not self.cam.isOpened() and no_retries < MAX_RETRIES:
time.sleep(1)
self.cam.open(camera)
no_retries += 1
if not self.cam.isOpened():
self.end_camera()
self.show_message(self.CANNOT_OPEN_MSG % camera)
return
self.cam_width = int(self.cam.get(cv2.CAP_PROP_FRAME_WIDTH))
self.cam_height = int(self.cam.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.cam_fps = self.cam.get(cv2.CAP_PROP_FPS)
self.pixmap_view.setMinimumSize(self.cam_width // 2,
self.cam_height // 2)
self.timer = QTimer()
self.timer.timeout.connect(self.update_frame)
self.timer_delay = int(1000.0 / self.cam_fps)
if self.timer_delay < self.MIN_TIMER_DELAY:
self.timer_delay = self.MIN_TIMER_DELAY
self.timer.start(self.timer_delay)
self.enable_buttons()
# end camera use
def end_camera(self):
self.enable_buttons(False)
if self.cam:
self.cam.release()
self.cam = None
if self.timer:
self.timer.stop()
self.timer = None
self.end_video()
# read one frame from camera, called by update_frame
def read_frame(self):
if self.cam:
ret_val, bgr_frame = self.cam.read()
if ret_val:
if self.flip is not None:
bgr_frame = cv2.flip(bgr_frame, self.flip)
return bgr_frame, cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB)
else:
self.show_message(self.CANNOT_READ_MSG)
self.cam = None
self.timer.stop()
# for my 25fps microscope, the timer calls this function every 40ms
def update_frame(self):
while not self.requests_queue.empty():
req = self.requests_queue.get()
req()
self.requests_queue.task_done()
frames = self.read_frame()
if frames is not None:
bgr_frame, rgb_frame = frames
if self.picture:
self.picture_queue.put((self.picture, bgr_frame))
self.picture = None
if self.video:
self.video_queue.put((self.VIDEO_FRAME, bgr_frame))
self.pixmap_view.display_frame(rgb_frame)
# generate a filename that consists of the current date and time
def generate_filename(self):
format = 'yyyy-MM-dd_HH-mm-ss'
return os.path.join(self.output_path,
QDateTime.currentDateTime().toString(format))
# makes sure that 'msg' disappears from scope and cannot be re-used
def queue_message(self, msg_base, filename):
msg = msg_base % filename
self.requests_queue.put(lambda : self.show_message(msg))
# runs in thread, gets filenames and frames and writes pictures
def thread_write_pictures(self):
while True:
cmd = self.picture_queue.get()
if not cmd is None:
filename, frame = cmd
self.queue_message(self.TAKING_PICTURE if cv2.imwrite(filename,
frame)
else self.CANNOT_WRITE_PICTURE, filename)
self.picture_queue.task_done()
if cmd is None:
break
# runs in thread, writes videos
def thread_write_videos(self):
video = None
while True:
cmd, arg = self.video_queue.get()
if cmd == self.VIDEO_FRAME:
if video:
video.write(arg)
elif cmd == self.VIDEO_OPEN:
fourcc = cv2.VideoWriter_fourcc('m','p','4','v')
video = cv2.VideoWriter(arg, fourcc,
1000.0 / self.timer_delay,
(self.cam_width, self.cam_height))
if video and video.isOpened():
self.queue_message(self.RECORDING_VIDEO, arg)
self.requests_queue.put(lambda:self.btn_record_video.\
setIcon(QIcon(os.path.join(self.\
icon_dir,
self.STOP_ICON))))
self.requests_queue.put(lambda:self.btn_record_video.\
setText(self.STOP_RECORDING))
else:
video = None
self.queue_message(self.CANNOT_WRITE_VIDEO, arg)
self.video = False
elif cmd == self.VIDEO_CLOSE:
if video:
video.release()
video = None
self.video_queue.task_done()
if cmd == self.VIDEO_EXIT:
if video:
video.release()
video = None
break
# request to take a picture in next call of update_frame
def take_picture(self):
self.picture = self.generate_filename() + '.jpg'
# request to record a video over subsequent calls of update_frame
def record_video(self):
if self.video:
self.end_video()
return
filename = self.generate_filename() + '.mp4'
# send open command and filename to thread
self.video_queue.put((self.VIDEO_OPEN, filename))
self.video = True
# end video recording
def end_video(self):
if self.video:
self.btn_record_video.setText(self.RECORD_VIDEO)
self.btn_record_video.setIcon(QIcon(os.path.join(self.icon_dir,
self.RECORD_ICON)))
# send close command to thread
self.video_queue.put((self.VIDEO_CLOSE, None))
self.clear_message()
self.video = False
if __name__ == '__main__':
app = QApplication(sys.argv)
widget = CameraWidget()
app.aboutToQuit.connect(widget.end_widget)
main_window = MainWindow(widget, app)
widget.initialize(main_window)
main_window.show()
sys.exit(app.exec_())
``` |
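Picture and video writing in the widget above is decoupled from the GUI thread by pushing work items onto a `queue.Queue` that a dedicated worker drains, with `None` (or `VIDEO_EXIT`) as the shutdown signal. A minimal stand-alone sketch of that producer/consumer pattern, using plain strings instead of OpenCV frames:

```python
import queue
import threading

work_queue = queue.Queue(maxsize=5)

def worker():
    """Drain the queue until the None sentinel arrives (same shutdown idea as above)."""
    while True:
        item = work_queue.get()
        if item is not None:
            print('writing', item)
        work_queue.task_done()
        if item is None:
            break

t = threading.Thread(target=worker)
t.start()

for name in ('2024-01-01_12-00-00.jpg', '2024-01-01_12-00-05.jpg'):
    work_queue.put(name)

work_queue.put(None)   # sentinel: ask the worker to exit
t.join()
```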
{
"source": "joergneulist/banker",
"score": 3
} |
#### File: banker/app/main.py
```python
import sys
from db_driver import db_driver
import import_file_dkb
def load(db):
data = db.load()
for acc in data:
acc['Latest'] = max([x['Wertstellung'] for x in acc['Data']])
return data
def overview(data):
for acc in data:
print('account "{}" ({}) has {} records, the latest from {}'\
.format(acc['Name'], acc['Code'], len(acc['Data']), acc['Latest']))
def find_account(data, code):
lookup = {acc['Code']: idx for idx, acc in enumerate(data)}
if code in lookup:
return data[lookup[code]]
def update(db, data, filename):
print(f'READ: {filename}')
file_data = import_file_dkb.read_from_file(filename)
print('FOUND: account {} has {} records'.format(file_data['code'], len(file_data['entries'])))
acc = find_account(data, file_data['code'])
print('DATABASE: account {} has {} records, the latest from {}'.format(acc['Name'], len(acc['Data']), acc['Latest']))
inserted = 0
#for entry in account['entries'][::-1]:
# if not newest or entry['Wertstellung'].date() > newest:
# db.insert(account['id'], account['fields'], entry)
# inserted += 1
# else:
# if db.insert_unique(account['id'], account['fields'], entry):
# inserted += 1
#db.commit()
print(f'STORE: {inserted} new records')
if __name__ == "__main__":
db = db_driver('db_config.json')
data = load(db)
overview(data)
if len(sys.argv) > 1:
update(db, data, sys.argv[1])
``` |
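The commented-out block in `update` hints at the intended insert logic: entries newer than the last known value date are inserted directly, older ones go through a uniqueness check, and the transaction is committed at the end. A hedged reconstruction of that loop; the `db.insert`/`db.insert_unique` signatures and the `id`/`fields` keys are taken from those comments and are not verified against `db_driver`:

```python
# Hedged reconstruction of the commented-out insert loop above; the db_driver
# method signatures and the 'id'/'fields' keys are assumptions based on those
# comments, not verified code.
def insert_new_entries(db, account, newest):
    inserted = 0
    for entry in account['entries'][::-1]:
        if not newest or entry['Wertstellung'].date() > newest:
            db.insert(account['id'], account['fields'], entry)
            inserted += 1
        elif db.insert_unique(account['id'], account['fields'], entry):
            inserted += 1
    db.commit()
    return inserted
```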
{
"source": "joergoster/Optuna-Game-Parameter-Tuner",
"score": 3
} |
#### File: Optuna-Game-Parameter-Tuner/tools/filter.py
```python
__version__ = 'v0.1.0'
__script_name__ = 'filter'
def main():
# Change here, be sure to remove the dupes in the input file, use deduplicator.py
infn = 'archbishop_chancellor_random_startpos.fen'
filter = ['A', 'M', 'a', 'm']
outfn = f'out_{infn}'
total_lines = 0
saved_lines = 0
with open(outfn, 'w') as w:
with open(infn) as f:
for lines in f:
total_lines += 1
line = lines.rstrip()
pcs = line.split('[')[0]
selection = line.split('[')[1].split(']')[0]
found = False
for n in filter:
if n in pcs:
w.write(f'{line}\n')
saved_lines += 1
found = True
break
if not found:
if selection.count('-') < 4:
w.write(f'{line}\n')
saved_lines += 1
print(f'total lines: {total_lines}, saved lines: {saved_lines}')
if __name__ == "__main__":
main()
```
#### File: tourney_manager/duel/duel.py
```python
__author__ = 'fsmosca'
__script_name__ = 'Duel'
__version__ = 'v1.18.0'
__credits__ = ['musketeerchess']
from pathlib import Path
import subprocess
import argparse
import time
import random
import concurrent.futures
from concurrent.futures import ProcessPoolExecutor
import logging
from statistics import mean
from typing import List
import multiprocessing
from datetime import datetime
import glob
import math
logging.basicConfig(
filename='log_duel.txt', filemode='w',
level=logging.DEBUG,
format='%(asctime)s - pid%(process)5d - %(levelname)5s - %(message)s')
class Elo:
"""
Ref.: https://github.com/cutechess/cutechess/blob/master/projects/lib/src/elo.cpp
"""
def __init__(self, win, loss, draw):
self.wins = win
self.losses = loss
self.draws = draw
self.n = win + loss + draw
self.mu = (self.wins + self.draws / 2) / self.n
def stdev(self):
n = self.n
wr = self.wins / n
lr = self.losses / n
dr = self.draws / n
dev_w = wr * math.pow(1.0 - self.mu, 2.0)
dev_l = lr * math.pow(0.0 - self.mu, 2.0)
dev_d = dr * math.pow(0.5 - self.mu, 2.0)
return math.sqrt(dev_w + dev_l + dev_d) / math.sqrt(n)
def draw_ratio(self):
return self.draws / self.n
def diff(self, p=None):
"""Elo difference"""
p = self.mu if p is None else p
# Manage extreme values of p: if 1.0 or more, make it 0.99.
# If 0 or below, make it 0.01. With 0.01 the max rating diff is about 800.
p = min(0.99, max(0.01, p))
return -400.0 * math.log10(1.0 / p - 1.0)
def error_margin(self, confidence_level=95):
a = (1 - confidence_level/100) / 2
mu_min = self.mu + self.phi_inv(a) * self.stdev()
mu_max = self.mu + self.phi_inv(1-a) * self.stdev()
return (self.diff(mu_max) - self.diff(mu_min)) / 2.0
def erf_inv(self, x):
pi = 3.1415926535897
a = 8.0 * (pi - 3.0) / (3.0 * pi * (4.0 - pi))
y = math.log(1.0 - x * x)
z = 2.0 / (pi * a) + y / 2.0
ret = math.sqrt(math.sqrt(z * z - y / a) - z)
if x < 0.0:
return -ret
return ret
def phi_inv(self, p):
return math.sqrt(2.0) * self.erf_inv(2.0 * p - 1.0)
def los(self):
"""LOS - Likelihood Of Superiority"""
if self.wins == 0 and self.losses == 0:
return 0
return 100 * (0.5 + 0.5 * math.erf((self.wins - self.losses) / math.sqrt(2.0 * (self.wins + self.losses))))
def confidence_interval(self, confidence_level=95, type_='elo'):
e = self.diff()
em = self.error_margin(confidence_level)
if type_ == 'rate':
return self.expected_score_rate(e-em), self.expected_score_rate(e+em)
else:
return e-em, e+em
def expected_score_rate(self, rd):
return 1 / (1 + 10 ** (-rd/400))
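# Illustrative usage of the Elo helper above (added for clarity, not part of the original script):
#   elo = Elo(win=60, loss=20, draw=20)
#   print(f'{elo.diff():+.1f} +/- {elo.error_margin(95):.1f} Elo, LOS: {elo.los():.1f}%, DrawRatio: {elo.draw_ratio():.2f}')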
class Timer:
def __init__(self, base_time, inc_time):
"""
The time unit is in ms (milliseconds)
"""
self.base_time = base_time
self.inc_time = inc_time
self.rem_time = self.base_time + self.inc_time
self.init_base_time = base_time
self.init_inc_time = inc_time
def update(self, elapse):
"""
This is called after every engine move is completed.
"""
self.rem_time -= elapse
self.rem_time += self.inc_time
def is_zero_time(self):
return self.rem_cs() <= 0
def rem_cs(self):
return self.rem_time // 10
class Duel:
def __init__(self, e1, e2, fens, rounds, concurrency, pgnout, repeat, draw_option,
resign_option, variant, event):
self.e1 = e1
self.e2 = e2
self.fens = fens
self.rounds = rounds
self.concurrency = concurrency
self.pgnout = pgnout
self.repeat = repeat
self.draw_option = draw_option
self.resign_option = resign_option
self.variant = variant
self.event = event
self.base_time_sec = 5
self.inc_time_sec = 0.05
self.betza = []
self.lock = multiprocessing.Manager().Lock()
def save_game(self, fen, moves, scores, depths, e1_name, e2_name,
start_turn, gres, termination, round_num, subround):
self.lock.acquire()
logging.info('Saving game ...')
tag_date = datetime.today().strftime('%Y.%m.%d')
round_tag_value = f'{round_num}.{subround}'
betzavalue = ''
for b in self.betza:
p = b.split()[1].strip().split('&')[0]
v = b.split()[2].strip()
betzavalue += f'{p}:{v};'
betzavalue = betzavalue[0:-1] # strip the last semi-colon.
with open(self.pgnout, 'a') as f:
f.write(f'[Event "{self.event}"]\n')
f.write('[Site "Computer"]\n')
f.write(f'[Date "{tag_date}"]\n')
f.write(f'[Round "{round_tag_value}"]\n')
f.write(f'[White "{e1_name if start_turn else e2_name}"]\n')
f.write(f'[Black "{e1_name if not start_turn else e2_name}"]\n')
f.write(f'[Result "{gres}"]\n')
f.write(f'[TimeControl "{self.base_time_sec}+{self.inc_time_sec}"]\n')
f.write(f'[Variant "{self.variant}"]\n')
if self.variant == 'musketeer':
f.write(f'[VariantFamily "seirawan"]\n')
f.write(f'[VariantMen "{betzavalue}"]\n')
if termination != '':
f.write(f'[Termination "{termination}"]\n')
if not isinstance(fen, int):
f.write(f'[FEN "{fen}"]\n')
else:
f.write('\n')
move_len = len(moves)
f.write(f'[PlyCount "{move_len}"]\n\n')
for i, (m, s, d) in enumerate(zip(moves, scores, depths)):
num = i + 1
if num % 2 == 0:
if start_turn:
str_num = f'{num // 2}... '
else:
str_num = f'{num // 2 + 1}. '
else:
num += 1
if start_turn:
str_num = f'{num // 2}. '
else:
str_num = f'{num // 2}... '
if move_len - 1 == i:
f.write(f'{str_num}{m} {{{s}/{d}}} {gres}')
else:
f.write(f'{str_num}{m} {{{s}/{d}}} ')
if (i + 1) % 5 == 0:
f.write('\n')
f.write('\n\n')
self.lock.release()
def match(self, fen, round_num) -> List[float]:
"""
Run an engine match between e1 and e2. Save the game and print result
from e1 perspective.
"""
all_e1score = []
is_show_search_info = False
subround = 0
# Start engine match, 2 games will be played.
for gn in range(self.repeat):
logging.info(f'Match game no. {gn + 1}')
logging.info(f'Test engine plays as {"first" if gn % 2 == 0 else "second"} engine.')
e1_folder, e2_folder = Path(self.e1['cmd']).parent, Path(self.e2['cmd']).parent
subround = gn + 1
pe1 = subprocess.Popen(self.e1['cmd'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True, bufsize=1,
cwd=e1_folder)
pe2 = subprocess.Popen(self.e2['cmd'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True, bufsize=1,
cwd=e2_folder)
self.e1.update({'proc': pe1})
self.e2.update({'proc': pe2})
if gn % 2 == 0:
eng = [self.e1, self.e2]
else:
eng = [self.e2, self.e1]
for i, pr in enumerate(eng):
e = pr['proc']
pn = pr['name']
send_command(e, 'xboard', pn)
send_command(e, 'protover 2', pn)
for eline in iter(e.stdout.readline, ''):
line = eline.strip()
logging.debug(f'{pn} < {line}')
if 'done=1' in line:
break
# Set param to engines.
for k, v in pr['opt'].items():
send_command(e, f'option {k}={v}', pn)
timer, depth_control = [], []
check_betza = False
for i, pr in enumerate(eng):
e = pr['proc']
pn = pr['name']
send_command(e, f'variant {self.variant}', pn)
send_command(e, 'ping 1', pn)
for eline in iter(e.stdout.readline, ''):
line = eline.strip()
logging.debug(f'{pn} < {line}')
if not check_betza and line.startswith('piece'):
self.betza.append(line)
if 'pong' in line:
break
check_betza = True
send_command(e, 'new', pn)
# Set to ponder on
send_command(e, 'hard', pn)
# Set to ponder off
send_command(e, 'easy', pn)
send_command(e, 'post', pn)
# Define time control, base time in minutes and inc in seconds.
base_minv, base_secv, incv = get_tc(pr['tc'])
all_base_sec = base_minv * 60 + base_secv
self.base_time_sec = all_base_sec
self.inc_time_sec = incv
logging.info(f'base_minv: {base_minv}m, base_secv: {base_secv}s, incv: {incv}s')
# Send level command to each engine.
if pr['depth'] > 0:
tbase = 300
else:
tbase = max(1, all_base_sec // 60)
send_command(e, f'level 0 {tbase} {float(incv):0.2f}', pn)
# Setup Timer, convert base time to ms and inc in sec to ms
timer.append(Timer(all_base_sec * 1000, int(incv * 1000)))
depth_control.append(pr['depth'])
send_command(e, 'force', pn)
send_command(e, f'setboard {fen}', pn)
send_command(e, 'ping 2', pn)
for eline in iter(e.stdout.readline, ''):
line = eline.strip()
logging.debug(f'{pn} < {line}')
if 'pong' in line:
break
num, side, move, line, game_end = 0, 0, None, '', False
score_history, elapse_history, depth_history, move_hist = [], [], [], []
start_turn = turn(fen) if not isinstance(fen, int) else True
gres, gresr, e1score = '*', '*', 0.0
is_time_over = [False, False]
current_color = start_turn # True if white to move
test_engine_color = True if ((start_turn and gn % 2 == 0) or (not start_turn and gn % 2 != 0)) else False
termination = ''
# Start the game.
while True:
if depth_control[side] > 0:
send_command(eng[side]['proc'], f'sd {depth_control[side]}', eng[side]['name'])
else:
send_command(eng[side]['proc'], f'time {timer[side].rem_cs()}', eng[side]['name'])
send_command(eng[side]['proc'], f'otim {timer[not side].rem_cs()}', eng[side]['name'])
t1 = time.perf_counter_ns()
if num == 0:
send_command(eng[side]['proc'], 'go', eng[side]['name'])
else:
send_command(eng[side]['proc'], f'{move}', eng[side]['name'])
# Send another go because of force.
if num == 1:
send_command(eng[side]['proc'], 'go', eng[side]['name'])
num += 1
score, depth = None, None
for eline in iter(eng[side]['proc'].stdout.readline, ''):
line = eline.strip()
logging.debug(f'{eng[side]["name"]} < {line}')
if is_show_search_info:
if not line.startswith('#'):
print(line)
# Save score and depth from engine search info.
try:
value_at_index_0 = line.split()[0]
except IndexError:
pass
else:
if value_at_index_0.isdigit():
score = int(line.split()[1]) # cp
depth = int(line.split()[0])
# Check end of game as claimed by engines.
game_endr, gresr, e1scorer, termi = is_game_end(line, test_engine_color)
if game_endr:
game_end, gres, e1score, termination = game_endr, gresr, e1scorer, termi
break
if 'move ' in line and not line.startswith('#'):
elapse = (time.perf_counter_ns() - t1) // 1000000
timer[side].update(elapse)
elapse_history.append(elapse)
move = line.split('move ')[1]
move_hist.append(move)
score_history.append(score if score is not None else 0)
depth_history.append(depth if depth is not None else 0)
if (timer[side].init_base_time + timer[side].init_inc_time > 0
and timer[side].is_zero_time()):
is_time_over[current_color] = True
termination = 'forfeits on time'
logging.info('time is over')
break
if game_end:
# Send result to each engine after a claim of such result.
for e in eng:
send_command(e['proc'], f'result {gresr}', e['name'])
break
# Game adjudications
# Resign
if (self.resign_option['movecount'] is not None
and self.resign_option['score'] is not None):
game_endr, gresr, e1scorer = adjudicate_win(
test_engine_color, score_history, self.resign_option, start_turn)
if game_endr:
gres, e1score = gresr, e1scorer
logging.info('Game ends by resign adjudication.')
break
# Draw
if (self.draw_option['movenumber'] is not None
and self.draw_option['movecount'] is not None
and self.draw_option['score'] is not None):
game_endr, gresr, e1scorer = adjudicate_draw(
score_history, self.draw_option)
if game_endr:
gres, e1score = gresr, e1scorer
logging.info('Game ends by draw adjudication.')
break
# Time is over
if depth_control[side] == 0:
game_endr, gresr, e1scorer = time_forfeit(
is_time_over[current_color], current_color, test_engine_color)
if game_endr:
gres, e1score = gresr, e1scorer
break
side = not side
current_color = not current_color
if self.pgnout is not None:
self.save_game(fen, move_hist, score_history,
depth_history, eng[0]["name"], eng[1]["name"],
start_turn, gres, termination, round_num, subround)
for i, e in enumerate(eng):
send_command(e['proc'], 'quit', e['name'])
all_e1score.append(e1score)
return all_e1score
def round_match(self, fen, round_num) -> List[float]:
"""
Play a match between e1 and e2 using fen as the starting position. By default
2 games will be played, with colors reversed in the second game.
"""
return self.match(fen, round_num)
def run(self):
"""Start the match."""
joblist = []
test_engine_score_list = []
wins, losses, draws = 0, 0, 0
# Use Python 3.8 or higher
with ProcessPoolExecutor(max_workers=self.concurrency) as executor:
for i, fen in enumerate(self.fens if len(self.fens) else range(self.rounds)):
if i >= self.rounds:
break
job = executor.submit(self.round_match, fen, i+1)
joblist.append(job)
for future in concurrent.futures.as_completed(joblist):
try:
test_engine_score = future.result()
for s in test_engine_score:
test_engine_score_list.append(s)
if s == 1:
wins += 1
elif s == 0:
losses += 1
elif s == 0.5:
draws += 1
perf = mean(test_engine_score_list)
games = len(test_engine_score_list)
cf = 95
elo = Elo(wins, losses, draws)
elodiff = elo.diff()
em = elo.error_margin(cf)
lowci, highci = elo.confidence_interval(cf, 'elo')
los = elo.los()
drawrate = elo.draw_ratio()
print(f'Score of {self.e1["name"]} vs {self.e2["name"]}: {wins} - {losses} - {draws} [{perf:0.8f}] {games}')
print(f'Elo difference: {elodiff:+0.1f} +/- {em:0.1f}, CI: [{lowci:0.1f}, {highci:0.1f}], LOS: {los:0.1f}%, DrawRatio: {100*drawrate:0.1f}%')
except concurrent.futures.process.BrokenProcessPool as ex:
print(f'exception: {ex}')
logging.info(f'final test score: {mean(test_engine_score_list)}')
print('Finished match')
def define_engine(engine_option_value):
"""
Define engine files, name and options.
"""
ed1, ed2 = {}, {}
e1 = {'proc': None, 'cmd': None, 'name': 'test', 'opt': ed1, 'tc': '', 'depth': 0}
e2 = {'proc': None, 'cmd': None, 'name': 'base', 'opt': ed2, 'tc': '', 'depth': 0}
for i, eng_opt_val in enumerate(engine_option_value):
for value in eng_opt_val:
if i == 0:
if 'cmd=' in value:
e1.update({'cmd': value.split('=')[1]})
elif 'option.' in value:
# Todo: support float value
# option.QueenValueOpening=1000
optn = value.split('option.')[1].split('=')[0]
optv = int(value.split('option.')[1].split('=')[1])
ed1.update({optn: optv})
e1.update({'opt': ed1})
elif 'tc=' in value:
e1.update({'tc': value.split('=')[1]})
elif 'name=' in value:
e1.update({'name': value.split('=')[1]})
elif 'depth=' in value:
e1.update({'depth': int(value.split('=')[1])})
elif i == 1:
if 'cmd=' in value:
e2.update({'cmd': value.split('=')[1]})
elif 'option.' in value:
optn = value.split('option.')[1].split('=')[0]
optv = int(value.split('option.')[1].split('=')[1])
ed2.update({optn: optv})
e2.update({'opt': ed2})
elif 'tc=' in value:
e2.update({'tc': value.split('=')[1]})
elif 'name=' in value:
e2.update({'name': value.split('=')[1]})
elif 'depth=' in value:
e2.update({'depth': int(value.split('=')[1])})
return e1, e2
def get_fen_list(fn, is_rand=True, posperfile=-1):
"""
Read a fen or epd file and return a list of fens. Warning: if your file is very big,
loading all positions into memory may take a lot of RAM and time.
"""
fens = []
if fn is None:
return fens
# If fn is a folder read all .fen or .epd in this folder.
fnpath = Path(fn)
if fnpath.is_dir():
# Read epd files.
for name in glob.glob(f'{fn}/*.epd'):
file_fens = []
with open(name) as f:
for lines in f:
fen = lines.strip()
file_fens.append(fen)
if posperfile != -1:
if is_rand:
selections = random.sample(file_fens, posperfile)
else:
selections = file_fens[0:posperfile]
else:
if is_rand:
random.shuffle(file_fens)
selections = file_fens
fens.extend(selections)
# Read fen files.
for name in glob.glob(f'{fn}/*.fen'):
file_fens = []
with open(name) as f:
for lines in f:
fen = lines.strip()
file_fens.append(fen)
if posperfile != -1:
if is_rand:
selections = random.sample(file_fens, posperfile)
else:
selections = file_fens[0:posperfile]
else:
if is_rand:
random.shuffle(file_fens)
selections = file_fens
fens.extend(selections)
else:
saved = 0
with open(fn) as f:
for lines in f:
fen = lines.strip()
fens.append(fen)
saved += 1
if posperfile != -1 and saved >= posperfile:
break
if is_rand:
random.shuffle(fens)
return fens
def get_tc(tcd):
"""
tc=0/3+1 or 3+1, blitz 3s + 1s inc
tc=0/3:1+1 or 3:1+1, blitz 3m + 1s with 1s inc
tc=0/0:5+0.1 or 0:5+0.1, blitz 0m + 5s + 0.1s inc
"""
base_minv, base_secv, inc_secv = 0, 0, 0.0
logging.info(f'tc value: {tcd}')
if tcd == '' or tcd == 'inf':
return base_minv, base_secv, inc_secv
# Check base time with minv:secv format.
if '/' in tcd:
basev = tcd.split('/')[1].split('+')[0].strip()
else:
basev = tcd.split('+')[0].strip()
if ':' in basev:
base_minv = int(basev.split(':')[0])
base_secv = int(basev.split(':')[1])
else:
base_secv = int(basev)
if '/' in tcd:
# 0/0:5+None
inc_value = tcd.split('/')[1].split('+')[1].strip()
if inc_value != 'None':
inc_secv = float(inc_value)
else:
# 0:5+None
inc_value = tcd.split('+')[1].strip()
if inc_value != 'None':
inc_secv = float(inc_value)
return base_minv, base_secv, inc_secv
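# Illustrative examples for get_tc (added for clarity, not part of the original script):
#   get_tc('0/3+1')   -> (0, 3, 1.0)   base 3s with 1s increment
#   get_tc('3:1+1')   -> (3, 1, 1.0)   base 3m 1s with 1s increment
#   get_tc('0:5+0.1') -> (0, 5, 0.1)   base 5s with 0.1s increment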
def turn(fen):
"""
Return side to move of the given fen.
"""
side = fen.split()[1].strip()
if side == 'w':
return True
return False
def adjudicate_win(test_engine_color, score_history, resign_option, start_turn):
logging.info('Try adjudicating this game by win ...')
ret, gres, e1score = False, '*', 0.0
if len(score_history) >= 40:
# fcp is the first player to move, can be white or black.
fcp_score = score_history[0::2]
scp_score = score_history[1::2]
fwin_cnt, swin_cnt = 0, 0
movecount = resign_option['movecount']
score = resign_option['score']
for i, (fs, ss) in enumerate(zip(reversed(fcp_score),
reversed(scp_score))):
if i >= movecount:
break
if fs >= score and ss <= -score:
fwin_cnt += 1
if fwin_cnt >= movecount:
break
elif fs <= -score and ss >= score:
swin_cnt += 1
if swin_cnt >= movecount:
break
if fwin_cnt >= movecount:
gres = '1-0' if start_turn else '0-1'
if gres == '1-0':
e1score = 1.0 if test_engine_color else 0
else:
e1score = 1.0 if not test_engine_color else 0
ret = True
elif swin_cnt >= movecount:
# The second player won and is playing white.
gres = '1-0' if not start_turn else '0-1'
if gres == '1-0':
e1score = 1.0 if test_engine_color else 0
else:
e1score = 1.0 if not test_engine_color else 0
ret = True
return ret, gres, e1score
def adjudicate_draw(score_history, draw_option):
logging.info('Try adjudicating this game by draw ...')
ret, gres, e1score = False, '*', 0.0
if len(score_history) >= draw_option['movenumber'] * 2:
fcp_score = score_history[0::2]
scp_score = score_history[1::2]
draw_cnt = 0
movecount = draw_option['movecount']
score = draw_option['score']
for i, (fs, ss) in enumerate(zip(reversed(fcp_score),
reversed(scp_score))):
if i >= movecount:
break
if abs(fs) <= score and abs(ss) <= score:
draw_cnt += 1
if draw_cnt >= movecount:
gres = '1/2-1/2'
e1score = 0.5
logging.info('Draw by adjudication.')
ret = True
return ret, gres, e1score
def is_game_end(line, test_engine_color):
game_end, gres, e1score, termination = False, '*', 0.0, ''
if '1-0' in line:
game_end = True
e1score = 1.0 if test_engine_color else 0.0
gres = '1-0'
termination = 'white mates black'
elif '0-1' in line:
game_end = True
e1score = 1.0 if not test_engine_color else 0.0
gres = '0-1'
termination = 'black mates white'
elif '1/2-1/2' in line:
game_end = True
e1score = 0.5
gres = '1/2-1/2'
if 'repetition' in line.lower():
termination = 'draw by repetition'
elif 'insufficient' in line.lower():
termination = 'draw by insufficient mating material'
elif 'fifty' in line.lower():
termination = 'draw by fifty-move rule'
elif 'stalemate' in line.lower():
termination = 'draw by stalemate'
return game_end, gres, e1score, termination
def param_to_dict(param):
"""
Convert string param to a dictionary.
"""
ret_param = {}
for par in param.split(','):
par = par.strip()
sppar = par.split() # Does not support param with space
spname = sppar[0].strip()
spvalue = int(sppar[1].strip())
ret_param.update({spname: spvalue})
return ret_param
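# Illustrative example for param_to_dict (added for clarity, not part of the original script):
#   param_to_dict('QueenValueOp 950, RookValueOp 500') -> {'QueenValueOp': 950, 'RookValueOp': 500}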
def time_forfeit(is_timeup, current_color, test_engine_color):
game_end, gres, e1score = False, '*', 0.0
if is_timeup:
# test engine loses as white
if current_color and test_engine_color:
gres = '0-1'
e1score = 0.0
game_end = True
print(f'test engine with color {test_engine_color} loses on time')
# test engine loses as black
elif not current_color and not test_engine_color:
gres = '1-0'
e1score = 0.0
game_end = True
print(f'test engine with color {test_engine_color} loses on time')
# test engine wins as white
elif not current_color and test_engine_color:
gres = '1-0'
e1score = 1.0
game_end = True
print(f'test engine with color {test_engine_color} wins on time')
# test engine wins as black
elif current_color and not test_engine_color:
gres = '0-1'
e1score = 1.0
game_end = True
print(f'test engine with color {test_engine_color} wins on time')
if game_end:
logging.info('Game ends by time forfeit.')
return game_end, gres, e1score
def send_command(proc, command, name=''):
"""Send command to engine process."""
proc.stdin.write(f'{command}\n')
logging.debug(f'{name} > {command}')
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
prog='%s %s' % (__script_name__, __version__),
description='Conduct engine vs engine matches for cecp engines.',
epilog='%(prog)s')
parser.add_argument('-rounds', required=False,
help='Number of games per encounter per position.\n'
'If rounds is 1 (default) and repeat is 2\n'
'then total games will be 2.',
type=int, default=1)
parser.add_argument('-repeat', required=False,
help='Number of times to play a certain opening\n'
'default is 2 so that the position will be\n'
'played twice and each engine takes both white\n'
'and black at the start of each game.',
type=int, default=2)
parser.add_argument('-engine', nargs='*', action='append', required=True,
metavar=('cmd=', 'name='),
help='This option is used to define the engines.\n'
'Example:\n'
'-engine cmd=engine1.exe name=test ...'
' --engine cmd=engine2.exe name=base')
parser.add_argument('-draw', nargs='*', action='append', required=False,
metavar=('movenumber=', 'movecount='),
help='Adjudicates game to a draw result. Example:\n'
'-draw movenumber=40 movecount=10 score=0')
parser.add_argument('-resign', nargs='*', action='append', required=False,
metavar=('movecount=', 'score='),
help='Adjudicates game to a loss result. Example:\n'
'-resign movecount=10 score=900')
parser.add_argument('-pgnout', required=False,
metavar='pgn_output_filename',
help='pgn output filename')
parser.add_argument('-concurrency', required=False,
help='number of game to run in parallel, default=1',
type=int, default=1)
parser.add_argument('-variant', required=True, help='name of the variant')
parser.add_argument('-each', nargs='*', action='append', required=False,
metavar=('tc=', 'option.<option_name>='),
help='This option is used to apply to both engines.\n'
'Example where tc is applied to each engine:\n'
'-each tc=1+0.1\n'
'1 is in sec and 0.1 is the increment in sec.\n'
'-each tc=0:30+0.2\n'
'0 is in min, 30 is in sec, 0.2 is the increment in sec.\n'
'-each option.PawnValue=100\n'
'PawnValue is the name of the option. Or\n'
'-each tc=inf depth=4\n'
'to set the depth to 4.')
parser.add_argument('-openings', nargs='*', action='append',
required=False,
metavar=('file=<xxx.fen | xxx.epd>', 'random=<true | false>'),
help='Define start openings. Example:\n'
'-openings file=start.fen random=false posperfile=10\n'
'default random is true, default posperfile is -1 meaning all pos in a file.\n'
'If value of file is only a folder, then fen and epd files under that folder\n'
'will be considered. Example:\n'
'-openings file=c:/startpos ...\n'
'Start opening from move sequences is not supported.\n'
'Only fen and epd format are supported.')
parser.add_argument('-tournament', required=False, default='round-robin',
metavar='tour_type',
help='tournament type, default=round-robin')
parser.add_argument('-event', required=False, default='Computer Games',
help='Name of event, default=Computer Games')
parser.add_argument('-v', '--version', action='version', version=f'{__version__}')
args = parser.parse_args()
# Define engine files, name and options.
e1, e2 = define_engine(args.engine)
# Exit if engine file is not defined.
if e1['cmd'] is None or e2['cmd'] is None:
print('Error, engines are not properly defined!')
return
each_engine_option = {}
if args.each is not None:
for opt in args.each:
for value in opt:
key = value.split('=')[0]
val = value.split('=')[1].strip()
each_engine_option.update({key: val})
# Update tc of e1/e2 from each.
if e1['tc'] == '' or e2['tc'] == '':
if 'tc' in each_engine_option:
for key, val in each_engine_option.items():
if key == 'tc':
e1.update({key: val})
e2.update({key: val})
break
# Update depth of e1/e2 from each.
if e1['depth'] == 0 or e2['depth'] == 0:
if 'depth' in each_engine_option:
for key, val in each_engine_option.items():
if key == 'depth':
e1.update({key: int(val)})
e2.update({key: int(val)})
break
# Exit if there are no tc or depth.
if e1['tc'] == '' or e2['tc'] == '':
if e1['depth'] == 0 or e2['depth'] == 0:
raise Exception('Error! tc or depth are not defined.')
# Get the opening file and random state settings.
fen_file, is_random, posperfile = None, True, -1
if args.openings is not None:
for opt in args.openings:
for value in opt:
if value.startswith('file='):
fen_file = value.split('=')[1]
elif value.startswith('random='):
random_value = value.split('=')[1]
is_random = random_value.lower() == 'true'
elif value.startswith('posperfile='):
posperfile = int(value.split('=')[1])
draw_option = {'movenumber': None, 'movecount': None, 'score': None}
if args.draw is not None:
for opt in args.draw[0]:
key = opt.split('=')[0]
val = int(opt.split('=')[1])
draw_option.update({key: val})
resign_option = {'movecount': None, 'score': None}
if args.resign is not None:
for opt in args.resign[0]:
key = opt.split('=')[0]
val = int(opt.split('=')[1])
resign_option.update({key: val})
fens = get_fen_list(fen_file, is_rand=is_random, posperfile=posperfile)
duel = Duel(e1, e2, fens, args.rounds, args.concurrency, args.pgnout,
args.repeat, draw_option, resign_option, args.variant,
args.event)
duel.run()
if __name__ == '__main__':
main()
``` |
{
"source": "joerg-rade/domox",
"score": 3
} |
#### File: domox/docker/test_api.py
```python
from __future__ import print_function
import pexpect
import pytest
import requests
EXPECTED_PARSE = u"(ROOT\n (S\n (SBAR (IN Although)\n (S\n (NP (PRP they))\n (VP (VBD did) (RB n't)\n (PP (IN like)\n (NP (PRP it))))))\n (, ,)\n (NP (PRP they))\n (VP (VBD accepted)\n (NP (DT the) (NN offer)))\n (. .)))"
@pytest.fixture(scope="session", autouse=True)
def start_api():
"""Starts the CoreNLP REST API in a separate process."""
print("starting API...")
# 2 GB RAM seems to be the minimal amount CoreNLP needs to parse
# the example sentence.
child = pexpect.spawn('java -Xmx2g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9000 -timeout 15000')
yield child.expect('(?i)StanfordCoreNLPServer listening at /0.0.0.0:9000') # provide the fixture value
print("stopping API...")
child.close()
def test_api_short():
"""The CoreNLP API produces the expected parse output."""
input_text = "Although they didn't like it, they accepted the offer."
res = requests.post(
'http://localhost:9000/?properties={"annotators":"parse","outputFormat":"json"}',
data=bytes(input_text, 'utf-8'))
json_result = res.json()
assert json_result['sentences'][0]['parse'] == EXPECTED_PARSE
``` |
{
"source": "JoergRue/Sephrasto",
"score": 2
} |
#### File: JoergRue/Sephrasto/DatenbankEditWaffeneigenschaft.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_waffeneigenschaftDialog(object):
def setupUi(self, waffeneigenschaftDialog):
waffeneigenschaftDialog.setObjectName("waffeneigenschaftDialog")
waffeneigenschaftDialog.setWindowModality(QtCore.Qt.ApplicationModal)
waffeneigenschaftDialog.resize(440, 200)
self.gridLayout_2 = QtWidgets.QGridLayout(waffeneigenschaftDialog)
self.gridLayout_2.setObjectName("gridLayout_2")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.label_5 = QtWidgets.QLabel(waffeneigenschaftDialog)
self.label_5.setObjectName("label_5")
self.gridLayout.addWidget(self.label_5, 2, 0, 1, 1)
self.textEdit = QtWidgets.QPlainTextEdit(waffeneigenschaftDialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.textEdit.sizePolicy().hasHeightForWidth())
self.textEdit.setSizePolicy(sizePolicy)
self.textEdit.setObjectName("textEdit")
self.gridLayout.addWidget(self.textEdit, 2, 1, 1, 1)
self.label_4 = QtWidgets.QLabel(waffeneigenschaftDialog)
self.label_4.setMinimumSize(QtCore.QSize(110, 0))
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 3, 0, 1, 1)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.scriptPrioEdit = QtWidgets.QSpinBox(waffeneigenschaftDialog)
self.scriptPrioEdit.setMinimum(-10)
self.scriptPrioEdit.setMaximum(10)
self.scriptPrioEdit.setSingleStep(1)
self.scriptPrioEdit.setProperty("value", 0)
self.scriptPrioEdit.setObjectName("scriptPrioEdit")
self.horizontalLayout.addWidget(self.scriptPrioEdit)
self.gridLayout.addLayout(self.horizontalLayout, 3, 1, 1, 1)
self.label_3 = QtWidgets.QLabel(waffeneigenschaftDialog)
self.label_3.setMinimumSize(QtCore.QSize(110, 0))
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 4, 0, 1, 1)
self.scriptEdit = QtWidgets.QLineEdit(waffeneigenschaftDialog)
self.scriptEdit.setObjectName("scriptEdit")
self.gridLayout.addWidget(self.scriptEdit, 4, 1, 1, 1)
self.label = QtWidgets.QLabel(waffeneigenschaftDialog)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 1, 0, 1, 1)
self.nameEdit = QtWidgets.QLineEdit(waffeneigenschaftDialog)
self.nameEdit.setObjectName("nameEdit")
self.gridLayout.addWidget(self.nameEdit, 1, 1, 1, 1)
self.warning = QtWidgets.QLabel(waffeneigenschaftDialog)
self.warning.setStyleSheet("background-color: rgb(255, 255, 0);")
self.warning.setWordWrap(True)
self.warning.setVisible(False)
self.warning.setObjectName("warning")
self.gridLayout.addWidget(self.warning, 0, 0, 1, 2)
self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1)
self.buttonBox = QtWidgets.QDialogButtonBox(waffeneigenschaftDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Save)
self.buttonBox.setCenterButtons(True)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout_2.addWidget(self.buttonBox, 1, 0, 1, 1)
self.retranslateUi(waffeneigenschaftDialog)
self.buttonBox.accepted.connect(waffeneigenschaftDialog.accept)
self.buttonBox.rejected.connect(waffeneigenschaftDialog.reject)
QtCore.QMetaObject.connectSlotsByName(waffeneigenschaftDialog)
def retranslateUi(self, waffeneigenschaftDialog):
_translate = QtCore.QCoreApplication.translate
waffeneigenschaftDialog.setWindowTitle(_translate("waffeneigenschaftDialog", "Sephrasto - Waffeneigenschaft bearbeiten..."))
self.label_5.setText(_translate("waffeneigenschaftDialog", "Beschreibung"))
self.label_4.setText(_translate("waffeneigenschaftDialog", "Script Priorität"))
self.label_3.setText(_translate("waffeneigenschaftDialog", "Script"))
self.label.setText(_translate("waffeneigenschaftDialog", "Name"))
self.warning.setText(_translate("waffeneigenschaftDialog", "Dies ist eine Ilaris-Standardwaffeneigenschaft. Sobald du hier etwas veränderst, bekommst du eine persönliche Kopie und das Original wird in der aktuellen User-Regelbasis gelöscht. Damit erhältst du für diese Waffeneigenschaft keine automatischen Updates mehr mit neuen Sephrasto-Versionen."))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
waffeneigenschaftDialog = QtWidgets.QDialog()
ui = Ui_waffeneigenschaftDialog()
ui.setupUi(waffeneigenschaftDialog)
waffeneigenschaftDialog.show()
sys.exit(app.exec_())
```
#### File: JoergRue/Sephrasto/roll20Exporter.py
```python
from Wolke import Wolke
import Definitionen
import Objekte
import Talentbox
import os
import math
from collections import namedtuple
import logging
from Charakter import KampfstilMod
from Hilfsmethoden import Hilfsmethoden, WaffeneigenschaftException
import sys
import json
import time
import random
import re
class roll20Exporter(object):
"""exports character data into a Json file for a roll20 character sheet"""
def __init__(self):
pass
def exportCharacter(self, filename):
Wolke.Char.aktualisieren()
# load the file into memory
with open(filename, "r", encoding="utf8") as read_file:
data = json.load(read_file)
# update data
self.updateCharacterData(data["attribs"], Wolke.Char)
# write back the data into the file
with open(filename, "w", encoding="utf8") as write_file:
json.dump(data, write_file, indent=4, ensure_ascii = False)
def updateCharacterData(self, attribs, char):
self.updateAttributes(attribs, char)
self.updateGlobalValues(attribs, char)
self.updateFertigkeiten(attribs, char)
self.updateUebernatuerliches(attribs, char)
self.updateWaffen(attribs, char)
self.updateRuestung(attribs, char)
def updateAttributes(self, attribs, char):
for key in Definitionen.Attribute:
self.setCurrentAttrValue(attribs, key.lower(), char.attribute[key].wert)
def updateGlobalValues(self, attribs, char):
self.setCurrentAttrValue(attribs, "wsb", char.ws)
self.setCurrentAttrValue(attribs, "wsg", char.wsStern)
self.setCurrentAttrValue(attribs, "mr", char.mr)
self.setCurrentAttrValue(attribs, "behinderung", char.be)
self.setCurrentAttrValue(attribs, "geschwindigkeit", char.gs)
self.setCurrentAttrValue(attribs, "kampfreflexe", 4 if "Kampfreflexe" in char.vorteile else 0)
isZauberer = char.aspBasis + char.aspMod > 0
isGeweiht = char.kapBasis + char.kapMod > 0
if isZauberer:
self.setMaxAttrValue(attribs, "energy", char.asp.wert + char.aspBasis + char.aspMod)
elif isGeweiht:
self.setMaxAttrValue(attribs, "energy", char.kap.wert + char.kapBasis + char.kapMod)
self.setMaxAttrValue(attribs, "schip", char.schipsMax)
def getTalents(self, fert, char):
talStr = ""
talente = sorted(fert.gekaufteTalente)
for el2 in talente:
if (len(talStr) > 0):
talStr += ", "
# code taken from pdfMeister, purpose not clear
if el2.startswith("Gebräuche: "):
talStr += el2[11:]
elif el2.startswith("Mythen: "):
talStr += el2[8:]
elif el2.startswith("Überleben: "):
talStr += el2[11:]
else:
talStr += el2
if el2 in char.talenteVariable:
vk = char.talenteVariable[el2]
talStr += " (" + vk.kommentar + ")"
return talStr
def updateFertigkeit(self, attribs, attrName, fert, char):
self.setCurrentAttrValue(attribs, attrName, fert.wert)
# Talente
self.setCurrentAttrValue(attribs, attrName + "_t", self.getTalents(fert, char))
def updateFertigkeiten(self, attribs, char):
attrNames = {
"Athletik": "ath",
"Heimlichkeit": "hei",
"Mythenkunde": "myt",
"Überleben": "ube",
"Alchemie": "alc",
"Selbstbeherrschung": "sel",
"Wahrnehmung": "wah",
"Handwerk": "han",
"Heilkunde": "hku",
"Verschlagenheit": "ver",
"Beeinflussung": "bee",
"Gebräuche": "geb",
"Autorität": "aut",
"Derekunde": "der",
"Magiekunde": "mag"}
assert len(attrNames) == len(Definitionen.StandardFerts) - 6 # excludes the 6 Kampffertigkeiten (combat skills)
for fert in attrNames.keys():
assert fert in Definitionen.StandardFerts
additionalFerts = []
for fertKey, fert in char.fertigkeiten.items():
if fert.name in attrNames:
self.updateFertigkeit(attribs, attrNames[fert.name], fert, char)
elif fert.name.startswith("Gebräuche"): # special to replace Gebräuche
self.updateFertigkeit(attribs, "geb", fert, char)
elif fert.kampffertigkeit == 0:
values = []
values.append(fert.name)
values.append(self.getTalents(fert, char))
for attr in fert.attribute:
values.append("@{" + attr.lower() + "}")
values.append(fert.wert)
additionalFerts.append(values)
if len(additionalFerts) > 0:
appendices = ["_name", "_t", "_att1", "_att2", "_att3", "_fw"]
self.setRepeatingAttrValuesEx(attribs, "zfertigkeiten", "zfertigkeit", appendices, additionalFerts)
# Freie Fertigkeiten
fferts = []
for fert in char.freieFertigkeiten:
if fert.wert < 1 or fert.wert > 3 or not fert.name:
continue
val = fert.name + " "
for i in range(fert.wert):
val += "I"
fferts.append(val)
self.setRepeatingAttrValues(attribs, "freiefertigkeiten", "ffert", fferts)
def updateUebernatuerliches(self, attribs, char):
# code taken from pdfMeister, pdfSechsterBlock (pull out function?)
# Collect the set of purchased talents
talsList = []
for f in char.übernatürlicheFertigkeiten:
if char.übernatürlicheFertigkeiten[f].wert > 0 or\
len(char.übernatürlicheFertigkeiten[f].
gekaufteTalente) > 0:
talsList.extend(char.übernatürlicheFertigkeiten[f].
gekaufteTalente)
talsList = set(talsList)
fertsList = []
for f in char.übernatürlicheFertigkeiten:
if char.übernatürlicheFertigkeiten[f].wert <= 0 and\
len(char.übernatürlicheFertigkeiten[f].
gekaufteTalente) == 0:
continue
fertsList.append(f)
fertsList.sort(key = lambda x: (Wolke.DB.übernatürlicheFertigkeiten[x].printclass, x))
# find the highest talent value; a talent could appear in several fertigkeiten
talsValues = {}
for tal in talsList:
talsValues[tal] = 0
for fert in fertsList:
fe = char.übernatürlicheFertigkeiten[fert]
val = fe.probenwertTalent
for tal in fe.gekaufteTalente:
if val > talsValues[tal]:
talsValues[tal] = val
talCount = 1
for tal, val in talsValues.items():
self.setCurrentAttrValue(attribs, "sn" + str(talCount), val)
mod = ""
if tal in char.talenteVariable:
vk = char.talenteVariable[tal]
mod = " (" + vk.kommentar + ")"
self.setCurrentAttrValue(attribs, "sn" + str(talCount) + "_t", tal + mod)
talCount += 1
def ignoreBE(self, weapon, char):
fertigkeit = ""
talent = ""
if weapon.name in Wolke.DB.waffen:
fertigkeit = Wolke.DB.waffen[weapon.name].fertigkeit
talent = Wolke.DB.waffen[weapon.name].talent
if not fertigkeit in char.fertigkeiten:
return False
kampfstilMods = KampfstilMod()
if weapon.kampfstil in char.kampfstilMods:
kampfstilMods = char.kampfstilMods[weapon.kampfstil]
for values in kampfstilMods.BEIgnore:
if values[0] == fertigkeit and values[1] == talent:
return True
return False
def updateWaffen(self, attribs, char):
weaponCount = 1
nkWeaponCount = 1
fkWeaponCount = 1
for weapon in char.waffen:
waffenwerte = char.waffenwerte[weaponCount - 1]
# the values given from the char include modification by BE
# the character sheet expects the values without the modification and adds the modification itself
beMod = char.be
# the character sheet doesn't know the kampfstil, so it is probably wrong for the case that the
# kampfstil ignores the BE. So in that case, let's _add_ the be to the values a second time, then
# the value is correct again after the sheet removes the BE
if self.ignoreBE(weapon, char):
beMod = char.be * 2  # add the BE a second time, as explained in the comment above
if type(weapon) == Objekte.Fernkampfwaffe or (weapon.name in Wolke.DB.waffen and Wolke.DB.waffen[weapon.name].talent == 'Lanzenreiten'):
base = "fkw" + str(fkWeaponCount)
self.setCurrentAttrValue(attribs, base + "_dmd", weapon.W6)
self.setCurrentAttrValue(attribs, base + "_dmn", weapon.plus)
self.setCurrentAttrValue(attribs, base + "_at", waffenwerte.AT + beMod)
self.setCurrentAttrValue(attribs, base + "_t", weapon.anzeigename)
fkWeaponCount += 1
else:
base = "w" + str(nkWeaponCount)
self.setCurrentAttrValue(attribs, base + "_dmd", weapon.W6)
# character sheet expects tp including kampfstil, but excluding damage bonus from KK
# weapon.plus is without both
# waffenwerte.TPPlus is including kampfstil and including damage bonus
self.setCurrentAttrValue(attribs, base + "_dmn", waffenwerte.TPPlus - char.schadensbonus)
# character sheet expects at including kampfstil, waffenwerte.AT is correct except for BE
self.setCurrentAttrValue(attribs, base + "_at", waffenwerte.AT + beMod)
self.setCurrentAttrValue(attribs, base + "_vt", waffenwerte.VT + beMod)
self.setCurrentAttrValue(attribs, base + "_t", weapon.anzeigename)
kl = 1 if "Kopflastig" in weapon.eigenschaften else 0
self.setCurrentAttrValue(attribs, "kl" + base, kl)
nkWeaponCount += 1
weaponCount += 1
def updateRuestung(self, attribs, char):
if len(char.rüstung) > 0:
el = char.rüstung[0]
for zone in range(1, 7):
self.setCurrentAttrValue(attribs, "wsg" + str(zone), el.rs[zone-1] + char.rsmod + char.ws)
def setCurrentAttrValue(self, attribs, name, value):
for attr in attribs:
if "name" in attr and attr["name"] == name:
attr["current"] = str(value)
break
else:
attr = { "name": name, "current": str(value), "max": "", "id": self.generateAttrId() }
attribs.append(attr)
def setMaxAttrValue(self, attribs, name, value):
for attr in attribs:
if "name" in attr and attr["name"] == name:
attr["max"] = str(value)
break
else:
attr = { "name": name, "current": str(value), "max": str(value), "id": self.generateAttrId() }
attribs.append(attr)
def setRepeatingAttrValues(self, attribs, basenamePattern1, basenamePattern2, values):
valueList = []
for value in values:
valueList.append([value])
appendices = [""]
self.setRepeatingAttrValuesEx(attribs, basenamePattern1, basenamePattern2, appendices, valueList)
def setRepeatingAttrValuesEx(self, attribs, basenamePattern1, basenamePattern2, appendices, valueList):
existingList = []
# first find all existing lines
# the lines all start with "repeating", then the first name, then an ID which is unique for the line,
# then the second name, finally an appendix if the line contains several fields.
# all the parts are separated by "_", which therefore must not occur in the ID
pattern = re.compile('^'+ "repeating_" + basenamePattern1 + "_([-_\\d\\w])+_" + basenamePattern2)
for attr in attribs:
match = pattern.match(attr["name"])
if match != None:
existingName = match[0]
if not existingName in existingList:
existingList.append(existingName)
# now replace or add the values
# the valueList contains for each line one value per appendix
for values in valueList:
attrName = ""
if len(existingList) > 0:
attrName = existingList.pop()
else:
attrName = "repeating_"+ basenamePattern1 + "_" + self.generateRepeatingAttrId() + "_" + basenamePattern2
valueIndex = 0
for appendix in appendices:
self.setCurrentAttrValue(attribs, attrName + appendix, values[valueIndex])
valueIndex += 1
def generateAttrId(self):
# see https://app.roll20.net/forum/permalink/4258551/
millis = int(round(time.time() * 1000))
id = ""
base64string = "-0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz"
for e in range(0, 8):
id += base64string[millis %64]
millis = math.floor(millis / 64)
for f in range(0, 12):
id += base64string[random.randrange(0, len(base64string))]
return id
def generateRepeatingAttrId(self):
id = self.generateAttrId()
return id.replace("_", "-")
```
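A minimal usage sketch for the exporter above; it assumes a character has already been loaded into `Wolke.Char` by Sephrasto and that `my_char.json` is an existing Roll20 character export (both are assumptions, not part of the repository):
```python
# Sketch only (assumptions: Wolke.Char holds a loaded character, "my_char.json"
# is an existing roll20 character export downloaded beforehand).
from roll20Exporter import roll20Exporter

exporter = roll20Exporter()
exporter.exportCharacter("my_char.json")  # updates the "attribs" in place and rewrites the file
```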
#### File: JoergRue/Sephrasto/Sephrasto.py
```python
from PyQt5 import QtWidgets, QtCore, QtGui
import sys
import logging
import os.path
import MainWindow
import CharakterEditor
import DatenbankEdit
import CharakterMain
import DatenbankMain
from Wolke import Wolke
import yaml
from EinstellungenWrapper import EinstellungenWrapper
import Version
loglevels = {0: logging.ERROR, 1: logging.WARNING, 2: logging.DEBUG}
logging.basicConfig(filename="sephrasto.log", \
level=loglevels[Wolke.Settings['Logging']], \
format="%(asctime)s | %(levelname)s | %(filename)s::%(funcName)s(%(lineno)d) | %(message)s")
def sephrasto_excepthook(exc_type, exc_value, tb):
traceback = [' Traceback (most recent call last):']
while tb:
filename = tb.tb_frame.f_code.co_filename
name = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno
traceback.append(' File "%.500s", line %d, in %.500s' %(filename, lineno, name))
tb = tb.tb_next
# Exception type and value
exception = ' %s: %s' %(exc_type.__name__, exc_value)
logging.critical(exception + "\n".join(traceback))
#Try to show message box, hopefully its not a crash in Qt
messagebox = QtWidgets.QMessageBox()
messagebox.setWindowTitle("Fehler!")
messagebox.setText("Unerwarteter Fehler:" + exception + ". Bei Fragen zum diesem Fehler bitte sephrasto.log mitsenden.")
messagebox.setIcon(QtWidgets.QMessageBox.Critical)
messagebox.setStandardButtons(QtWidgets.QMessageBox.Ok)
messagebox.exec_()
class MainWindowWrapper(object):
'''
Main Class responsible for running the entire application.
Just shows three buttons and handles the execution of the individual subparts.
'''
def __init__(self):
sys.excepthook = sephrasto_excepthook
'''
Initializes the GUI and connects the buttons.
'''
self._version_ = "v" + str(Version._sephrasto_version_major) + "." + str(Version._sephrasto_version_minor) + "." + str(Version._sephrasto_version_build)
logging.critical("Starte Sephrasto " + self._version_) #critical so it's always printed, independent of the debug level setting
super().__init__()
#Make sure the application scales properly, i.e. in Win10 users can change the UI scale in the display settings
if hasattr(QtCore.Qt, 'AA_EnableHighDpiScaling'):
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)
if hasattr(QtCore.Qt, 'AA_UseHighDpiPixmaps'):
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)
self.app = QtCore.QCoreApplication.instance()
if self.app is None:
self.app = QtWidgets.QApplication(sys.argv)
#self.app.setStyleSheet("*[readOnly=\"true\"] { background-color: #F5F5F5 } QAbstractScrollArea #scrollAreaWidgetContents { background-color: #FFFFFF }")
self.app.setStyleSheet("""
*[readOnly=\"true\"]
{
background-color: #FFFFFF;
border: none
}
QAbstractScrollArea #scrollAreaWidgetContents
{
background-color: #FFFFFF
}
""")
self.Form = QtWidgets.QWidget()
self.ui = MainWindow.Ui_Form()
self.ui.setupUi(self.Form)
self.ui.buttonNew.clicked.connect(self.createNew)
self.ui.buttonEdit.clicked.connect(self.editExisting)
self.ui.buttonRules.clicked.connect(self.editRuleset)
self.ui.buttonSettings.clicked.connect(self.editSettings)
self.ui.labelVersion.setText(self._version_ + " - by Aeolitus ")
self.app.setWindowIcon(QtGui.QIcon('icon_large.png'))
# Get the Settings loaded
EinstellungenWrapper.load()
logging.getLogger().setLevel(loglevels[Wolke.Settings['Logging']])
self.Form.show()
sys.exit(self.app.exec_())
def createNew(self):
'''
Creates a new CharakterEditor which is empty and shows it.
'''
self.ed = CharakterEditor.Editor(self.savePathUpdated)
if self.ed.noDatabase:
raise Exception("Konnte datenbank.xml nicht finden")
self.ed.formMain = QtWidgets.QWidget()
self.ed.ui = CharakterMain.Ui_formMain()
self.ed.ui.setupUi(self.ed.formMain)
self.ed.ui.tabs.removeTab(0)
self.ed.ui.tabs.removeTab(0)
self.ed.setupMainForm()
self.savePathUpdated()
self.ed.formMain.show()
def editExisting(self):
'''
Creates a CharakterEditor for an existing character and shows it.
'''
if os.path.isdir(Wolke.Settings['Pfad-Chars']):
startDir = Wolke.Settings['Pfad-Chars']
else:
startDir = ""
spath, _ = QtWidgets.QFileDialog.getOpenFileName(None,"Charakter laden...",startDir,"XML-Datei (*.xml)")
if spath == "":
return
if not spath.endswith(".xml"):
spath = spath + ".xml"
try:
self.ed = CharakterEditor.Editor(self.savePathUpdated, spath)
except Exception as e:
logging.error("Sephrasto Fehlercode " + str(Wolke.Fehlercode) + ". Exception: " + str(e))
infoBox = QtWidgets.QMessageBox()
infoBox.setIcon(QtWidgets.QMessageBox.Information)
if Wolke.Fehlercode <= -40 and Wolke.Fehlercode > -80:
infoBox.setText("Charakterdatei öffnen fehlgeschlagen")
infoBox.setInformativeText("Die XML-Datei konnte nicht gelesen werden.\n\
Fehlercode: " + str(Wolke.Fehlercode) + "\n\
Fehlermeldung: " + Wolke.ErrorCode[Wolke.Fehlercode] + "\n")
infoBox.setWindowTitle("Fehlerhafte Datei")
else:
infoBox.setText("Ein unerwarteter Fehler ist aufgetreten!")
infoBox.setInformativeText("Ein Fehler ist aufgetreten. Versuche, Sephrasto neu zu starten?\n\
Fehlercode: " + str(Wolke.Fehlercode) + "\n")
infoBox.setWindowTitle("Unbekannter Fehler")
infoBox.setStandardButtons(QtWidgets.QMessageBox.Ok)
infoBox.setEscapeButton(QtWidgets.QMessageBox.Close)
infoBox.exec_()
else:
if self.ed.noDatabase:
raise Exception("Konnte datenbank.xml nicht finden")
self.ed.formMain = QtWidgets.QWidget()
self.ed.ui = CharakterMain.Ui_formMain()
self.ed.ui.setupUi(self.ed.formMain)
self.ed.ui.tabs.removeTab(0)
self.ed.ui.tabs.removeTab(0)
self.ed.setupMainForm()
self.savePathUpdated()
self.ed.formMain.show()
def editRuleset(self):
'''
Creates the DatenbankEdit Form and shows the contents of datenbank.xml.
'''
self.D = DatenbankEdit.DatenbankEdit()
self.D.Form = QtWidgets.QWidget()
self.D.ui = DatenbankMain.Ui_Form()
self.D.ui.setupUi(self.D.Form)
self.D.setupGUI()
self.D.Form.show()
def editSettings(self):
EinstellungenWrapper()
def savePathUpdated(self):
file = " - <NAME>"
if self.ed.savepath:
file = " - " + os.path.basename(self.ed.savepath)
rules = ""
if Wolke.DB.datei:
rules = " (" + os.path.basename(Wolke.DB.datei) + ")"
self.ed.formMain.setWindowTitle("Sephrasto" + file + rules)
if __name__ == "__main__":
itm = MainWindowWrapper()
``` |
{
"source": "joerg-schneider/airflow-bootstrap",
"score": 3
} |
#### File: src/airtunnel/data_store.py
```python
import glob
import importlib
import logging
import os
import shutil
from abc import ABC, abstractmethod
from datetime import datetime
from typing import TextIO, List, Dict, Tuple
from airflow import conf
from airflow.exceptions import AirflowConfigException
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
DEFAULT_ADAPTER_CLASS = "airtunnel.data_store.LocalDataStoreAdapter"
class BaseDataStoreAdapter(ABC):
""" Base class for all Airtunnel DataStoreAdapters. """
@staticmethod
@abstractmethod
def move(source: str, destination: str, recursive: bool = False) -> None:
""" Move file(s) from source to destination. """
raise NotImplementedError
@staticmethod
@abstractmethod
def copy(source: str, destination: str, recursive: bool = False) -> None:
""" Copy file(s) from source to destination. """
raise NotImplementedError
@staticmethod
@abstractmethod
def delete(path: str, recursive: bool = False) -> None:
""" Delete file(s) at a given path. """
raise NotImplementedError
@staticmethod
@abstractmethod
def makedirs(path: str, exist_ok: bool = False, **kwargs) -> None:
""" Make directories along the given path. """
raise NotImplementedError
@staticmethod
@abstractmethod
def open(file: str, mode: str, **kwargs) -> TextIO:
""" Open a file-handle with the given path & mode. """
raise NotImplementedError
@staticmethod
@abstractmethod
def exists(path: str, **kwargs) -> bool:
""" Checks the given path exists. """
raise NotImplementedError
@staticmethod
@abstractmethod
def glob(pattern: str, **kwargs) -> List[str]:
""" Fetch file list using the provided glob pattern. """
raise NotImplementedError
@staticmethod
@abstractmethod
def listdir(path: str, recursive: bool = False, **kwargs) -> List[str]:
""" List entries of a given path. """
raise NotImplementedError
@staticmethod
@abstractmethod
def modification_times(files: List[str]) -> Dict[str, int]:
""" For a given file list, fetch modification times as int Unix timestamps. """
raise NotImplementedError
@staticmethod
@abstractmethod
def inspect(files: List[str]) -> Dict[str, Tuple[datetime, datetime, int]]:
""" For a given file list, fetch create & modification datetime and byte size. """
raise NotImplementedError
class LocalDataStoreAdapter(BaseDataStoreAdapter):
""" DataStoreAdapter implementation targeting the local filesystem. """
@staticmethod
def move(source: str, destination: str, recursive: bool = False) -> None:
""" Move file(s) from source to destination. """
shutil.move(src=source, dst=destination)
@staticmethod
def copy(source: str, destination: str, recursive: bool = False) -> None:
""" Copy file(s) from source to destination. """
if recursive:
shutil.copytree(src=source, dst=destination)
else:
shutil.copy(src=source, dst=destination)
@staticmethod
def delete(path: str, recursive: bool = False) -> None:
""" Delete file(s) at a given path. """
if recursive:
shutil.rmtree(path=path)
else:
os.remove(path)
@staticmethod
def makedirs(path: str, exist_ok: bool = False, **kwargs) -> None:
""" Make directories along the given path. """
os.makedirs(path, exist_ok=exist_ok, **kwargs)
@staticmethod
def open(file: str, mode: str, **kwargs) -> TextIO:
""" Open a file-handle with the given path & mode. """
return open(file=file, mode=mode, **kwargs)
@staticmethod
def exists(path: str, **kwargs) -> bool:
""" Checks the given path exists. """
return os.path.exists(path=path)
@staticmethod
def glob(pattern: str, **kwargs) -> List[str]:
""" Fetch file list using the provided glob pattern. """
return glob.glob(pattern)
@staticmethod
def listdir(path: str, recursive: bool = False, **kwargs) -> List[str]:
""" List entries of a given path. """
if recursive:
return [
os.path.join(root, f)
for root, dirs, files in os.walk(path)
for f in files
]
else:
return [os.path.join(path, p) for p in os.listdir(path)]
@staticmethod
def modification_times(files: List[str]) -> Dict[str, int]:
""" For a given file list, fetch modification times as int Unix timestamps. """
return {f: os.stat(f).st_mtime for f in files}
@staticmethod
def inspect(files: List[str]) -> Dict[str, Tuple[datetime, datetime, int]]:
""" For a given file list, fetch create & modification datetime and byte size. """
inspected = {}
for f in files:
f_stat = os.stat(f)
inspected[f] = (
datetime.fromtimestamp(f_stat.st_ctime),
datetime.fromtimestamp(f_stat.st_mtime),
f_stat.st_size,
)
return inspected
def get_configured_data_store_adapter() -> BaseDataStoreAdapter:
""" Gets the configured DataStoreAdapter. """
try:
data_store_adapter_class = conf.get(
section="airtunnel", key="data_store_adapter_class"
)
except AirflowConfigException:
logger.warning(
f"'data_store_adapter_class' for Airtunnel not configured in airflow.cfg – using default"
)
# set the default config as part of the environment, to hide future AirflowConfigExceptions:
os.environ[
"AIRFLOW__AIRTUNNEL__DATA_STORE_ADAPTER_CLASS"
] = DEFAULT_ADAPTER_CLASS
data_store_adapter_class = DEFAULT_ADAPTER_CLASS
module, cls = data_store_adapter_class.rsplit(".", maxsplit=1)
mod = importlib.import_module(name=module)
return getattr(mod, cls)
```
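A short sketch of how calling code might use the configured data store adapter; it assumes airtunnel is installed with its default configuration, and the paths are hypothetical:
```python
# Sketch only – assumes airtunnel (and Airflow) are installed with default settings;
# the paths below are hypothetical.
from airtunnel.data_store import get_configured_data_store_adapter

adapter = get_configured_data_store_adapter()  # resolves to LocalDataStoreAdapter by default
adapter.makedirs("/tmp/airtunnel_demo", exist_ok=True)
with adapter.open("/tmp/airtunnel_demo/hello.txt", mode="w") as f:
    f.write("hello")
print(adapter.listdir("/tmp/airtunnel_demo"))
print(adapter.modification_times(adapter.glob("/tmp/airtunnel_demo/*.txt")))
```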
#### File: airtunnel/operators/loading.py
```python
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
import airtunnel.data_store
import airtunnel.metadata.adapter
import airtunnel.operators
from airtunnel.data_asset import BaseDataAsset
from airtunnel.metadata.entities import LoadStatus
class StagingToReadyOperator(BaseOperator):
""" Airtunnel's StagingToReadyOperator – moves staged files (from staging/read) to ready for a data asset and
write load status metadata.
"""
ui_color = airtunnel.operators.Colours.loading
@apply_defaults
def __init__(self, asset: BaseDataAsset, *args, **kwargs):
self._asset = asset
self._data_store_adapter = airtunnel.data_store.get_configured_data_store_adapter()
if "task_id" not in kwargs:
kwargs["task_id"] = asset.name + "_" + "staging_to_ready"
super().__init__(*args, **kwargs)
def execute(self, context):
""" Execute this operator from Airflow. """
# python does not have a straight forward "move and overwrite":
# https://stackoverflow.com/questions/7419665/python-move-and-overwrite-files-and-folders
# hence, we use a workaround, which is unfortunately less atomic. keep in mind for cloud storage!
moved_to_temp_path = False
move_to_ready_succeeded = False
asset_temp_path = None
try:
self._data_store_adapter.makedirs(
path=self._asset.ready_path, exist_ok=True
)
self.log.info(f"Loading new version to {self._asset.ready_path}")
asset_temp_path = self._asset.make_ready_temp_path(context)
self._data_store_adapter.move(
source=self._asset.ready_path,
destination=asset_temp_path,
recursive=True,
)
moved_to_temp_path = True
# load the prepared data
self._data_store_adapter.move(
source=self._asset.staging_ready_path,
destination=self._asset.ready_path,
recursive=True,
)
move_to_ready_succeeded = True
except Exception as e:
raise e
finally:
# depending on a successful move or not, we have to leave a consistent state in ready:
if moved_to_temp_path and move_to_ready_succeeded:
try:
# log load-status
airtunnel.metadata.adapter.get_configured_meta_adapter().write_load_status(
LoadStatus(
for_asset=self._asset,
dag_id=self.dag_id,
task_id=self.task_id,
dag_exec_date=context["task_instance"].execution_date,
)
)
self.log.info(
f"Successfully loaded - removing old copy at temp location: {asset_temp_path}"
)
except Exception as e:
self.log.warning(f"Error on logging load status: {e}")
# we can safely remove the old version
self._data_store_adapter.delete(path=asset_temp_path, recursive=True)
elif moved_to_temp_path and not move_to_ready_succeeded:
self.log.warning(
f"Error on loading - restoring old version from {asset_temp_path}"
)
# we have to revert to the old version
self._data_store_adapter.move(
source=asset_temp_path,
destination=self._asset.ready_path,
recursive=True,
)
```
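A hedged sketch of wiring `StagingToReadyOperator` into a DAG; the asset name `student`, the `PandasDataAsset` import path and the surrounding airtunnel project declarations are assumptions, so this is not runnable standalone:
```python
# Sketch only – assumes an airtunnel project in which an ingested data asset named
# "student" has been declared (declaration YAML, scripts, folder structure);
# the PandasDataAsset import path is an assumption as well.
from datetime import datetime

from airflow import DAG
from airtunnel import PandasDataAsset
from airtunnel.operators.loading import StagingToReadyOperator

student = PandasDataAsset("student")  # hypothetical declared asset

with DAG(dag_id="student_load", schedule_interval=None, start_date=datetime(2021, 1, 1)) as dag:
    # task_id defaults to "student_staging_to_ready"
    load_student = StagingToReadyOperator(asset=student)
```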
#### File: airtunnel/sensors/ingestion.py
```python
import os
from airflow.models import TaskInstance
from airflow.operators.sensors import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
import airtunnel.data_store
import airtunnel.operators
from airtunnel.data_asset import BaseDataAsset
K_DISCOVERED_FILES = "discovered_input_files"
class SourceFileIsReadySensor(BaseSensorOperator):
"""
Airtunnel's SourceFileIsReadySensor – for a given Airtunnel data asset of type ``ingested``, this sensor will
leverage the declared input files glob pattern to sense for new input data.
Once files are discovered, the sensor captures their names and modification dates and only returns
successfully after both have remained unchanged for ``no_of_required_static_pokes`` consecutive pokes.
This guards against incomplete larger input files that are still being written into the landing area while this
sensor finds them.
"""
ui_color = airtunnel.operators.Colours.ingestion
@apply_defaults
def __init__(
self,
asset: BaseDataAsset,
no_of_required_static_pokes: int = 2,
poke_interval: int = 30,
timeout: int = 60 * 15,
**kwargs,
):
if "task_id" not in kwargs:
kwargs["task_id"] = asset.name + "_" + "source_is_ready"
super().__init__(poke_interval=poke_interval, timeout=timeout, **kwargs)
self._asset = asset
self._no_of_required_static_pokes = no_of_required_static_pokes
self._discovered_input_files = None
self._search_glob = os.path.join(
self._asset.landing_path, self._asset.declarations.ingest_file_glob
)
self._data_store_adapter = airtunnel.data_store.get_configured_data_store_adapter()
def poke(self, context):
""" Perform the poke operation for this sensor from Airflow. """
if (
self._discovered_input_files is not None
and self._no_of_required_static_pokes <= 1
):
# we have found files that remained static for enough iterations.
# -->> push the found files which will expose them as an XCom payload
# context.xcom.push self._discovered_input_files
ti: TaskInstance = context["task_instance"]
ti.xcom_push(
key=self._asset.discovered_files_xcom_key,
value=list(self._discovered_input_files.keys()),
)
return True
elif self._discovered_input_files is not None:
# we have found files on a previous poke; check whether they have remained static
# scan for matching files again:
matching_files = self._matching_files()
# get modification timestamps on all files:
matching_files_w_time = self._data_store_adapter.modification_times(
matching_files
)
# check if same as in previous probe:
if matching_files_w_time == self._discovered_input_files:
# decrement the remaining number of checks for the files to remain static:
self._no_of_required_static_pokes = (
self._no_of_required_static_pokes - 1
)
self.log.info(
"Previously discovered files have not changed - "
f"poke another {self._no_of_required_static_pokes} times"
)
else:
# files have changed since the last check - store the new list of relevant files
self.log.info(
"Previously discovered files have changed - keep poking ..."
)
self._discovered_input_files = matching_files_w_time
else:
matching_files = self._matching_files()
if len(matching_files) > 0:
# capture found files and their modification timestamps:
self.log.info(f"Found {len(matching_files)} source files to ingest")
self._discovered_input_files = self._data_store_adapter.modification_times(
matching_files
)
else:
self.log.info(f"No matching files at {self._search_glob}")
# we need to poke for another iteration
return False
def _matching_files(self):
return self._data_store_adapter.glob(pattern=self._search_glob)
```
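The sensor is typically declared right next to the ingestion tasks of the same asset. A sketch, inside a DAG definition like the one after the operator file above, with an assumed asset name:

```python
# Hypothetical usage inside a DAG definition; the import path follows the file shown above.
from airtunnel import PandasDataAsset
from airtunnel.sensors.ingestion import SourceFileIsReadySensor

student = PandasDataAsset("student")

# Poke every 30 seconds and succeed once the discovered files and their modification
# times have stayed unchanged for 2 consecutive pokes; the matched file names are
# pushed to XCom under the asset's discovered_files_xcom_key for downstream tasks.
student_source_is_ready = SourceFileIsReadySensor(
    asset=student,
    no_of_required_static_pokes=2,
    poke_interval=30,
    timeout=60 * 15,
)
```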
#### File: airtunnel/sensors/metadata.py
```python
from datetime import timedelta, datetime
from typing import Optional, Iterable
from airflow.operators.sensors import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
import airtunnel.operators
from airtunnel import BaseDataAsset
from airtunnel.metadata.adapter import get_configured_meta_adapter
class AwaitLoadStatusSensor(BaseSensorOperator):
""" Airtunnel's AwaitLoadStatusSensor – checks the data asset load status metadata for a given condition."""
ui_color = airtunnel.operators.Colours.ingestion
@apply_defaults
def __init__(
self,
asset: BaseDataAsset,
poke_interval: int = 30,
timeout: int = 60 * 15,
refreshed_within: timedelta = None,
refreshed_after: datetime = None,
**kwargs,
):
if "task_id" not in kwargs:
kwargs["task_id"] = asset.name + "_" + "await_load_status"
self._asset = asset
if refreshed_after is None and refreshed_within is None:
raise ValueError(
"One of 'refreshed_within' or 'refreshed_after' should be supplied!"
)
if refreshed_within is None:
self._refreshed_within = datetime.now() - timedelta(days=365 * 10)
else:
self._refreshed_within = datetime.now() - refreshed_within
if refreshed_after is None:
self._refreshed_after = datetime.now() - timedelta(days=365 * 10)
else:
self._refreshed_after = refreshed_after
self._compare_date = None
self._meta_adapter = None
super().__init__(poke_interval=poke_interval, timeout=timeout, **kwargs)
def poke(self, context):
""" Perform the poke operation for this sensor from Airflow. """
if self._meta_adapter is None:
self._meta_adapter = get_configured_meta_adapter()
if self._compare_date is None:
# we pick the more recent date as a comparison:
if self._refreshed_within > self._refreshed_after:
self._compare_date = self._refreshed_within
else:
self._compare_date = self._refreshed_after
self.log.info(f"Poking for a load status after: {self._compare_date}")
current_load_status = self._meta_adapter.read_load_status(for_asset=self._asset)
if (
current_load_status is not None
and current_load_status.load_time > self._compare_date
):
return True
self.log.info(
"No load status found yet."
if current_load_status is None
else f"Current load status of {current_load_status.load_time} is not recent enough."
)
return False
class AwaitAssetAncestorsUpdatedSensor(BaseSensorOperator):
""" Airtunnel's AwaitAssetAncestorsUpdatedSensor – using lineage for a given data asset, probes until all
ancestors have been updated. (i.e. load status timestamp of all ancestors is "recent enough")
"""
ui_color = airtunnel.operators.Colours.ingestion
@apply_defaults
def __init__(
self,
asset: BaseDataAsset,
ignore_ancestors: Optional[Iterable[BaseDataAsset]] = (),
ancestors_refreshed_within: timedelta = None,
include_upstream_levels: Iterable[int] = (0,),
poke_interval: int = 30,
timeout: int = 60 * 15,
**kwargs,
):
if "task_id" not in kwargs:
kwargs["task_id"] = asset.name + "_" + "await_ancestors_updated"
self._asset = asset
self._ignore_ancestors = ignore_ancestors
self._meta_adapter = None
self._include_upstream_levels = include_upstream_levels
self._ancestors_refreshed_within = ancestors_refreshed_within
self._refreshed_since = None
super().__init__(poke_interval=poke_interval, timeout=timeout, **kwargs)
def poke(self, context):
""" Perform the poke operation for this sensor from Airflow. """
if self._meta_adapter is None:
self._meta_adapter = get_configured_meta_adapter()
if self._refreshed_since is None:
if self._ancestors_refreshed_within is None:
self._refreshed_since = datetime.now() - timedelta(days=365 * 10)
else:
self._refreshed_since = (
datetime.now() - self._ancestors_refreshed_within
)
self.log.info(
f"Including ancestors refreshed since: {self._refreshed_since}"
)
lineage = self._meta_adapter.read_lineage(for_target=self._asset)
for ancestor, upstream_level in lineage:
if upstream_level in self._include_upstream_levels:
if ancestor.data_target not in self._ignore_ancestors:
self.log.info(
f"Checking load status of ancestor: {ancestor.data_target.name}"
)
load_status_ancestor = self._meta_adapter.read_load_status(
for_asset=ancestor.data_target
)
if load_status_ancestor.load_time > self._refreshed_since:
self.log.info(f"Load time is in the specified time-range.")
else:
self.log.info(f"Load time is NOT in the specified time-range.")
return False
return True
```
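Both sensors take a data asset plus a time window. A sketch with an assumed asset name and assumed windows:

```python
# Hypothetical usage inside a DAG definition; asset name and time windows are assumptions.
from datetime import timedelta

from airtunnel import PandasDataAsset
from airtunnel.sensors.metadata import (
    AwaitAssetAncestorsUpdatedSensor,
    AwaitLoadStatusSensor,
)

student = PandasDataAsset("student")

# wait until "student" itself has been (re)loaded within the last 4 hours
await_student_loaded = AwaitLoadStatusSensor(
    asset=student, refreshed_within=timedelta(hours=4)
)

# wait until all direct (level 0) lineage ancestors of "student" were loaded within the last day
await_ancestors_updated = AwaitAssetAncestorsUpdatedSensor(
    asset=student,
    ancestors_refreshed_within=timedelta(days=1),
    include_upstream_levels=(0,),
)
```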
#### File: scripts/py/student.py
```python
from airtunnel import PandasDataAsset, PandasDataAssetIO
def rebuild_for_store(asset: PandasDataAsset, airflow_context):
student_data = PandasDataAssetIO.read_data_asset(
asset=asset, source_files=asset.pickedup_files(airflow_context)
)
student_data = asset.rename_fields_as_declared(student_data)
PandasDataAssetIO.write_data_asset(asset=asset, data=student_data)
```
#### File: scripts/py/student_pyspark.py
```python
import pyspark
from airtunnel import PySparkDataAsset, PySparkDataAssetIO
def rebuild_for_store(asset: PySparkDataAsset, airflow_context):
spark_session = pyspark.sql.SparkSession.builder.getOrCreate()
student_data = PySparkDataAssetIO.read_data_asset(
asset=asset,
source_files=asset.pickedup_files(airflow_context),
spark_session=spark_session,
header=True,
inferSchema=True,
)
student_data = asset.rename_fields_as_declared(student_data)
PySparkDataAssetIO.write_data_asset(asset=asset, data=student_data)
spark_session.stop()
```
#### File: test_airtunnel/metadata/test_entities.py
```python
from datetime import datetime, timedelta
from typing import Tuple
import pytest
from airtunnel.data_asset import ShellDataAsset
from airtunnel.metadata.adapter import SQLMetaAdapter, BaseMetaAdapter
from airtunnel.metadata.entities import Lineage, LoadStatus, IngestedFileMetadata
from test_airtunnel.test_utils import DUMMY_TABLE, DUMMY_TABLE2
@pytest.fixture(scope="function")
def load_status() -> LoadStatus:
yield LoadStatus(
for_asset=ShellDataAsset(DUMMY_TABLE2),
load_time=datetime.now(),
dag_id="test_dag",
task_id="test_task",
dag_exec_date=datetime.now(),
)
@pytest.fixture
def adapter(test_db_hook) -> BaseMetaAdapter:
return SQLMetaAdapter(sql_hook=test_db_hook)
@pytest.fixture(scope="function")
def lineage() -> Lineage:
yield Lineage(
data_sources=[ShellDataAsset(DUMMY_TABLE)],
data_target=ShellDataAsset(DUMMY_TABLE2),
dag_id="test_dag",
task_id="test_task",
dag_exec_date=datetime.now(),
)
@pytest.fixture(scope="function")
def ingested_file_metadata() -> IngestedFileMetadata:
yield IngestedFileMetadata(
for_asset=ShellDataAsset(DUMMY_TABLE),
filepath="test_path.csv",
filesize=100,
file_mod_time=datetime.now(),
file_create_time=datetime.now(),
dag_id="test_dag",
dag_exec_date=datetime.now(),
task_id="test_task_id",
)
def test_load_status_cls(load_status: LoadStatus, adapter: BaseMetaAdapter) -> None:
load_status_time = datetime.now()
adapter.write_load_status(load_status)
status = adapter.read_load_status(for_asset=load_status.for_asset)
read_ls_time = status.load_time
# check that the returned datetime is within bounds of 1 second:
assert load_status_time - timedelta(seconds=1) < read_ls_time < load_status_time + timedelta(seconds=1)
assert status.is_within(frame=timedelta(seconds=20))
# repr:
x = "t" + str(status)
assert isinstance(x, str)
def test_ingested_file_metadata_cls(
ingested_file_metadata: IngestedFileMetadata, adapter: BaseMetaAdapter
) -> None:
adapter.write_inspected_files([ingested_file_metadata])
inspected_files_read = adapter.read_inspected_files(
for_asset=ingested_file_metadata._for_asset,
dag_id=ingested_file_metadata._dag_id,
dag_exec_date=ingested_file_metadata._dag_exec_date,
)
assert len(inspected_files_read) == 1
assert inspected_files_read[0]._for_asset == ingested_file_metadata._for_asset
# repr:
x = "t" + str(inspected_files_read)
assert isinstance(x, str)
def test_lineage_cls(lineage: Lineage, adapter: BaseMetaAdapter) -> None:
adapter.write_lineage(lineage)
retrieved_lineage = adapter.read_lineage(lineage.data_target)
assert len(retrieved_lineage) > 0
assert retrieved_lineage[0][0].data_target == lineage.data_target
assert retrieved_lineage[0][0].data_sources == lineage.data_sources
# try with additional params:
l2 = adapter.read_lineage(lineage.data_target, dag_id=lineage.dag_id)
# ...another variation:
l3 = adapter.read_lineage(
lineage.data_target, dag_id=lineage.dag_id, dag_exec_date=lineage.dag_exec_date
)
# ...another variation:
l4 = adapter.read_lineage(lineage.data_target, dag_exec_date=lineage.dag_exec_date)
assert retrieved_lineage == l2 == l3 == l4
# repr:
x = "t" + str(retrieved_lineage)
assert isinstance(x, str)
# -----
# tests below test lineage parsing
@pytest.fixture
def test_sql1() -> Tuple[str, Lineage]:
return (
"""
insert into table table5
select * from table3
""",
Lineage(
data_sources=[ShellDataAsset("table3")],
data_target=ShellDataAsset("table5"),
),
)
@pytest.fixture
def test_sql2() -> Tuple[str, Lineage]:
return (
"""
INSERT OVERWRITE TABLE table4
select
from
table1 t1 join table2 t2
on t1.key = t2.key
where exist (select 1 from table3 as t3 where t3.key = t2.fkey
""",
Lineage(
data_sources=ShellDataAsset.from_names(["table1", "table2", "table3"]),
data_target=ShellDataAsset("table4"),
),
)
def test_lineage(test_sql1, test_sql2):
for test_sql, expected_lineage in (test_sql1, test_sql2):
assert (
Lineage.lineage_from_sql_statement(
test_sql, known_data_assets=[f"table{n}" for n in range(0, 10)]
)
== expected_lineage
)
# test lineage comparison when not equal:
assert test_sql1[1] != test_sql2[1]
def test_lineage_from_script() -> None:
Lineage.lineage_from_sql_script(
script_file_relative_path="/dml/test_schema/test_table.sql"
)
```
#### File: test_airtunnel/operators/test_sql_helpers.py
```python
import pytest
from airtunnel.operators.sql import sql_helpers
TEST_SCRIPT = "ddl/test_schema/test_table.sql"
@pytest.mark.parametrize(
argnames=("sql_path",),
argvalues=((TEST_SCRIPT,), ("/" + TEST_SCRIPT,), ((TEST_SCRIPT,),)),
)
def test_load_sql_script(sql_path: str):
# load with a single relative path
s = sql_helpers.load_sql_script(sql_path)
assert len(s) > 50
def test_split_sql_script():
sql_helpers.split_sql_script(sql_helpers.load_sql_script(TEST_SCRIPT))
def test_format_sql_script():
sql_helpers.format_sql_script(
sql_script=sql_helpers.load_sql_script(TEST_SCRIPT),
sql_params_dict={"idx_name": "i1", "idx_col": "c1"},
)
def test_prepare_sql_params(fake_airflow_context):
sql_helpers.prepare_sql_params(
compute_sql_params_function=lambda f: {"x": f["task_instance"]},
airflow_context=fake_airflow_context,
)
```
#### File: test/test_airtunnel/test_assetio_pandas.py
```python
import os
import shutil
from os import path
import pandas as pd
import pytest
from airtunnel import PandasDataAsset, PandasDataAssetIO
@pytest.fixture
def test_csv_asset() -> PandasDataAsset:
return PandasDataAsset("test_csv_out_asset_pandas")
@pytest.fixture
def test_xlsx_in_asset() -> PandasDataAsset:
return PandasDataAsset("test_xlsx_in_asset")
@pytest.fixture
def test_parquet_in_asset() -> PandasDataAsset:
return PandasDataAsset("test_parquet_in_asset")
def test_read_write_csv(test_csv_asset: PandasDataAsset, iris: pd.DataFrame) -> None:
# try without any extra kwargs:
PandasDataAssetIO.write_data_asset(asset=test_csv_asset, data=iris)
# try with additional kwargs:
PandasDataAssetIO.write_data_asset(
asset=test_csv_asset, data=iris, header=False, index=False
)
# test retrieval
# before we can retrieve, we need to move the data from 'staging' to 'ready'
os.makedirs(test_csv_asset.ready_path, exist_ok=True)
# swap the staged output in as the new 'ready' version
shutil.rmtree(test_csv_asset.ready_path)
shutil.move(test_csv_asset.staging_ready_path, test_csv_asset.ready_path)
retrieved = PandasDataAssetIO.retrieve_data_asset(
test_csv_asset, header=None
)
def test_read_write_xlsx(
test_xlsx_in_asset: PandasDataAsset, iris: pd.DataFrame, fake_airflow_context
) -> None:
p = path.join(
test_xlsx_in_asset.staging_pickedup_path(fake_airflow_context),
"test_xlsx_in.xls",
)
os.makedirs(path.dirname(p), exist_ok=True)
iris.to_excel(p)
# try without any extra kwargs:
PandasDataAssetIO.read_data_asset(asset=test_xlsx_in_asset, source_files=[p])
# try with additional kwargs:
PandasDataAssetIO.read_data_asset(
asset=test_xlsx_in_asset, source_files=[p], sheet_name=0
)
def test_read_write_parquet(
test_parquet_in_asset: PandasDataAsset, iris: pd.DataFrame, fake_airflow_context
) -> None:
p = path.join(
test_parquet_in_asset.staging_pickedup_path(fake_airflow_context),
"test_parquet_in.parquet",
)
os.makedirs(path.dirname(p), exist_ok=True)
iris.to_parquet(p)
PandasDataAssetIO.read_data_asset(test_parquet_in_asset, source_files=[p])
# try with additional kwargs:
PandasDataAssetIO.read_data_asset(
asset=test_parquet_in_asset, source_files=[p], engine="auto"
)
def test_read_empty(test_parquet_in_asset: PandasDataAsset) -> None:
with pytest.warns(UserWarning):
empty = PandasDataAssetIO.read_data_asset(asset=test_parquet_in_asset, source_files=[])
assert pd.DataFrame().equals(empty)
``` |
{
"source": "joerg-schneider/blizz",
"score": 2
} |
#### File: src/blizz/_apply.py
```python
import functools
import logging
from typing import List, Union, Dict, Callable
from blizz import _inspect, Field
from blizz._helpers import doublewrap
from ._primitives import Relation, Type, is_pandas_df, is_pyspark_df
try:
import pyspark
except ImportError: # pragma: no cover
pyspark = None # pragma: no cover
try:
import pandas
except ImportError: # pragma: no cover
pandas = None # pragma: no cover
logger = logging.getLogger(__name__)
ASC = "asc"
DESC = "desc"
@doublewrap
def deduplication(
__original_func=None,
*,
key: List[Field] = None,
sort_on: List[Field] = None,
sort_order: str = ASC,
):
"""Apply deduplication to a loaded Blizz relation."""
@functools.wraps(__original_func)
def _decorated(*args, **kwargs):
relation = _inspect.get_class_that_defined_method(__original_func)
assert relation is not None
res = __original_func(*args, **kwargs)
res = _deduplicate(
r=relation, data=res, key=key, sort_on=sort_on, sort_order=sort_order
)
return res
return _decorated
@doublewrap
def defaults(__original_func=None, *, fill: List[Field] = None):
"""Apply default values to a loaded Blizz relation."""
@functools.wraps(__original_func)
def _decorated(*args, **kwargs):
relation = _inspect.get_class_that_defined_method(__original_func)
assert relation is not None
res = __original_func(*args, **kwargs)
res = _fill_defaults(r=relation, data=res, fill=fill)
return res
return _decorated
def _deduplicate(
r: Type[Relation],
data: Union["pyspark.sql.DataFrame", "pandas.DataFrame"],
key: List[Field] = None,
sort_on: List[Field] = None,
sort_order: str = ASC,
) -> Union["pyspark.sql.DataFrame", "pandas.DataFrame"]:
if key is None:
key_fields = r.get_key_fields()
if len(key_fields) == 0:
logger.info("No key fields defined – deduplicating based on all fields.")
key = r.get_fields()
else:
logger.info(f"Deduplicating based on: {key_fields}")
key = key_fields
else:
missing_fields = {
key_field for key_field in key if key_field not in data.columns
}
if missing_fields:
raise ValueError(
f"Cannot deduplicate based on {missing_fields} – not in relation."
)
if is_pyspark_df(data, r):
data: pyspark.sql.DataFrame = data
from pyspark.sql.window import Window
from pyspark.sql.functions import dense_rank, asc, desc
if sort_on is not None:
key_cols = [k if isinstance(k, str) else k.name for k in key]
sort_cols = [s if isinstance(s, str) else s.name for s in sort_on]
row_ranked = Field("bliz__row_number")
window = Window.partitionBy(*key_cols)
if sort_order == ASC:
window = window.orderBy(asc(*sort_cols))
else:
window = window.orderBy(desc(*sort_cols))
rank_expression = dense_rank().over(window)
data_ranked = data.withColumn(row_ranked, rank_expression)
data = data_ranked.where(f"{row_ranked} = 1").drop(row_ranked)
else:
data = data.drop_duplicates(subset=key)
elif is_pandas_df(data, r):
data: pandas.DataFrame = data
if sort_on is not None:
if not isinstance(sort_on, List):
sort_on = [sort_on]
data = data.sort_values(by=sort_on, ascending=sort_order == ASC)
data = data.drop_duplicates(subset=key, keep="first")
logger.info(f"Applied deduplication to {r.name()}.")
return data
def _fill_defaults(
r: Type[Relation],
data: Union["pyspark.sql.DataFrame", "pandas.DataFrame"],
fill: List[Field] = None,
) -> Union["pyspark.sql.DataFrame", "pandas.DataFrame"]:
if fill is None:
fill = r.get_defaults()
else:
if not isinstance(fill, List):
fill = [fill]
# verify given list of fields to fill
has_no_defaults = {
field if isinstance(field, str) else field.name
for field in fill
if field not in r.get_defaults()
}
if has_no_defaults:
raise ValueError(
f"Cannot fill {has_no_defaults} – no defaults specified or not in relation."
)
fill = {
field: default
for field, default in r.get_defaults().items()
if field in fill
}
if is_pyspark_df(data, r):
data: pyspark.sql.DataFrame = data
data = data.fillna(fill)
elif is_pandas_df(data, r):
data: pandas.DataFrame = data
data = data.fillna(fill)
logger.info(f"Applied default values to NAs in {r.name()}.")
return data
@doublewrap
def renames(__original_func=None, *, columns: Dict[str, str] = None):
"""Apply renames values to a loaded Blizz relation."""
@functools.wraps(__original_func)
def _decorated(*args, **kwargs):
relation = _inspect.get_class_that_defined_method(__original_func)
assert relation is not None
res = __original_func(*args, **kwargs)
res = _rename_fields(r=relation, data=res, columns=columns)
return res
return _decorated
def _rename_fields(
r: Type[Relation],
data: Union["pyspark.sql.DataFrame", "pandas.DataFrame"],
columns: Dict[str, str] = None,
) -> Union["pyspark.sql.DataFrame", "pandas.DataFrame"]:
if columns is None:
columns = dict()
defined_renames_on_relation = r.get_field_renames()
all_renames: Dict[str, str] = dict()
all_renames.update(defined_renames_on_relation)
all_renames.update(columns)
cant_rename = {
source_field_name
for source_field_name in all_renames.keys()
if source_field_name not in data.columns
}
if cant_rename:
raise ValueError(f"Cannot renames {cant_rename} – not in loaded DataFrame.")
if is_pyspark_df(data, r):
data: pyspark.sql.DataFrame = data
for from_field_name, to_field_name in all_renames.items():
data = data.withColumnRenamed(from_field_name, to_field_name)
elif is_pandas_df(data, r):
data: pandas.DataFrame = data
data = data.rename(columns=all_renames)
logger.info(f"Applied the following field renames: {all_renames} to {r.name()}.")
return data
@doublewrap
def func(
__original_func=None,
*,
function: Callable[
[Type[Relation], Union["pyspark.sql.DataFrame", "pandas.DataFrame"]],
Union["pyspark.sql.DataFrame", "pandas.DataFrame"],
],
):
"""Apply a user defined function to a loaded Blizz relation."""
@functools.wraps(__original_func)
def _decorated(*args, **kwargs):
relation = _inspect.get_class_that_defined_method(__original_func)
assert relation is not None
res = __original_func(*args, **kwargs)
res = function(relation, res)
return res
return _decorated
```
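These decorators wrap a Relation's `load()` classmethod and post-process whatever it returns. A sketch using the private module path shown above (a public re-export such as `blizz.apply` may exist instead) and an invented pandas relation:

```python
# Sketch only: relation, fields and source file are invented; the import uses the private
# module from this file, swap in the public alias if the package exposes one.
import pandas as pd

from blizz import Field, Relation
from blizz._apply import deduplication

class Orders(Relation):
    ORDER_ID = Field(name="order_id", key=True)
    AMOUNT = Field(name="amount")

    @classmethod
    @deduplication(key=[ORDER_ID], sort_on=[AMOUNT], sort_order="desc")
    def load(cls) -> pd.DataFrame:
        # keep only the highest-amount row per order_id after loading
        return pd.read_csv("orders.csv")  # hypothetical source file
```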
#### File: src/blizz/_bootstrapping.py
```python
from pathlib import Path
from typing import Iterable, Optional, Union
try:
import pyspark
except ImportError: # pragma: no cover
pyspark = None # pragma: no cover
try:
import pandas
except ImportError: # pragma: no cover
pandas = None # pragma: no cover
from ._helpers import safe_name
TEMPLATE_FIELD = '{field_name_var} = Field(name="{field_name}")'
TEMPLATE_FIELD_TYPED = (
' {field_name_var} = Field(name="{field_name}", datatype={data_type})'
)
TEMPLATE_DATA_SOURCE = """
class {table_name}(Relation):
\"\"\"
todo: describe relation {table_name} in this docstring
\"\"\"
{fields}
@classmethod
@blizz.check.keys(on_fail = blizz.check.WARN)
@blizz.check.types(on_fail = blizz.check.WARN)
@blizz.check.fields(on_fail = blizz.check.WARN)
def load(cls) -> DataFrame:
\"\"\"
todo: describe relation {table_name} load process in this docstring
\"\"\"
# todo: implement data source loading here
pass
""".strip()
def data_source_definition_from_file(path: Path, table_name: str):
pass
# temp_df = pandas.read_csv(filepath_or_buffer=str(path), nrows=10000)
def data_source_definition(
field_names: Iterable[str],
dataframe_type: str,
table_name: str = "BootstrappedTable",
field_types: Iterable[str] = None,
add_imports: bool = True,
) -> str:
if field_types is None:
fields = [
TEMPLATE_FIELD.format(field_name_var=safe_name(fn.upper()), field_name=fn)
for fn in field_names
]
else:
fields = [
TEMPLATE_FIELD_TYPED.format(
field_name_var=safe_name(fn.upper()), field_name=fn, data_type=ft
)
for fn, ft in zip(field_names, field_types)
]
class_string = TEMPLATE_DATA_SOURCE.format(
table_name=table_name, fields="\n".join(fields)
)
if add_imports:
imports = ""
imports += "import blizz.check\n"
imports += "from blizz import Relation, Field\n"
if dataframe_type == "spark":
imports += "from pyspark.sql import DataFrame\n"
imports += "from pyspark.sql.types import *\n"
else:
imports += "import pandas as pd\n"
imports += "from pandas import DataFrame\n"
class_string = imports + "\n\n" + class_string
return class_string
def relation_from_dataframe(
df: Union["pyspark.sql.DataFrame", "pandas.DataFrame"],
name: Optional[str] = "BootstrappedTable",
print_text: bool = True,
add_imports: bool = True,
) -> Optional[str]:
field_names = []
field_types = []
if pyspark is not None and isinstance(df, pyspark.sql.DataFrame):
dataframe_type = "spark"
for s in df.schema:
field_names.append(s.name)
field_types.append(str(s.dataType))
elif pandas is not None and isinstance(df, pandas.DataFrame):
dataframe_type = "pandas"
for c in df.columns:
field_names.append(c)
field_types.append(f'"{df[c].dtype.name}"')
else:
raise ValueError(f"Unsupported df passed of type: {type(df)}")
txt = data_source_definition(
table_name=name,
dataframe_type=dataframe_type,
field_names=field_names,
field_types=field_types,
add_imports=add_imports,
)
if print_text:
print(txt)
else:
return txt
```
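`relation_from_dataframe` prints a ready-to-edit Relation skeleton for an existing DataFrame. A short sketch with a small invented pandas frame:

```python
# Sketch: bootstrap a Relation definition from an in-memory pandas DataFrame.
import pandas as pd

from blizz._bootstrapping import relation_from_dataframe

df = pd.DataFrame({"order_id": [1, 2], "amount": [9.99, 4.50]})

# prints an "Orders" class with one Field per column (dtypes included) to stdout;
# pass print_text=False to get the generated source code back as a string instead
relation_from_dataframe(df, name="Orders")
```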
#### File: src/blizz/_helpers.py
```python
import functools
import importlib
import importlib.util
import logging
import os
import re
from glob import glob
from itertools import chain
from pathlib import Path
from typing import Any, Iterable
logger = logging.getLogger(__name__)
try:
import pyspark
except ImportError:
pyspark = None
try:
import pandas
except ImportError:
pandas = None
def camel_case_to_snake(name: str) -> str:
# find all switches from lower to upper
switch_indices = []
for index, c in enumerate(name):
c: str = c
if index == 0:
continue
if c.isupper() and name[index - 1].islower():
switch_indices.append(index)
# insert underscores where switch occurred
for nth_time_insert, insert_at_original_index in enumerate(switch_indices):
name = (
name[0 : insert_at_original_index + nth_time_insert]
+ "_"
+ name[insert_at_original_index + nth_time_insert :]
)
return name.lower()
def safe_name(name: str) -> str:
return re.sub(r"[.\s-]", "_", name)
def recurse_dir_tree(base: Path) -> Iterable[Path]:
return (
Path(p)
for p in chain.from_iterable(
glob(os.path.join(x[0], "*")) for x in os.walk(base.as_posix())
)
)
def import_from_path(module_path: Path) -> Any:
spec = importlib.util.spec_from_file_location(
os.path.basename(module_path.as_posix()).replace(".py", ""), module_path
)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def all_python_modules_in_path(basepath: Path) -> Iterable[Any]:
py_modules = [f for f in recurse_dir_tree(base=basepath) if str(f).endswith(".py")]
return (import_from_path(py_module) for py_module in py_modules)
def pandas_dtype_to_spark_type():
pass
def doublewrap(f):
"""
a decorator decorator, allowing the decorator to be used as:
@decorator(with, arguments, and=kwargs)
or
@decorator
"""
@functools.wraps(f)
def new_dec(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
# actual decorated function
return f(args[0])
else:
# decorator arguments
return lambda realf: f(realf, *args, **kwargs)
return new_dec
def is_blizz_field(in_obj: Any) -> bool:
if "blizz._primitives.Field.__" in str(type(in_obj)):
return True
return False
```
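`doublewrap` is what lets the check/apply decorators be used both bare and with keyword arguments. A tiny self-contained illustration with an invented decorator:

```python
# Minimal illustration of the @doublewrap pattern; "logged" is an invented example decorator.
import functools

from blizz._helpers import doublewrap

@doublewrap
def logged(func, *, prefix="CALL"):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print(f"{prefix}: {func.__name__}")
        return func(*args, **kwargs)
    return wrapper

@logged                  # used bare: doublewrap passes the function straight through
def add(a, b):
    return a + b

@logged(prefix="TRACE")  # used with arguments: doublewrap defers to a lambda capturing them
def sub(a, b):
    return a - b

add(1, 2)  # prints "CALL: add"
sub(3, 1)  # prints "TRACE: sub"
```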
#### File: src/blizz/_run_config.py
```python
from enum import Enum
from pathlib import Path
from typing import Dict, Any, NamedTuple, List, Type
import yaml
from schema import Schema, And, Use, Optional
from blizz import Feature, FeatureGroup, FeatureParameter
from blizz._inspect import find_feature_groups_on_path
class OutputFormat(Enum):
CSV = "csv"
PARQUET = "parquet"
def str_to_format(in_str: str) -> OutputFormat:
for e in OutputFormat:
e: OutputFormat = e
if in_str == e.value:
return e
raise ValueError(f"Unknown format {in_str}")
class FeatureRequest(NamedTuple):
feature: Type[Feature]
parameters: List[FeatureParameter]
class FeatureGroupRequest(NamedTuple):
feature_group: Type[FeatureGroup]
features: List[FeatureRequest]
passthrough: List[str] = []
pre_filter: str = None
post_filter: str = None
class RunConfig(NamedTuple):
feature_groups: List[FeatureGroupRequest]
output_format: OutputFormat
output_schema = Schema(
{"format": And(str, lambda s: s in ("csv", "parquet"), Use(str_to_format))}
)
parameters_schema = Schema(
And(
{"name-suffix": And(str, len), "grid": And(dict)},
Use(
lambda s: FeatureParameter(
suffix_format=s["name-suffix"], parameter_grid=s["grid"]
)
),
)
)
feature_schema = Schema(
{"name": And(str, len), Optional("parameters"): [parameters_schema]}
)
feature_group_schema = Schema(
{
"name": And(str, len),
Optional("pre-filter"): And(
str, len, error="pre-filter should be non-empty or omitted."
),
Optional("post-filter"): And(
str, len, error="post-filter should be non-empty or omitted."
),
"passthrough": [str],
"features": [feature_schema],
}
)
config_schema = Schema(
{"output": output_schema, "feature-groups": [feature_group_schema]}
)
def read_config(file: Path) -> Dict[str, Any]:
with open(str(file), "rt") as f:
return yaml.safe_load(f)
def validate_schema(raw: Dict[str, Any]) -> Dict[str, Any]:
return config_schema.validate(raw)
def run_config_from_file(file: Path, feature_library_base_path: Path) -> RunConfig:
# source_tables = find_source_tables_on_path(basepath=feature_library_base_path)
feature_groups = find_feature_groups_on_path(basepath=feature_library_base_path)
def _feature_group_for_name(fg_name: str) -> Type[FeatureGroup]:
# todo: validate feature group name validity earlier in Schema.validate() -
# pass feature_groups into validate_schema(..) func
for f in feature_groups:
if f.name() == fg_name:
return f
raw_config = read_config(file)
parsed_config = validate_schema(raw_config)
feature_group_requests = []
for raw_fg in parsed_config["feature-groups"]:
name = raw_fg["name"]
feature_group = _feature_group_for_name(name)
pre_filter = raw_fg.get("pre-filter", None)
post_filter = raw_fg.get("post-filter", None)
passthrough = raw_fg.get("passthrough", [])
feature_requests = []
for raw_f in raw_fg["features"]:
f_name = raw_f["name"]
feature = feature_group.get_feature(name=f_name)
parameters = raw_f.get("parameters", [])
feature_requests.append(
FeatureRequest(feature=feature, parameters=parameters)
)
feature_group_requests.append(
FeatureGroupRequest(
feature_group=feature_group,
features=feature_requests,
passthrough=passthrough,
pre_filter=pre_filter,
post_filter=post_filter,
)
)
output_format = parsed_config["output"]["format"]
rc = RunConfig(feature_groups=feature_group_requests, output_format=output_format)
return rc
```
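The schemas above define the expected YAML layout. A hedged sketch that validates an equivalent hand-written dict (feature group, feature and parameter names are invented):

```python
# Hypothetical run config, written as the dict yaml.safe_load would produce from the YAML file.
from blizz._run_config import validate_schema

raw_config = {
    "output": {"format": "parquet"},
    "feature-groups": [
        {
            "name": "StudentFeatureGroup",
            "passthrough": ["Student_ID"],
            "features": [
                {"name": "MarksFeature"},
                {
                    "name": "WindowedMarksFeature",
                    "parameters": [
                        {"name-suffix": "_last_{n}", "grid": {"n": [3, 5]}}
                    ],
                },
            ],
        }
    ],
}

parsed = validate_schema(raw_config)
# "format" comes back as an OutputFormat member and each "parameters" entry as a FeatureParameter
```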
#### File: src/blizz/_runtime.py
```python
import os
from pathlib import Path
from typing import Dict, Type
from pyspark.sql import DataFrame
from blizz._run_config import RunConfig, FeatureGroup, OutputFormat
def _cleanup_spark_files(in_path: Path) -> None:
for file in os.listdir(str(in_path)):
if file.startswith("_S") or file.startswith("._S") or file.startswith(".part-"):
os.remove(str(in_path.joinpath(file)))
def build_features(config: RunConfig) -> Dict[Type[FeatureGroup], DataFrame]:
results = {}
for fg in config.feature_groups:
feature_requests = [f for f in fg.features]
feature_parameters = {
f.feature.name(): f.parameters
for f in feature_requests
if len(f.parameters) > 0
}
computed = fg.feature_group.compute(
features=[f.feature for f in feature_requests],
keep=fg.passthrough,
parameters=feature_parameters,
)
results[fg.feature_group] = computed
return results
def write_results(
config: RunConfig,
out_path: Path,
results: Dict[Type[FeatureGroup], DataFrame],
overwrite: bool = False,
) -> None:
for fg, sdf in results.items():
full_output_path = str(out_path.joinpath(fg.name()))
save_mode = "overwrite" if overwrite else None
if config.output_format == OutputFormat.CSV:
sdf.write.mode(save_mode).csv(path=full_output_path, header=True)
if config.output_format == OutputFormat.PARQUET:
sdf.write.mode(save_mode).parquet(path=full_output_path)
_cleanup_spark_files(Path(full_output_path))
```
#### File: test/blizz/test_checks_spark.py
```python
import pytest
from pyspark.sql import DataFrame
from pyspark.sql.types import StringType, IntegerType
import blizz.check
from blizz import Field, Relation
from test.conftest import get_or_create_spark_session, path_student_performance_test
from test.test_spark_feature_library.data_sources import StudentPerformance
class StudentPerformanceFaulty1(Relation):
"""
Example of a defined field missing.
"""
THIS_IS_MISSING = Field(name="I'm missing")
@classmethod
@blizz.check.fields
def load(cls) -> DataFrame:
return get_or_create_spark_session().read.csv(
path=path_student_performance_test().as_posix(),
inferSchema=True,
header=True,
)
class StudentPerformanceFaulty2(Relation):
"""
Example of a defined field with faulty datatype.
"""
# this is actually a DoubleType:
MARKS = Field(name="Marks", datatype=StringType)
@classmethod
@blizz.check.fields
@blizz.check.types
def load(cls) -> DataFrame:
return get_or_create_spark_session().read.csv(
path=path_student_performance_test().as_posix(),
inferSchema=True,
header=True,
)
def test_field_existence_check() -> None:
"""
"""
with pytest.raises(
expected_exception=ValueError,
match="Field\(s\) 'I'm missing' not part of loaded Relation 'StudentPerformanceFaulty1'",
):
StudentPerformanceFaulty1.load()
def test_field_type_check() -> None:
"""
"""
with pytest.raises(
expected_exception=ValueError,
match="Type error for 'StudentPerformanceFaulty2.Marks'*",
):
StudentPerformanceFaulty2.load()
class StudentPerformanceFaulty3(Relation):
"""
Example of a duplicated field defined as key.
"""
STUDENT_ID = Field(name="Student_ID", datatype=StringType)
# this is actually not the key:
SEMSTER_NAME = Field("Semster_Name", datatype=StringType, key=True)
PAPER_ID = Field(name="Paper_ID", datatype=StringType)
MARKS = Field(name="Marks", datatype=IntegerType)
@classmethod
@blizz.check.fields
@blizz.check.types
@blizz.check.keys
def load(cls) -> DataFrame:
return get_or_create_spark_session().read.csv(
path=path_student_performance_test().as_posix(),
inferSchema=True,
header=True,
)
def test_key_check() -> None:
"""
"""
with pytest.raises(
expected_exception=ValueError,
match="Key error for 'StudentPerformanceFaulty3'*",
):
StudentPerformanceFaulty3.load()
def test_passes_checks() -> None:
sdf = StudentPerformance.load()
assert sdf is not None
```
#### File: test/test_spark_feature_library/data_sources.py
```python
from pyspark.sql import DataFrame
from pyspark.sql import functions as F
from pyspark.sql.types import (
IntegerType,
StringType,
DateType,
TimestampType,
)
import blizz.check
from blizz import Relation, Field
from test.conftest import (
path_student_counceling_test,
path_student_performance_test,
path_employee_test,
path_department_test,
get_or_create_spark_session,
)
class StudentCouncelingInformation(Relation):
"""
This is the example data source "StudentCouncelingInformation" for testing.
"""
STUDENT_ID = Field(
name="Student_ID", datatype=StringType, description="The ID of the student"
)
DATE_OF_ADMISSION = Field(
"DOA", datatype=DateType, description="Date of admission to university."
)
DATE_OF_BIRTH = Field(
name="DOB", datatype=DateType, description="Student's birth date."
)
DEPARTMENT_CHOICES = Field(
name="Department_Choices",
datatype=StringType,
description="Choice of department a student submitted",
)
DEPARTMENT_ADMISSION = Field(
name="Department_Admission",
datatype=StringType,
description="Department where student got admitted",
)
@classmethod
@blizz.check.fields
@blizz.check.types
def load(cls) -> DataFrame:
return (
get_or_create_spark_session()
.read.csv(
path=path_student_counceling_test().as_posix(),
inferSchema=True,
header=True,
)
.withColumn(
cls.DATE_OF_ADMISSION, F.expr(f"cast({cls.DATE_OF_ADMISSION} as date)")
)
.withColumn(cls.DATE_OF_BIRTH, F.expr(f"cast({cls.DATE_OF_BIRTH} as date)"))
)
class StudentPerformance(Relation):
"""
This is the example data source "StudentPerformance" for testing.
"""
STUDENT_ID = Field(name="Student_ID", datatype=StringType, key=True)
SEMSTER_NAME = Field("Semster_Name", datatype=StringType)
PAPER_ID = Field(name="Paper_ID", datatype=StringType)
MARKS = Field(name="Marks", datatype=IntegerType)
@classmethod
@blizz.check.fields
@blizz.check.types
def load(cls) -> DataFrame:
return get_or_create_spark_session().read.csv(
path=path_student_performance_test().as_posix(),
inferSchema=True,
header=True,
)
class EmployeeInformation(Relation):
"""
This is the example data source "EmployeeInformation" for testing.
"""
EMPLOYEE_ID = Field(name="Employee ID", datatype=StringType)
DATE_OF_BIRTH = Field(
name="DOB", datatype=DateType, description="Employee's birth date."
)
DOJ = Field(name="DOJ", datatype=DateType, description="Date Of Joining")
DEPARTMENT_ID = Field(name="Department_ID", datatype=StringType)
@classmethod
@blizz.check.fields
@blizz.check.types
def load(cls) -> DataFrame:
return get_or_create_spark_session().read.csv(
path=path_employee_test().as_posix(), inferSchema=True, header=True
)
class DepartmentInformation(Relation):
"""
This is the example data source "DepartmentInformation" for testing.
"""
DEPARTMENT_ID = Field(name="Department_ID", datatype=StringType)
DATE_OF_ESTABLISHMENT = Field(
name="DOE", datatype=TimestampType, description="Department Establishment Date"
)
DEPARTMENT_NAME = Field(name="Department_Name", datatype=StringType)
@classmethod
@blizz.check.fields
@blizz.check.types
def load(cls) -> DataFrame:
return get_or_create_spark_session().read.csv(
path=path_department_test().as_posix(), inferSchema=True, header=True
)
__all__ = [
"StudentCouncelingInformation",
"StudentPerformance",
"EmployeeInformation",
"DepartmentInformation",
]
``` |
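Because a `Field` renders as its column name, the relations above double as typed column catalogues when writing queries. A sketch that assumes the test CSVs and a local Spark session are available, as set up in `test/conftest.py`:

```python
# Sketch: consume a checked relation; requires the test data and a Spark session from test/conftest.py.
from pyspark.sql import functions as F

from test.test_spark_feature_library.data_sources import StudentPerformance

sdf = StudentPerformance.load()  # field and type checks run as part of load()

best_marks = sdf.groupBy(StudentPerformance.STUDENT_ID).agg(
    F.max(StudentPerformance.MARKS).alias("best_mark")
)
best_marks.show()
```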
{
"source": "Joerg-Schoemer/rfid4xmms2",
"score": 2
} |
#### File: rfid4xmms2/rfid4xmms2/observer.py
```python
import logging
import sys
import time
from os.path import join
from pathlib import Path
from pirc522 import RFID
from pygame import mixer
from rfid4xmms2 import config
from rfid4xmms2.xmms2 import Xmms2Ctl
xmms2ctl = Xmms2Ctl(config.SCRIPTS_DIR, config.COMMANDS_DIR)
run = True
reader = RFID()
last_command_file_name = None
logger = logging.getLogger(__name__)
def play_sound(_file):
if not mixer.get_init():
mixer.init()
mixer.music.load(_file)
mixer.music.play()
while mixer.music.get_busy():
time.sleep(0.2)
def play_error_sound():
play_sound(join(config.SOUNDS_DIR, 'no.mp3'))
def play_success_sound():
play_sound(join(config.SOUNDS_DIR, 'yes.mp3'))
def generate_command_file_name(_card_name):
return join(config.COMMANDS_DIR, _card_name)
def generate_file_name(_uid):
return '_'.join("{:02X}".format(i) for i in _uid) + '.cmd'
def set_last_command_file_name(_file):
global last_command_file_name
last_command_file_name = _file
def command_file_name_not_changed(_command_file_name):
return last_command_file_name == _command_file_name
def create_unknown_file(_card_name):
file_name = join(config.UNKNOWN_DIR, _card_name)
Path(file_name).touch()
def doit():
global run
play_success_sound()
while run:
last_read_time = time.time()
time.sleep(0.1)
reader.wait_for_tag()
(error, data) = reader.request()
if error:
set_last_command_file_name(None)
continue
(error, uid) = reader.anticoll()
if error:
set_last_command_file_name(None)
continue
card_name = generate_file_name(uid)
logger.info('card_name %s', card_name)
command_file_name = generate_command_file_name(card_name)
if time.time() - last_read_time < 0.5 and command_file_name_not_changed(command_file_name):
continue
if not Path(command_file_name).is_file():
logger.info('command file \'%s\' not found', command_file_name)
create_unknown_file(card_name)
play_error_sound()
set_last_command_file_name(command_file_name)
continue
set_last_command_file_name(command_file_name)
play_card = xmms2ctl.play_card(card_name)
if play_card is not None and not play_card:
play_error_sound()
continue
play_success_sound()
if play_card is not None and play_card:
xmms2ctl.start()
def end_read(signum, frame):
global run
logger.info('\nCtrl+C captured, ending read.')
run = False
reader.cleanup()
xmms2ctl.pause()
sys.exit(0)
def handle_hup(signum, frame):
logger.warning('SIGHUP received')
```
#### File: rfid4xmms2/rfid4xmms2/routes.py
```python
from os import remove
from os.path import join, exists
from platform import node
from tempfile import TemporaryDirectory
from flask import render_template, request, redirect, url_for
from rfid4xmms2 import application
from rfid4xmms2.cards import CardCtl
from rfid4xmms2.media import MediaCtl
from rfid4xmms2.observer import xmms2ctl
cardCtl = CardCtl()
mediaCtl = MediaCtl()
@application.route('/')
def index():
return render_template('index.html', hostname=node())
@application.route('/cards/known', methods=['GET', 'POST'])
def cards():
if request.method == 'GET':
return render_template('cards/known.html', cards=cardCtl.list_cards(), hostname=node())
card_name_ = request.form['card_name']
if request.form['action'] == 'delete':
cardCtl.release_action(card_name_)
return redirect(url_for('cards'))
xmms2ctl.play_card(card_name_)
xmms2ctl.start()
return redirect(url_for('player'))
@application.route('/cards/unknown', methods=['GET', 'POST'])
def unknown_cards():
if request.method == 'GET':
return render_template('cards/unknown.html', cards=cardCtl.list_unknown_cards(), hostname=node())
if request.form['action'] == 'delete':
file = join(application.config['UNKNOWN_DIR'], request.form['card_name'])
if exists(file):
remove(file)
return render_template('cards/unknown.html', cards=cardCtl.list_unknown_cards(), hostname=node())
@application.route('/cards/edit', methods=['POST'])
def edit():
if request.form['action'] == 'cancel':
return redirect(request.form['redirect'])
if request.form['action'] == 'save':
cardCtl.assign_action(
request.form['card_name'],
request.form['card_kind'],
request.form['card_what']
)
return redirect(request.form['redirect'])
card = {
'name': request.form['card_name'],
'friendly_name': request.form['card_friendly_name'],
'kind': request.form['card_kind'],
'what': request.form['card_what'],
}
card_kinds = ['album', 'advent', 'next', 'prev', 'title', 'toggle', 'url']
return render_template('cards/edit.html', card=card, card_kinds=card_kinds, redirect=request.form['redirect'],
hostname=node())
@application.route('/cards/assign', methods=['POST'])
def assign():
card_name = request.form['card_name']
if 'kind' in request.form.keys():
kind_ = request.form['kind']
what_ = '"' + str(request.form['what']).replace(':', '\\:') + '"'
cardCtl.assign_action(card_name, kind_, what_)
return redirect(url_for('unknown_cards'))
card_friendly_name = request.form['card_friendly_name']
return render_template('cards/assign.html', card_name=card_name, card_friendly_name=card_friendly_name,
albums=mediaCtl.get_albums(), hostname=node())
@application.route('/media/upload', methods=['POST', 'GET'])
def upload():
if request.method == 'GET':
return render_template('media/upload.html')
files = request.files.getlist('mp3')
with TemporaryDirectory() as tmp_dir:
print('created temporary directory ' + tmp_dir)
for file in files:
if file.filename == '':
return redirect(request.url)
mediaCtl.store_files(tmp_dir, file)
mediaCtl.convert_files(tmp_dir)
mediaCtl.move_files_to_media_lib(tmp_dir)
return redirect(url_for('unknown_cards'))
@application.route('/media/player', methods=['POST', 'GET'])
def player():
if request.method == 'GET' or request.form['action'] == 'reload':
return render_template('media/player.html', whats_playing=xmms2ctl.whats_playing())
xmms2ctl.action(request.form['action'])
return render_template('media/player.html', whats_playing=xmms2ctl.whats_playing())
```
#### File: rfid4xmms2/rfid4xmms2/xmms2.py
```python
import logging
from datetime import date
from json import loads
from os.path import join
from re import compile
from subprocess import run, PIPE
logger = logging.getLogger(__name__)
class Xmms2Ctl:
"""A controller class for xmms2 cli"""
def __init__(self, scripts_dir: str, commands_dir: str):
self.scripts_dir = scripts_dir
self.commands_dir = commands_dir
@staticmethod
def get_status_icon(status: str):
p = compile("^(play|pause|stop).*$")
m = p.match(status.lower())
return m.group(1)
def action(self, action: str):
logger.info('xmms2_action.sh %s', action)
run([join(self.scripts_dir, 'xmms2_action.sh'), action])
def stop(self):
self.action('stop')
def start(self):
self.action('play')
def pause(self):
self.action('pause')
def clear(self):
self.action('clear')
def toggle(self):
self.action('toggle')
def add_album(self, pattern: str):
self.action('add -o partofset,tracknr album:%s' % pattern)
def add_advent(self, pattern: str, day: int):
self.action('add album:%s AND tracknr:%d' % (pattern, day))
def add_url(self, url: str):
self.action('add %s' % url)
def add_title(self, title: str):
self.action('add title:%s' % title)
def play(self, kind: str, what: str):
if kind in ['play', 'toggle', 'pause', 'stop', 'prev', 'next']:
self.action(kind)
return None
if kind not in ['album', 'advent', 'url', 'title']:
return False
self.stop()
self.clear()
if kind == 'album':
self.add_album(what)
elif kind == 'advent':
today = date.today()
if today.month == 12 and today.day <= 24:
self.add_advent(what, today.day)
else:
self.add_album(what)
elif kind == 'url':
self.add_url(what)
elif kind == 'title':
self.add_title(what)
return True
def play_card(self, card_name: str):
with open(join(self.commands_dir, card_name), 'r') as file_:
kind_ = file_.readline().strip()
what_ = file_.readline().strip()
return self.play(kind_, what_)
def whats_playing(self):
result = run(
join(self.scripts_dir, 'currently_playing.sh'),
stdout=PIPE, stderr=PIPE, universal_newlines=True
)
splitlines = result.stdout.splitlines()
status = loads(splitlines[0])
status['status_icon'] = self.get_status_icon(status['status'])
status['play_class'] = 'pause' if status['status_icon'] == 'play' else 'play'
return status
``` |
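A card command file is just two lines, the kind and the pattern/URL, which `play_card` reads and hands to `play`. A sketch with made-up directories and a made-up card UID:

```python
# Sketch with made-up paths; a command file such as commands/04_A3_5B_2F.cmd contains
# two lines, e.g. "album" on the first and the album pattern on the second.
from rfid4xmms2.xmms2 import Xmms2Ctl

ctl = Xmms2Ctl(scripts_dir="/opt/rfid4xmms2/scripts", commands_dir="/opt/rfid4xmms2/commands")

# queue an album directly and start playback
ctl.play("album", "Die drei Fragezeichen*")
ctl.start()

# or resolve a scanned card: reads kind and pattern from the card's .cmd file
if ctl.play_card("04_A3_5B_2F.cmd"):
    ctl.start()
```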
{
"source": "joergschultzelutter/activesoup",
"score": 3
} |
#### File: src/activesoup/csv.py
```python
import requests
from activesoup.protocols import ActiveSoupResult
from activesoup.response import Response
class CsvResponse(Response):
def __init__(self, raw_response):
super().__init__(raw_response)
self.content = raw_response.content
def save(self, to):
if isinstance(to, str):
with open(to, "wb") as f:
self._write_to_file(f)
else:
self._write_to_file(to)
def _write_to_file(self, file_object):
file_object.write(self.content)
def __repr__(self) -> str:
return "CsvResponse"
def __str__(self) -> str:
return "<[csv]>"
class Resolver:
def resolve(self, raw_response: requests.Response) -> ActiveSoupResult:
return CsvResponse(raw_response)
```
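A hedged usage sketch for the resolver above; the URL is made up, and it assumes the server answers with a CSV content type so that this resolver is chosen:

```python
# Hypothetical usage: fetch a CSV URL and persist the body; assumes the response is
# resolved to a CsvResponse based on its content type.
from activesoup import driver

d = driver.Driver()
report = d.get("http://example.com/report.csv")
report.save("report.csv")  # save() also accepts an already-open binary file object
```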
#### File: activesoup/tests/conftest.py
```python
import atexit
import multiprocessing
from os import path
import pytest
_local_directory = path.dirname(path.abspath(__file__))
_test_files_directory = path.join(_local_directory, "test_files")
def render(req_path):
path_to_file = path.realpath(path.join(_test_files_directory, req_path))
if not path_to_file.startswith(_test_files_directory):
raise RuntimeError("Path outside of _test_files_directory")
with open(path_to_file, "r") as f:
return f.read()
class LocalWebServer:
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.stop()
def __init__(self, port):
self.port = port
def _start_local(self, parent_pipe, host, port):
import json
import flask
self._parent_pipe = parent_pipe
self._local_web_server = flask.Flask(__name__)
@self._local_web_server.route("/html/<name>")
def page(name):
return render(name)
@self._local_web_server.route("/form/<name>", methods=["GET", "POST"])
def form(name):
req = flask.request
if req.method == "POST":
return (json.dumps(req.form), 200, {"Content-Type": "application/json"})
else:
return render(name)
@self._local_web_server.route("/status")
def status():
return ""
@self._local_web_server.route("/json")
def json_request():
req = flask.request
return (json.dumps(req.args), 200, {"Content-Type": "application/json"})
self._local_web_server.run(host=host, port=port)
def _await_remote_server_up(self, timeout):
import time
import requests
for _ in range(timeout):
try:
if not self._serverthread.is_alive():
pytest.fail("Stub HTTP Server terminated unexpectedly")
if requests.get(
"http://localhost:{port}/status".format(port=self.port)
).status_code in range(200, 300):
return
except Exception as e:
print(e)
pass
time.sleep(1)
pytest.fail(
"Timed out waiting {timeout} seconds for local web server to start".format(
timeout=timeout
)
)
def start_remote(self, timeout=10):
self._remote_pipe = multiprocessing.Pipe()
self._serverthread = multiprocessing.Process(
target=self._start_local,
kwargs={
"parent_pipe": self._remote_pipe,
"host": "127.0.0.1",
"port": self.port,
},
)
self._serverthread.start()
atexit.register(self.stop)
self._await_remote_server_up(timeout)
def stop(self):
self._serverthread.terminate()
self._serverthread.join()
def serve_forever(self):
self.start_remote()
try:
self._serverthread.join()
except KeyboardInterrupt:
pass
finally:
self.stop()
@pytest.fixture(scope="session")
def localwebserver(request):
lws = LocalWebServer(port=60123)
request.addfinalizer(lws.stop)
lws.start_remote()
return lws
if __name__ == "__main__":
with LocalWebServer(port=60123) as lws:
lws.serve_forever()
```
#### File: activesoup/tests/test_json_decode.py
```python
from activesoup import driver
def test_json_response_decoded_as_json_object(localwebserver):
d = driver.Driver()
resp = d.get(f"http://localhost:{localwebserver.port}/json?foo=bar")
assert resp["foo"] == "bar"
``` |
{
"source": "joergsesterhenn/py-tic-tac-toe",
"score": 4
} |
#### File: tictactoe/ascii/tic_tac_toe_cli_view.py
```python
def print_field(data):
"""Display current game."""
print(
""" ###################################################
T I C T A C T O E
###################################################
|¯¯¯|¯¯¯|¯¯¯|
| """
+ data[0][0]
+ """ | """
+ data[1][0]
+ """ | """
+ data[2][0]
+ """ |
|---|---|---|
| """
+ data[0][1]
+ """ | """
+ data[1][1]
+ """ | """
+ data[2][1]
+ """ |
|---|---|---|
| """
+ data[0][2]
+ """ | """
+ data[1][2]
+ """ | """
+ data[2][2]
+ """ |
|___|___|___|
###################################################"""
)
def is_not_valid(text):
"""Validate input text."""
return not (
len(text) == 3
and text[0] in ("0", "1", "2")
and text[1] == " "
and text[2] in ("0", "1", "2")
)
class TicTacToeCLIView:
"""The tic-tac-toe CLI view."""
def __init__(self):
self.error_text = ""
self.input_text = ""
def get_player_input(self, spieler, data):
"""Get input from player and validate."""
valid_reply_received = False
while not valid_reply_received:
print_field(data)
if self.error_text != "":
print(f" {self.error_text}")
self.input_text = input(
f" Spieler {spieler}"
f", bitte wähle x→ und y↓ Koordinaten (Werte 0 bis 2): "
)
self.error_text = ""
if is_not_valid(self.input_text):
self.error_text = (
"Ungültige Eingabe "
+ self.input_text
+ "! Ein gültiger Eingabewert ist z.B.: 1 1 "
)
else:
valid_reply_received = True
return map(int, self.input_text.split())
@staticmethod
def congratulate_player(spieler, data):
"""Congratulate player who won."""
print_field(data)
print(f" Gratulation Spieler {spieler} du hast gewonnen!")
print(" ###################################################")
def set_error(self, error):
"""Display error message."""
self.error_text = error
```
#### File: tictactoe/flask/tic_tac_toe_game_controller.py
```python
from flask import Flask, Response, render_template, request
from tictactoe.model.tic_tac_toe_model import TicTacToeModel
class EndpointAction:
"""EndpointAction enables adding routes without annotations."""
def __init__(self, action):
self.action = action
def __call__(self, *args):
# Perform the action
answer = self.action()
# Create the answer (bundle it in a correctly formatted HTTP answer)
self.response = Response(answer, status=200, headers={})
# Send it
return self.response
class TicTacToeGameController:
"""The tic-tac-toe game controller."""
app = None
model = None
anweisung_text = ""
meldung_text = ""
images = [[]]
def __init__(self, name, image_path):
self.app = Flask(
name,
static_url_path="",
static_folder=image_path,
template_folder="src/tictactoe/flask/templates",
)
self.model = TicTacToeModel()
self.images = [[0] * 3 for _ in range(3)]
self.add_all_endpoints()
def run(self):
"""Run the flask App."""
self.app.run(debug=True, use_reloader=False)
def add_endpoint(self, endpoint=None, endpoint_name=None, handler=None, **options):
"""Add an endpoint/route to the controller."""
self.app.add_url_rule(
endpoint, endpoint_name, EndpointAction(handler), **options
)
def add_all_endpoints(self):
"""Add all endpoints/routes to the controller."""
# Add root endpoint
self.add_endpoint(
endpoint="/",
endpoint_name="/",
handler=self.tic_tac_toe_game,
methods=["GET"],
)
# Add action endpoints
self.add_endpoint(
endpoint="/take_coordinates",
endpoint_name="/take_coordinates",
handler=self.take_coordinates,
methods=["POST"],
)
def get_model(self):
"""Get the model."""
return self.model
def tic_tac_toe_game(self):
"""Render the HTML template."""
return render_template(
"tic_tac_toe_flask_view.html",
anweisungen=self.anweisung_text,
meldungen=self.meldung_text,
images=self.images,
)
def take_coordinates(self):
"""Take coordinates x/y for active player when button is pressed."""
x = int(request.form.get("a"))
y = int(request.form.get("b"))
if not self.get_model().game_won():
if self.get_model().coordinates_taken(x, y):
self.anweisung_text = (
"Koordinaten "
+ str(x)
+ " "
+ str(y)
+ " sind schon belegt! "
+ "Wähle andere Koordinaten "
)
else:
self.anweisung_text = ""
self.get_model().set_mark(x, y)
sign = self.get_model().get_active_player_sign()
if sign == self.get_model().player_one_sign:
filename = 1
else:
filename = 2
self.images[x][y] = filename
if self.get_model().game_won():
self.congratulate_player(
self.get_model().get_active_player_name())
else:
self.get_model().switch_player()
return render_template(
"tic_tac_toe_flask_view.html",
anweisungen=self.anweisung_text,
meldungen=self.meldung_text,
images=self.images,
)
def congratulate_player(self, active_player):
"""Prepare congratulation message."""
self.meldung_text = "<NAME> " + active_player + " !"
```
#### File: tictactoe/kivy/main.py
```python
import os
from tictactoe.kivy.tic_tac_toe_game_controller import TicTacToeGameController
def run():
"""Run the kivy App."""
image_path = os.path.abspath("images")
controller = TicTacToeGameController(image_path)
controller.run()
```
#### File: tictactoe/kivy/tic_tac_toe_kivy_view.py
```python
import kivy.resources
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import StringProperty, ObjectProperty
class TicTacToeKivyView(App):
"""The tic-tac-toe Kivy view."""
controller = ObjectProperty()
def __init__(self, image_path, controller):
super(TicTacToeKivyView, self).__init__()
self.controller = controller
kivy.resources.resource_add_path(image_path)
def build(self):
"""Initialize some variables."""
self.title = "TicTacToe"
self.root = RootWidget()
self.root.controller = self.controller
return self.root
def set_error(self, text):
"""Display error message."""
self.root.footer_text = text
def set_coordinates_to(self, x, y, mark_filename):
"""Set players mark on coordinates."""
getattr(self.root.ids, str(x) + "_" + str(y)).source = (
mark_filename + ".png"
)
def set_player(self, player):
"""Set current player."""
self.root.input_text = "Spieler " + player + " wähle ein Feld"
def congratulate_player(self, player):
"""Congratulate player who won."""
self.root.input_text = "Spieler " + player + " hat gewonnen!"
self.root.footer_text = "Herzlichen Glückwunsch!"
class RootWidget(BoxLayout):
"""The root widget of the app."""
footer_text = StringProperty()
input_text = StringProperty()
```
#### File: tictactoe/model/tic_tac_toe_model.py
```python
class TicTacToeModel:
"""The tic-tac-toe model."""
initial_data = [[" ", " ", " "], [" ", " ", " "], [" ", " ", " "]]
player_one_sign = "o"
player_two_sign = "x"
def __init__(self):
# copy the template board so every model instance gets its own, independent state
self.data = [row[:] for row in self.initial_data]
self.active_player = "eins"
def set_mark(self, x_position, y_position):
"""Set a mark on position x/y for active player."""
self.data[x_position][y_position] = self.get_active_player_sign()
def get_data(self):
"""Return the current datamodel."""
return self.data
def get_active_player_name(self):
"""Get name of active player."""
return self.active_player
def get_active_player_sign(self):
"""Get sign of active player."""
if self.active_player == "eins":
return self.player_one_sign
return self.player_two_sign
def switch_player(self):
"""Switch the player."""
if self.active_player == "eins":
self.active_player = "zwei"
else:
self.active_player = "eins"
def game_won(self):
"""Check if the game is won."""
return (
self.data[0][0] == self.data[1][0] == self.data[2][0]
and self.coordinates_taken(2, 0)
or self.data[0][1] == self.data[1][1] == self.data[2][1]
and self.coordinates_taken(2, 1)
or self.data[0][2] == self.data[1][2] == self.data[2][2]
and self.coordinates_taken(2, 2)
or self.data[0][0] == self.data[0][1] == self.data[0][2]
and self.coordinates_taken(0, 2)
or self.data[1][0] == self.data[1][1] == self.data[1][2]
and self.coordinates_taken(1, 2)
or self.data[2][0] == self.data[2][1] == self.data[2][2]
and self.coordinates_taken(2, 2)
or self.data[0][0] == self.data[1][1] == self.data[2][2]
and self.coordinates_taken(2, 2)
or self.data[0][2] == self.data[1][1] == self.data[2][0]
and self.coordinates_taken(2, 0)
)
def coordinates_taken(self, a, b):
"""Check if coordinates are already taken."""
return self.data[a][b] != " "
```
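A short walk-through of the model API shared by the CLI, Flask and Kivy front-ends; coordinates are illustrative, and a real controller alternates players via `switch_player()` after every move.

```python
# Minimal walk-through of TicTacToeModel; here player "eins" simply completes the first column.
from tictactoe.model.tic_tac_toe_model import TicTacToeModel

model = TicTacToeModel()
print(model.get_active_player_name())  # "eins"
print(model.get_active_player_sign())  # "o"

for y in range(3):
    if not model.coordinates_taken(0, y):
        model.set_mark(0, y)  # marks (0,0), (0,1) and (0,2) with "o"

print(model.game_won())  # True: the first column is fully marked by one sign
```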
#### File: py-tic-tac-toe/tests/test_tic_tac_toe_model.py
```python
import unittest
from tictactoe.model.tic_tac_toe_model import TicTacToeModel
class TicTacToeModelTest(unittest.TestCase):
"""Test the model."""
cut = TicTacToeModel()
def test_switch_player(self):
"""Test if player was correctly switched."""
self.cut.active_player = "eins"
self.cut.switch_player()
actual_player = self.cut.active_player
expected_player = "zwei"
self.assertEqual(actual_player, expected_player)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "joergsimon/gesture-analysis",
"score": 3
} |
#### File: gesture-analysis/analysis/feature_selection.py
```python
from analysis.preparation import labelMatrixToArray
from analysis.preparation import normalizeZeroClassArray
from visualise.trace_features import trace_feature_origin
from visualise.confusion_matrix import plot_confusion_matrix
import numpy as np
import sklearn
import sklearn.linear_model
import sklearn.preprocessing as pp
import sklearn.svm as svm
import sklearn.feature_selection as fs
from analysis.classification import fit_classifier
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
# Interesting References:
# RFECV:
# <NAME>., <NAME>., <NAME>., & <NAME>. (2002). Gene selection for
# cancer classification using support vector machines. Mach. Learn.. 46(1-3). 389-422.
def feature_selection(train_data, train_labels, const):
train_labels_arr, exclude = labelMatrixToArray(train_labels, const.label_threshold)
train_data_clean = train_data.drop(exclude)
train_labels_arr, train_data_clean, _ = normalizeZeroClassArray(train_labels_arr, train_data_clean)
print "num features before selection: {}".format(train_data_clean.columns.size)
feature_index = variance_threshold(train_data_clean)
clf, clf_name, needs_scaling = fit_classifier(train_data_clean.values[:,feature_index], np.array(train_labels_arr))
prediction = clf.predict(get_values(train_data_clean, feature_index, needs_scaling))
print("report for {} after variance threshold".format(clf_name))
print(classification_report(train_labels_arr,prediction))
cnf_matrix = confusion_matrix(train_labels_arr, prediction)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['0.0','1.0','2.0','3.0','4.0','5.0','6.0','7.0'],
title="Confusion Matrix for {} after variance threshold".format(clf_name))
trace_feature_origin(feature_index,const)
feature_index = rfe(train_data_clean,train_labels_arr)
clf, clf_name, needs_scaling = fit_classifier(train_data_clean.values[:, feature_index], np.array(train_labels_arr))
prediction = clf.predict(get_values(train_data_clean, feature_index, needs_scaling))
print("report for {} after RFE".format(clf_name))
print(classification_report(train_labels_arr, prediction))
cnf_matrix = confusion_matrix(train_labels_arr, prediction)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['0.0','1.0','2.0','3.0','4.0','5.0','6.0','7.0'],
title="Confusion Matrix for {} after variance threshold".format(clf_name))
trace_feature_origin(feature_index, const)
feature_index = k_best_chi2(train_data_clean, train_labels_arr, 700)
clf, clf_name, needs_scaling = fit_classifier(train_data_clean.values[:, feature_index], np.array(train_labels_arr))
prediction = clf.predict(get_values(train_data_clean, feature_index, needs_scaling))
print("report for {} after Chi2".format(clf_name))
print(classification_report(train_labels_arr, prediction))
cnf_matrix = confusion_matrix(train_labels_arr, prediction)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['0.0','1.0','2.0','3.0','4.0','5.0','6.0','7.0'],
title="Confusion Matrix for {} after variance threshold".format(clf_name))
trace_feature_origin(feature_index, const)
feature_index = rfe_cv_f1(train_data_clean, train_labels_arr)
clf, clf_name, needs_scaling = fit_classifier(train_data_clean.values[:, feature_index], np.array(train_labels_arr))
prediction = clf.predict(get_values(train_data_clean, feature_index, needs_scaling))
print("report for {} after RFECV".format(clf_name))
print(classification_report(train_labels_arr, prediction))
cnf_matrix = confusion_matrix(train_labels_arr, prediction)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['0.0','1.0','2.0','3.0','4.0','5.0','6.0','7.0'],
title="Confusion Matrix for {} after variance threshold".format(clf_name))
trace_feature_origin(feature_index, const)
plt.show()
def get_values(data, feature_index, needs_scaling):
if needs_scaling:
values = data.values[:, feature_index]
minmax = pp.MinMaxScaler()
values = minmax.fit_transform(values)
return values
else:
return data.values[:, feature_index]
def variance_threshold(train_data):
# feature selection using VarianceThreshold filter
sel = fs.VarianceThreshold(threshold=(.8 * (1 - .8)))
fit = sel.fit(train_data.values)
col_index = fit.get_support(indices=True)
print "num features selected by VarianceThreshold: {}".format(len(col_index))
return col_index
def rfe(train_data, train_labels):
# important todo!
# todo: I think also for feature selection we should take care the 0 class is balanced!
# todo: if you use it that way, scale the features
print "Recursive eleminate features: "
svc = sklearn.linear_model.Lasso(alpha = 0.1) #svm.SVR(kernel="linear")
print "scale data"
values = train_data.values
minmax = pp.MinMaxScaler()
values = minmax.fit_transform(values) # pp.scale(values)
print "test fit."
svc.fit(values, np.array(train_labels))
print "run rfecv.."
rfecv = fs.RFE(estimator=svc, step=0.1, verbose=2)
rfecv.fit(values, np.array(train_labels))
print "get support..."
col_index = rfecv.get_support(indices=True)
print "num features selected by RFE(CV)/Lasso: {}".format(len(col_index))
return col_index
def rfe_cv_f1(train_data, train_labels):
# important todo!
# todo: I think also for feature selection we should take care the 0 class is balanced!
# todo: if you use it that way, scale the features
print "Recursive eleminate features: "
svc = svm.SVC(kernel="linear") #sklearn.linear_model.Lasso(alpha = 0.1)
print "scale data"
values = train_data.values
minmax = pp.MinMaxScaler()
values = minmax.fit_transform(values)#pp.scale(values)
print "test fit."
svc.fit(values, np.array(train_labels).astype(int))
print "run rfecv.."
rfecv = fs.RFECV(estimator=svc, step=0.05, verbose=2)
rfecv.fit(values, np.array(train_labels).astype(int))
print "get support..."
col_index = rfecv.get_support(indices=True)
print "num features selected by RFECV/SVR: {}".format(len(col_index))
return col_index
def k_best_chi2(train_data, train_labels, k):
values = train_data.values
if values.min() < 0:
values = values + abs(values.min())
kb = fs.SelectKBest(fs.chi2, k=k)
kb.fit(values, np.array(train_labels))
col_index = kb.get_support(indices=True)
print "num features selected by K-Best using chi2: {}".format(len(col_index))
return col_index
```
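As a standalone illustration of the `VarianceThreshold` step used in `variance_threshold()` above (the toy matrix below is invented), `get_support(indices=True)` returns the indices of the columns whose variance exceeds the threshold:
```python
import numpy as np
import sklearn.feature_selection as fs

X = np.array([[0.0, 1.0, 0.1],
              [0.0, 2.0, 0.1],
              [0.0, 3.0, 0.1],
              [0.0, 4.0, 0.1]])
sel = fs.VarianceThreshold(threshold=(.8 * (1 - .8)))
sel.fit(X)
print(sel.get_support(indices=True))  # [1] -- only the middle column varies enough
```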
#### File: gesture-analysis/dataingestion/cache_control.py
```python
import os
import os.path as path
import sys
from const.constants import Constants
def has_preprocess_basic_cache(const):
ok = path.isfile(const.preprocessed_data_cache_file)
ok = ok and path.isfile(const.preprocessed_data_meta)
return ok
def has_window_cache(const):
ok = path.isfile(const.window_data_meta)
ok = ok and path.isfile(const.window_data_cache_file)
ok = ok and path.isfile(const.window_label_cache_file)
return ok
def clear_all_cache(const):
chaches = ["step1","step2","step3"]
clear_cache(chaches,const)
def clear_cache(cache_steps_to_clear, const):
for step in cache_steps_to_clear:
if step == "step1":
if path.isfile(const.init_data_cache_file):
os.remove(const.init_data_cache_file)
os.remove(const.init_data_meta)
elif step == "step2":
if path.isfile(const.preprocessed_data_cache_file):
os.remove(const.preprocessed_data_cache_file)
if path.isfile(const.preprocessed_data_meta):
os.remove(const.preprocessed_data_meta)
elif step == "step3":
if path.isfile(const.window_data_meta):
os.remove(const.window_data_meta)
if path.isfile(const.window_label_cache_file):
os.remove(const.window_label_cache_file)
if path.isfile(const.window_data_cache_file):
os.remove(const.window_data_cache_file)
if __name__ == '__main__':
const = Constants()
if sys.argv[1] == 'clear':
if sys.argv[2] == 'all':
clear_all_cache(const)
else:
clear_cache(sys.argv[2:],const)
```
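The same cache clearing can also be done programmatically; a minimal sketch, assuming the project root is on the path so `const.constants` resolves:
```python
from const.constants import Constants
from dataingestion.cache_control import clear_cache, clear_all_cache

const = Constants()
clear_cache(["step3"], const)   # drop only the windowed data/label caches
# clear_all_cache(const)        # or remove every cached stage
```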
#### File: gesture-analysis/utils/freshrotation.py
```python
import numpy as np
def rotX(theta):
return np.array([[1, 0, 0]
, [0, np.cos(theta), -np.sin(theta)]
, [0, np.sin(theta), np.cos(theta)]])
def rotY(theta):
return np.array([[np.cos(theta), 0, np.sin(theta)]
, [0, 1, 0]
, [-np.sin(theta), 0, np.cos(theta)]])
def rotZ(theta):
return np.array([[np.cos(theta), -np.sin(theta), 0]
, [np.sin(theta), np.cos(theta), 0]
, [0, 0, 1]])
def euler_matrix(x, y, z):
return rotX(x).dot(rotY(y)).dot(rotZ(z))
def vector_slerp(v1, v2, fraction):
perp_v = np.cross(v1, v2)
# perp_v /= np.linalg.norm(perp_v)
angle = np.arccos(np.dot(v1,v2)/(np.linalg.norm(v1)*np.linalg.norm(v2))) * fraction
return rotation_matrix(angle, perp_v).dot(v1)
def unit_vector(v):
return v/np.linalg.norm(v)
def rotation_matrix(angle, direction):
sina = np.sin(angle)
cosa = np.cos(angle)
direction = unit_vector(direction)
# rotation matrix around unit vector
R = np.diag([cosa, cosa, cosa])
R += np.outer(direction, direction) * (1.0 - cosa)
direction *= sina
R += np.array([[ 0.0, -direction[2], direction[1]],
[ direction[2], 0.0, -direction[0]],
[-direction[1], direction[0], 0.0]])
return R
``` |
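A quick numerical sanity check of the rotation helpers above (not from the original repo; it assumes the repository root is on `sys.path` so `utils.freshrotation` imports):
```python
import numpy as np
from utils.freshrotation import rotZ, euler_matrix, vector_slerp

theta = np.pi / 2
v = np.array([1.0, 0.0, 0.0])
print(np.allclose(rotZ(theta).dot(v), [0.0, 1.0, 0.0]))         # x-axis -> y-axis
print(np.allclose(euler_matrix(0.0, 0.0, theta), rotZ(theta)))  # only the Z term is active
# slerp half-way between the x- and y-axis lands on the diagonal
halfway = vector_slerp(v, np.array([0.0, 1.0, 0.0]), 0.5)
print(np.allclose(halfway, [np.sqrt(2) / 2, np.sqrt(2) / 2, 0.0]))
```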
{
"source": "joerick/cibuildwheel",
"score": 2
} |
#### File: cibuildwheel/unit_test/utils.py
```python
from cibuildwheel.options import CommandLineArguments
def get_default_command_line_arguments() -> CommandLineArguments:
defaults = CommandLineArguments()
defaults.platform = "auto"
defaults.allow_empty = False
defaults.archs = None
defaults.config_file = None
defaults.output_dir = None
defaults.package_dir = "."
defaults.prerelease_pythons = False
defaults.print_build_identifiers = False
return defaults
``` |
{
"source": "joerick/django-timecode",
"score": 3
} |
#### File: django-timecode/timecode/timecode.py
```python
class Timecode():
frames = 0
def __init__(self, string=None, fps=25, total_frames=0):
self.fps = fps
if string is None:
self.total_frames = int(total_frames)
else:
unpacked = string.split(':')
if len(unpacked) == 1:
hours = minutes = seconds = 0
frames = int(unpacked[0])
elif len(unpacked) == 2:
hours = minutes = 0
seconds, frames = (int(each) for each in unpacked)
elif len(unpacked) == 3:
hours = 0
minutes, seconds, frames = (int(each) for each in unpacked)
elif len(unpacked) == 4:
hours, minutes, seconds, frames = (int(each) for each in unpacked)
else:
raise ValueError('Invalid timecode %s' % string)
if hours > 99 or minutes > 59 or seconds > 59 or frames >= fps:
raise ValueError('Invalid timecode %s' % string)
self.total_frames = ((hours*60 + minutes)*60 + seconds)*fps + frames
def __repr__(self):
return "Timecode('%s', fps=%i)" % (str(self), self.fps)
def __str__(self):
return '%02i:%02i:%02i:%02i' % self.components()
def __cmp__(self, other):
if not isinstance(other, Timecode):
raise TypeError
return cmp(self.total_frames, other.total_frames)
def __eq__(self, other):
return isinstance(other, Timecode) and self.total_frames == other.total_frames
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.total_frames)
def __int__(self):
return self.total_frames
def __add__(self, other):
self._assert_equal_fps(other)
return Timecode(total_frames=self.total_frames + int(other))
def __sub__(self, other):
self._assert_equal_fps(other)
return Timecode(total_frames=self.total_frames - int(other))
def components(self):
frames_per_hour = self.fps * 60 * 60
frames_per_minute = self.fps * 60
hours, hours_remainder = divmod(self.total_frames, frames_per_hour)
minutes, minutes_remainder = divmod(hours_remainder, frames_per_minute)
seconds, frames = divmod(minutes_remainder, self.fps)
return (hours, minutes, seconds, frames)
def _assert_equal_fps(self, other):
if self.fps != other.fps:
raise self.FPSMismatch
@property
def hours(self):
return self.components()[0]
@property
def minutes(self):
return self.components()[1]
@property
def seconds(self):
return self.components()[2]
@property
def frames(self):
return self.components()[3]
class FPSMismatch(Exception):
pass
``` |
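Illustrative use of the `Timecode` class above (these snippets are not part of the package); frame counts follow the default 25 fps:
```python
from timecode.timecode import Timecode

t = Timecode('01:00:00:00')                          # one hour at 25 fps
assert t.total_frames == 90000
assert str(t + Timecode('00:00:01:05')) == '01:00:01:05'
assert Timecode(total_frames=26).components() == (0, 0, 1, 1)
assert Timecode('10') == Timecode(total_frames=10)   # bare frame count
```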
{
"source": "joerick/kindle-image-display",
"score": 3
} |
#### File: kindle-image-display/server/server.py
```python
import os, textwrap
from flask import Flask
from PIL import ImageFont, ImageDraw, Image
from StringIO import StringIO
from datetime import datetime
from pytz import timezone
import tweepy
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Hello, World!'
def draw_text_centered(draw, center, text, font, spacing=0):
text_box_size = draw.multiline_textsize(text, font=font, spacing=spacing)
topleft = (
center[0] - text_box_size[0]/2,
center[1] - text_box_size[1]/2
)
draw.multiline_text(topleft, text, font=font, align='center', spacing=spacing)
consumer_key = os.environ.get("TWITTER_CONSUMER_KEY", "")
consumer_secret = os.environ.get("TWITTER_CONSUMER_SECRET", "")
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
access_token = os.environ.get("TWITTER_TOKEN", "")
access_token_secret = os.environ.get("TWITTER_TOKEN_SECRET", "")
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
def get_latest_tweet():
public_tweets = api.user_timeline()
tweet = public_tweets[0]
return tweet.text
@app.route('/kindleimage')
def kindle_image():
image = Image.new('L', (800, 600), 255)
draw = ImageDraw.Draw(image)
small_font = ImageFont.truetype('Garamond.otf', 10)
# draw timestamp
# text = str(datetime.now(timezone('Europe/London')).strftime('%x %X'))
# draw.multiline_text((10, 10), text, font=small_font, spacing=20)
tweet = get_latest_tweet()
wrapped_tweet = '\n'.join(textwrap.wrap(tweet, width=30))
font = ImageFont.truetype('ArnoPro-Caption.otf', 60)
draw_text_centered(draw, text=wrapped_tweet, center=(400, 288), font=font, spacing=20)
tagline_font = ImageFont.truetype('Futura LT Medium.otf', 24)
draw_text_centered(draw, text=u'–\n@GrisedalePike', center=(400, 516), font=tagline_font)
image = image.transpose(Image.ROTATE_90)
image_data = StringIO()
image.save(image_data, format='png')
image_data.seek(0)
return (image_data.read(), 200, {
'content-type': 'image/png',
'cache-control': 'no-cache'
})
@app.route('/nook')
def nook_page():
page = '''
<html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1">
<style>
* {
margin: 0;
padding: 0;
}
html, body {
min-height: 100%;
}
.image {
width: 100%;
height: 100%;
background-size: contain;
background-position: center;
background-repeat: no-repeat;
}
</style>
</head>
<body>
<div id="image" class="image"></div>
<script>
function refresh() {
var timestamp = Math.floor(Date.now() / 1000);
var imageUrl = '/kindleimage' + '?' + timestamp;
var imageDiv = document.getElementById('image');
imageDiv.style.backgroundImage = 'url('+imageUrl+')';
// hack to force repaint
imageDiv.style.display='none';
imageDiv.offsetHeight; // no need to store this anywhere, the reference is enough
imageDiv.style.display='';
}
refresh();
setInterval(refresh, 4 * 60 * 60 * 1000);
</script>
<style></style>
</body>
</html>
'''
return (page, 200, {'content-type': 'text/html'})
@app.route('/nookvideo')
def nook_video_page():
page = '''
<html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1">
<style>
* {
margin: 0;
padding: 0;
}
html, body {
min-height: 100%;
}
.image {
width: 100%;
height: 100%;
background-size: contain;
background-position: center;
background-repeat: no-repeat;
}
</style>
</head>
<body>
<div id="image" class="image"></div>
<script>
function refresh() {
var timestamp = Math.floor(Date.now() / 1000);
var imageUrl = '/kindleimage' + '?' + timestamp;
var imageDiv = document.getElementById('image');
imageDiv.style.backgroundImage = 'url('+imageUrl+')';
// hack to force repaint
imageDiv.style.display='none';
imageDiv.offsetHeight; // no need to store this anywhere, the reference is enough
imageDiv.style.display='';
}
refresh();
setInterval(refresh, 2 * 60 * 60 * 1000);
</script>
<style></style>
</body>
</html>
'''
return (page, 200, {'content-type': 'text/html'})
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
``` |
{
"source": "joerick/mkdocs-include-markdown-plugin",
"score": 3
} |
#### File: mkdocs-include-markdown-plugin/mkdocs_include_markdown_plugin/event.py
```python
import html
import re
from pathlib import Path
INCLUDE_TAG_REGEX = re.compile(
r'''
{% # opening tag
\s*
include # directive name
\s+
"(?P#[^"]+)" # "filename"
\s*
%} # closing tag
''',
flags=re.VERBOSE,
)
INCLUDE_MARKDOWN_TAG_REGEX = re.compile(
r'''
{% # opening tag
\s*
include\-markdown # directive name
\s+
"(?P#[^"]+)" # "filename"
(?:\s+start="(?P<start>[^"]+)")? # optional start expression
(?:\s+end="(?P<end>[^"]+)")? # optional end expression
\s*
%} # closing tag
''',
flags=re.VERBOSE,
)
def _on_page_markdown(markdown, page, **kwargs):
page_src_path = page.file.abs_src_path
def found_include_tag(match):
filename = match.group('filename')
file_path_abs = Path(page_src_path).parent / filename
if not file_path_abs.exists():
raise ValueError('File \'%s\' not found' % filename)
text_to_include = file_path_abs.read_text(encoding='utf8')
# Allow good practice of having a final newline in the file
if text_to_include.endswith('\n'):
text_to_include = text_to_include[:-1]
return text_to_include
def found_include_markdown_tag(match):
filename = match.group('filename')
start = match.group('start')
end = match.group('end')
file_path_abs = Path(page_src_path).parent / filename
if not file_path_abs.exists():
raise ValueError('File \'%s\' not found' % filename)
text_to_include = file_path_abs.read_text(encoding='utf8')
if start:
_, _, text_to_include = text_to_include.partition(start)
if end:
text_to_include, _, _ = text_to_include.partition(end)
return (
'<!-- BEGIN INCLUDE %s %s %s -->\n' % (
filename, html.escape(start or ''), html.escape(end or '')
)
+ text_to_include
+ '\n<!-- END INCLUDE -->'
)
markdown = re.sub(INCLUDE_TAG_REGEX,
found_include_tag,
markdown)
markdown = re.sub(INCLUDE_MARKDOWN_TAG_REGEX,
found_include_markdown_tag,
markdown)
return markdown
```
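A short demonstration of the `include-markdown` tag regex defined above (the tag string is invented; assumes the plugin package is importable):
```python
from mkdocs_include_markdown_plugin.event import INCLUDE_MARKDOWN_TAG_REGEX

tag = '{% include-markdown "chapter.md" start="<!--intro-->" end="<!--outro-->" %}'
match = INCLUDE_MARKDOWN_TAG_REGEX.search(tag)
print(match.group('filename'))  # chapter.md
print(match.group('start'))     # <!--intro-->
print(match.group('end'))       # <!--outro-->
```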
#### File: joerick/mkdocs-include-markdown-plugin/setup.py
```python
import io
import os
import re
import sys
from setuptools import Command, find_packages, setup
URL = 'https://github.com/mondeja/mkdocs-include-markdown-plugin'
TEST_EXTRAS = [
'pytest==6.1.2',
'pytest-cov==2.10.1'
]
LINT_EXTRAS = [
'flake8==3.8.4',
'flake8-print==4.0.0',
'flake8-implicit-str-concat==0.2.0',
'isort==5.6.4',
'yamllint==1.25.0',
]
DEV_EXTRAS = [
'twine==3.2.0',
'bump2version==1.0.1',
'pre-commit==2.9.2'
] + TEST_EXTRAS + LINT_EXTRAS
HERE = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(HERE, "README.md"), encoding="utf-8") as f:
LONG_DESCRIPTION = "\n" + f.read()
ABOUT = {}
INIT_FILEPATH = os.path.join(
HERE, "mkdocs_include_markdown_plugin", "__init__.py")
with io.open(INIT_FILEPATH, encoding="utf-8") as f:
content = f.read()
ABOUT["__title__"] = \
re.search(r"__title__\s=\s[\"']([^\"']+)[\"']", content).group(1)
ABOUT["__version__"] = \
re.search(r"__version__\s=\s[\"']([^\"']+)[\"']", content).group(1)
ABOUT["__description__"] = \
re.search(r"__description__\s=\s[\"']([^\"']+)[\"']", content).group(1)
class UploadCommand(Command):
'Support setup.py upload.'
description = 'Build and publish the package.'
user_options = [
('test', None, 'Specify if you want to test your upload to Pypi.'),
]
@staticmethod
def status(s):
'Prints things in bold.'
sys.stdout.write('\033[1m{0}\033[0m\n'.format(s))
def initialize_options(self):
self.test = None
def finalize_options(self):
pass
def run(self):
from shutil import rmtree
try:
self.status('Removing previous builds…')
rmtree(os.path.join(HERE, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(
sys.executable))
self.status('Uploading the package to PyPI via Twine…')
cmd = 'twine upload%s dist/*' % (
' --repository-url https://test.pypi.org/legacy/' if self.test
else ''
)
os.system(cmd)
sys.exit()
setup(
name=ABOUT['__title__'],
version=ABOUT['__version__'],
description=ABOUT['__description__'],
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author='<NAME>',
url=URL,
packages=find_packages(exclude=["tests"]),
include_package_data=True,
entry_points={
'mkdocs.plugins': [
('include-markdown = mkdocs_include_markdown_plugin.plugin'
':IncludeMarkdownPlugin'),
]
},
zip_safe=False,
project_urls={
'Documentation': URL,
'Source': URL,
'Issue Tracker': URL + '/issues'
},
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Documentation',
'Topic :: Documentation',
'Topic :: Text Processing',
'Topic :: Text Processing :: Markup :: Markdown',
],
extras_require={
'dev': DEV_EXTRAS,
'test': TEST_EXTRAS
},
cmdclass={
'upload': UploadCommand,
}
)
``` |
{
"source": "joeriddles/dictionizr",
"score": 3
} |
#### File: tests/utils/utils.py
```python
def eq(a, b) -> bool:
if a is None and b is None:
return True
elif a is None or b is None:
return False
a_can_vars, a_vars = can_vars(a)
b_can_vars, b_vars = can_vars(b)
if a_can_vars and b_can_vars:
if len(a_vars) != len(b_vars):
return False
for a_key, a_value in a_vars.items():
b_value = b_vars[a_key]
if not eq(a_value, b_value):
return False
return True
elif a_can_vars != b_can_vars:
if isinstance(b, dict):
a, b = b, a
if isinstance(a, dict):
for a_key, a_value in a.items():
try:
b_value = getattr(b, a_key)
if not eq(a_value, b_value):
return False
except AttributeError:
return False
return True
else:
if isinstance(a, list) and isinstance(b, list):
if len(a) != len(b):
return False
try:
a = sorted(a)
b = sorted(b)
except (TypeError, ValueError):  # unorderable items raise TypeError when sorted
pass
finally:
for index, a_value in enumerate(a):
b_value = b[index]
if not eq(a_value, b_value):
return False
return True
# if all else fails...
return a == b
def can_vars(obj):
    """Return (True, vars(obj)) when vars() works on obj, otherwise (False, None)."""
try:
obj_vars = vars(obj)
except TypeError:
return False, None
else:
return True, obj_vars
``` |
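An illustrative check of `eq()` above comparing an object against an equivalent dict (the `Point` class is made up for the example; the import path mirrors the file header):
```python
from tests.utils.utils import eq

class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

assert eq(Point(1, 2), Point(1, 2))
assert eq(Point(1, 2), {'x': 1, 'y': 2})      # dict vs. object attribute comparison
assert not eq(Point(1, 2), {'x': 1, 'y': 3})
assert eq([Point(1, 2)], [{'x': 1, 'y': 2}])  # lists are compared element-wise
```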
{
"source": "joeriddles/FAPP",
"score": 3
} |
#### File: FAPP/backend/db_base.py
```python
from typing import Generator
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.session import Session
from .settings import Settings
Base = declarative_base()
class DbBase:
def __init__(self, settings: Settings, **kwargs):
engine = create_engine(settings.db_uri, **kwargs)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base.metadata.create_all(bind=engine)
self.SessionLocal = SessionLocal
def get_db(self) -> Generator[Session, None, None]:
db: Session = self.SessionLocal()
try:
yield db
finally:
db.close()
```
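A minimal sketch of using `DbBase` outside of FastAPI (the sqlite URI is a placeholder; the `Settings.parse_obj` usage mirrors the test fixture further below):
```python
from sqlalchemy import text

from backend.db_base import DbBase
from backend.settings import Settings

settings = Settings.parse_obj({"db_uri": "sqlite:///example.db"})
db_base = DbBase(settings, connect_args={"check_same_thread": False})
for session in db_base.get_db():  # yields one Session, closes it when the loop ends
    print(session.execute(text("SELECT 1")).scalar())
```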
#### File: FAPP/backend/main.py
```python
from backend.settings import Settings
from starlette.responses import Response
from backend.hero_service import HeroService
from typing import Optional
from fastapi import Depends, FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from sqlalchemy.orm.session import Session
from .db_base import DbBase
from .hero import CreateHero, Hero
app = FastAPI()
origins = [
"http://localhost:4200",
"http://127.0.0.1:4200",
"http://frontend:4200",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
settings = Settings()
db_base = DbBase(settings)
@app.get(
"/api/heroes/",
response_model=list[Hero]
)
def list_heroes(name: Optional[str]=None, db: Session = Depends(db_base.get_db)) -> list[Hero]:
hero_service = HeroService(db)
heroes = hero_service.list_heroes(name)
return heroes
@app.get(
"/api/heroes/{id}/",
response_model=Hero
)
def get_hero_by_id(id: int, db: Session = Depends(db_base.get_db)) -> Hero:
hero_service = HeroService(db)
hero = hero_service.get_hero_by_id(id)
if hero is None:
raise_404(id)
return hero
@app.post(
"/api/heroes/",
response_model=Hero
)
def add_hero(create_hero: CreateHero, db: Session = Depends(db_base.get_db)) -> Hero:
hero_service = HeroService(db)
hero = hero_service.create_hero(create_hero)
return hero
@app.put(
"/api/heroes/",
response_model=None,
response_class=Response,
status_code=204,
)
def update_hero(update: Hero, db: Session = Depends(db_base.get_db)) -> None:
hero_service = HeroService(db)
try:
hero_service.update_hero(update)
except ValueError:
raise_404(update.id)
@app.delete(
"/api/heroes/{id}/",
response_model=None,
response_class=Response,
status_code=204,
)
def delete_hero(id: int, db: Session = Depends(db_base.get_db)) -> None:
hero_service = HeroService(db)
hero_service.delete_hero(id)
def raise_404(id: int):
raise HTTPException(404, f"Hero with id={id} not found")
```
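A hedged sketch of exercising the endpoints above with FastAPI's `TestClient`; it assumes `CreateHero` accepts a `name` field (its schema is not shown here) and that the configured database is reachable:
```python
from fastapi.testclient import TestClient

from backend.main import app

client = TestClient(app)
created = client.post("/api/heroes/", json={"name": "Captain Coverage"}).json()
assert client.get(f"/api/heroes/{created['id']}/").json()["name"] == "Captain Coverage"
assert client.delete(f"/api/heroes/{created['id']}/").status_code == 204
```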
#### File: backend/tests/fixtures.py
```python
import os
import pytest
from backend.db_base import DbBase
from backend.db_hero import DbHero
from backend.files import TOP_LEVEL_DIR
from backend.settings import Settings
@pytest.fixture
def db():
# Remove test.db if it exists
sql_file = os.path.abspath(os.path.join(TOP_LEVEL_DIR, "test.db"))
if os.path.exists(sql_file):
os.remove(sql_file)
db_heroes = [
DbHero(id = 1, name = "Middle Manager of Justice"),
DbHero(id = 2, name = "Code Cowboy"),
]
settings = Settings.parse_obj({ "db_uri": "sqlite:///test.db" })
db_base = DbBase(settings, connect_args={"check_same_thread": False})
for db in db_base.get_db():
db.add_all(db_heroes)
db.commit()
yield db
# Clean up test.db
sql_file = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "test.db"))
os.remove(sql_file)
``` |
{
"source": "joeridebruijckere/Inspectra-Gadget",
"score": 3
} |
#### File: joeridebruijckere/Inspectra-Gadget/filters.py
```python
import numpy as np
from scipy import ndimage, signal
from scipy.interpolate import interp1d, interp2d
def derivative(data, method, times_x, times_y):
times_x, times_y = int(times_x), int(times_y)
if len(data) == 3:
for _ in range(times_x):
data[-1] = np.gradient(data[-1], data[0][:,0], axis=0)
for _ in range(times_y):
data[-1] = np.gradient(data[-1], data[1][0,:], axis=1)
elif len(data) == 2:
for _ in range(times_y):
data[-1] = np.gradient(data[-1], data[0])
return data
def smooth(data, method, width_x, width_y):
filters = {'Gauss': ndimage.gaussian_filter,
'Median': ndimage.median_filter}
filters1d = {'Gauss': ndimage.gaussian_filter1d}
if method == 'Gauss':
width_x, width_y = float(width_x), float(width_y)
elif method == 'Median':
width_x, width_y = int(np.ceil(float(width_x)))+1, int(np.ceil(float(width_y)))+1
if len(data) == 3:
if width_x:
if width_y:
data[-1] = filters[method](data[-1], [width_x, width_y])
else:
data[-1] = filters1d[method](data[-1], width_x, axis=0)
else:
if width_y:
data[-1] = filters1d[method](data[-1], width_y, axis=1)
elif len(data) == 2:
if width_y:
data[-1] = filters1d[method](data[-1], width_y)
return data
def sav_gol(data, method, window_length, polyorder):
polyorder = int(polyorder)
window_length = int(window_length)
if window_length < polyorder:
window_length = polyorder + 1
if window_length % 2 == 0:
window_length += 1
if 'Y' in method:
axis = 1
elif 'X' in method:
axis = 0
deriv = method.count('d')
if len(data) == 3:
data[-1] = signal.savgol_filter(data[-1], window_length, polyorder,
deriv=deriv, axis=axis)
for _ in range(deriv):
data[-1] /= np.gradient(data[axis], axis=axis)
elif len(data) == 2:
data[-1] = signal.savgol_filter(data[-1], window_length, polyorder,
deriv=deriv)
for _ in range(deriv):
data[-1] /= np.gradient(data[0])
return data
def crop_x(data, method, left, right):
if method != 'Lim':
min_data = np.min(data[0])
max_data = np.max(data[0])
left, right = float(left), float(right)
if (left < right and max_data > left and min_data < right):
if method == 'Abs':
mask = ((data[0] < left) | (data[0] > right))
elif method == 'Rel':
mask = (((data[0] >= min_data) & (data[0] <= min_data + abs(left))) |
((data[0] <= max_data) & (data[0] >= max_data - abs(right))))
if len(data) == 3:
for i in [1,2,0]:
data[i] = np.ma.compress_rowcols(np.ma.masked_array(data[i], mask=mask), axis=0)
elif len(data) == 2:
for i in [1,0]:
data[i] = np.ma.masked_array(data[i], mask=mask)
return data
def crop_y(data, method, bottom, top):
if len(data) == 3 and method != 'Lim':
min_data = np.min(data[1])
max_data = np.max(data[1])
bottom, top = float(bottom), float(top)
if (bottom < top and max_data > bottom and min_data < top):
for i in [0,2,1]:
if method == 'Abs':
mask = ((data[1] < bottom) | (data[1] > top))
elif method == 'Rel':
mask = (((data[1] >= min_data) & (data[1] <= min_data + abs(bottom))) |
((data[1] <= max_data) & (data[1] >= max_data - abs(top))))
data[i] = np.ma.compress_rowcols(
np.ma.masked_array(data[i], mask=mask), axis=1)
return data
def roll_x(data, method, position, amount):
if len(data) == 3:
amount = int(amount)
position = int(position)
data[2][:,position:] = np.roll(data[2][:,position:], shift=amount, axis=0)
return data
def roll_y(data, method, position, amount):
if len(data) == 3:
amount = int(amount)
position = int(position)
data[2][position:,:] = np.roll(data[2][position:,:], shift=amount, axis=1)
return data
def cut_x(data, method, left, width):
if len(data) == 3:
left, width = int(left), int(width)
part1 = data[-1][:left,:]
part2 = data[-1][left:left+width,:]
part3 = data[-1][left+width:,:]
data[-1] = np.vstack((part1,part3,part2))
return data
def cut_y(data, method, bottom, width):
if len(data) == 3:
bottom, width = int(bottom), int(width)
part1 = data[-1][:,:bottom]
part2 = data[-1][:,bottom:bottom+width]
part3 = data[-1][:,bottom+width:]
data[-1] = np.hstack((part1,part3,part2))
return data
def swap_xy(data, method, setting1, setting2):
data[0], data[1] = data[1], data[0]
return data
def flip(data, method, setting1, setting2):
if method == 'U-D':
data[-1] = np.fliplr(data[-1])
elif method == 'L-R':
data[-1] = np.flipud(data[-1])
return data
def normalize(data, method, point_x, point_y):
if method == 'Max':
norm_value = np.max(data[-1])
elif method == 'Min':
norm_value = np.min(data[-1])
elif method == 'Point' and len(data) == 3:
x_index = np.argmin(np.abs(data[0][:,0] - float(point_x)))
y_index = np.argmin(np.abs(data[1][0,:] - float(point_y)))
norm_value = data[-1][x_index,y_index]
elif method == 'Point' and len(data) == 2:
x_index = np.argmin(np.abs(data[0] - float(point_x)))
norm_value = data[-1][x_index]
data[-1] = data[-1] / norm_value
return data
def offset(data, method, setting1, setting2):
if method == 'X':
data[0] += float(setting1)
if method == 'Y':
data[1] += float(setting1)
if method == 'Z' and len(data) == 3:
data[2] += float(setting1)
return data
def absolute(data, method, setting1, setting2):
data[-1] = np.absolute(data[-1])
return data
def multiply(data, method, setting1, setting2):
if setting1 == 'e^2/h':
value = 0.025974
else:
value = float(setting1)
axis = {'X': 0, 'Y': 1, 'Z': 2}
if len(data) == 3:
data[axis[method]] *= value
elif len(data) == 2 and axis[method] < 2:
data[axis[method]] *= value
return data
def logarithm(data, method, setting1, setting2):
if method == 'Mask':
data[-1] = np.ma.log10(data[-1])
elif method == 'Shift':
min_value = np.amin(data[-1])
if min_value <= 0.0:
data[-1] = np.ma.log10(data[-1]-min_value)
else:
data[-1] = np.ma.log10(data[-1])
elif method == 'Abs':
data[-1] = np.ma.log10(np.abs(data[-1]))
return data
def root(data, method, setting1, setting2):
root = float(setting1)
if root > 0:
data[-1] = np.abs(data[-1])**(1/float(setting1))
return data
def interpolate(data, method, n_x, n_y):
if len(data) == 3:
x, y = data[0][:,0], data[1][0,:]
f_z = interp2d(y, x, data[2], kind=method)
n_x, n_y = int(n_x), int(n_y)
min_x, max_x = np.amin(data[0]), np.amax(data[0])
min_y, max_y = np.amin(data[1]), np.amax(data[1])
yp, xp = np.linspace(min_y, max_y, n_y), np.linspace(min_x, max_x, n_x)
data[1], data[0] = np.meshgrid(yp, xp)
data[2] = f_z(yp, xp)
elif len(data) == 2:
f = interp1d(data[0], data[1], kind=method)
n_x = int(n_x)
min_x, max_x = np.amin(data[0]), np.amax(data[0])
data[0] = np.linspace(min_x, max_x, n_x)
data[1] = f(data[0])
return data
def add_slope(data, method, a_x, a_y):
if len(data) == 3:
a_x, a_y = float(a_x), float(a_y)
data[-1] += a_x*data[0] + a_y*data[1]
elif len(data) == 2:
a_y = float(a_y)
data[-1] += a_y*data[0]
return data
def subtract_trace(data, method, index, setting2):
if len(data) == 3:
index = int(float(index))
if method == 'Hor':
data[-1] -= np.tile(data[-1][:,index], (len(data[-1][0,:]),1)).T
elif method == 'Ver':
data[-1] -= np.tile(data[-1][index,:], (len(data[-1][:,0]),1))
return data
def divide(data, method, setting1, setting2):
axis = {'X': 0, 'Y': 1, 'Z': 2}
if len(data) == 3:
data[axis[method]] /= float(setting1)
elif len(data) == 2 and axis[method] < 2:
data[axis[method]] /= float(setting1)
return data
def invert(data, method, setting1, setting2):
axis = {'X': 0, 'Y': 1, 'Z': -1}
data[axis[method]] = 1./data[axis[method]]
return data
``` |
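A toy demonstration of the filter interface above (the surface is invented): every filter receives a `[X, Y, Z]` list (or `[X, Y]` for 1D traces), a method string and two setting strings, and returns the modified list, so calls can be chained:
```python
import numpy as np
from filters import smooth, derivative  # assumes the Inspectra-Gadget folder is on the path

x, y = np.meshgrid(np.linspace(0, 1, 50), np.linspace(-1, 1, 40), indexing='ij')
z = np.tanh(y / 0.1) + 0.05 * x
data = [x, y, z]
data = smooth(data, 'Gauss', '2', '2')  # Gaussian blur along both axes
data = derivative(data, '', '0', '1')   # one derivative along y (dZ/dY)
print(data[2].shape)                    # (50, 40)
```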
{
"source": "joeriess/rasa",
"score": 2
} |
#### File: rasa/cli/export.py
```python
import argparse
import logging
import typing
from typing import List, Text, Optional
import rasa.cli.utils as cli_utils
import rasa.core.utils as rasa_core_utils
from rasa.cli.arguments import export as arguments
from rasa.constants import DOCS_URL_TRACKER_STORES, DOCS_URL_EVENT_BROKERS
from rasa.exceptions import PublishingError
from rasa.shared.exceptions import RasaException
if typing.TYPE_CHECKING:
from rasa.core.brokers.broker import EventBroker
from rasa.core.brokers.pika import PikaEventBroker
from rasa.core.tracker_store import TrackerStore
from rasa.core.exporter import Exporter
from rasa.core.utils import AvailableEndpoints
logger = logging.getLogger(__name__)
# noinspection PyProtectedMember
def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
) -> None:
"""Add subparser for `rasa export`.
Args:
subparsers: Subparsers action object to which `argparse.ArgumentParser`
objects can be added.
parents: `argparse.ArgumentParser` objects whose arguments should also be
included.
"""
export_parser = subparsers.add_parser(
"export",
parents=parents,
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Export conversations using an event broker.",
)
export_parser.set_defaults(func=export_trackers)
arguments.set_export_arguments(export_parser)
def _get_tracker_store(endpoints: "AvailableEndpoints") -> "TrackerStore":
"""Get `TrackerStore` from `endpoints`.
Prints an error and exits if no tracker store could be loaded.
Args:
endpoints: `AvailableEndpoints` to initialize the tracker store from.
Returns:
Initialized tracker store.
"""
if not endpoints.tracker_store:
cli_utils.print_error_and_exit(
f"Could not find a `tracker_store` section in the supplied "
f"endpoints file. Instructions on how to configure a tracker store "
f"can be found here: {DOCS_URL_TRACKER_STORES}. "
f"Exiting. "
)
from rasa.core.tracker_store import TrackerStore
return TrackerStore.create(endpoints.tracker_store)
def _get_event_broker(endpoints: "AvailableEndpoints") -> Optional["EventBroker"]:
"""Get `EventBroker` from `endpoints`.
Prints an error and exits if no event broker could be loaded.
Args:
endpoints: `AvailableEndpoints` to initialize the event broker from.
Returns:
Initialized event broker.
"""
if not endpoints.event_broker:
cli_utils.print_error_and_exit(
f"Could not find an `event_broker` section in the supplied "
f"endpoints file. Instructions on how to configure an event broker "
f"can be found here: {DOCS_URL_EVENT_BROKERS}. Exiting."
)
from rasa.core.brokers.broker import EventBroker
return EventBroker.create(endpoints.event_broker)
def _get_requested_conversation_ids(
conversation_ids_arg: Optional[Text] = None,
) -> Optional[List[Text]]:
"""Get list of conversation IDs requested as a command-line argument.
Args:
conversation_ids_arg: Value of `--conversation-ids` command-line argument.
If provided, this is a string of comma-separated conversation IDs.
Return:
List of conversation IDs requested as a command-line argument.
`None` if that argument was left unspecified.
"""
if not conversation_ids_arg:
return None
return conversation_ids_arg.split(",")
def _assert_max_timestamp_is_greater_than_min_timestamp(
args: argparse.Namespace,
) -> None:
"""Inspect CLI timestamp parameters.
Prints an error and exits if a maximum timestamp is provided that is smaller
than the provided minimum timestamp.
Args:
args: Command-line arguments to process.
"""
min_timestamp = args.minimum_timestamp
max_timestamp = args.maximum_timestamp
if (
min_timestamp is not None
and max_timestamp is not None
and max_timestamp < min_timestamp
):
cli_utils.print_error_and_exit(
f"Maximum timestamp '{max_timestamp}' is smaller than minimum "
f"timestamp '{min_timestamp}'. Exiting."
)
def _prepare_event_broker(event_broker: "EventBroker") -> None:
"""Sets `should_keep_unpublished_messages` flag to `False` if
`self.event_broker` is a `PikaEventBroker`.
If publishing of events fails, the `PikaEventBroker` instance should not keep a
list of unpublished messages, so we can retry publishing them. This is because
the instance is launched as part of this short-lived export script, meaning the
object is destroyed before it might be published.
In addition, wait until the event broker reports a `ready` state.
"""
from rasa.core.brokers.pika import PikaEventBroker
if isinstance(event_broker, PikaEventBroker):
event_broker.should_keep_unpublished_messages = False
event_broker.raise_on_failure = True
if not event_broker.is_ready():
cli_utils.print_error_and_exit(
f"Event broker of type '{type(event_broker)}' is not ready. Exiting."
)
def export_trackers(args: argparse.Namespace) -> None:
"""Export events for a connected tracker store using an event broker.
Args:
args: Command-line arguments to process.
"""
_assert_max_timestamp_is_greater_than_min_timestamp(args)
endpoints = rasa_core_utils.read_endpoints_from_path(args.endpoints)
tracker_store = _get_tracker_store(endpoints)
event_broker = _get_event_broker(endpoints)
_prepare_event_broker(event_broker)
requested_conversation_ids = _get_requested_conversation_ids(args.conversation_ids)
from rasa.core.exporter import Exporter
exporter = Exporter(
tracker_store,
event_broker,
args.endpoints,
requested_conversation_ids,
args.minimum_timestamp,
args.maximum_timestamp,
)
try:
published_events = exporter.publish_events()
cli_utils.print_success(
f"Done! Successfully published {published_events} events 🎉"
)
except PublishingError as e:
command = _get_continuation_command(exporter, e.timestamp)
cli_utils.print_error_and_exit(
f"Encountered error while publishing event with timestamp '{e}'. To "
f"continue where I left off, run the following command:"
f"\n\n\t{command}\n\nExiting."
)
except RasaException as e:
cli_utils.print_error_and_exit(str(e))
def _get_continuation_command(exporter: "Exporter", timestamp: float) -> Text:
"""Build CLI command to continue 'rasa export' where it was interrupted.
Called when event publishing stops due to an error.
Args:
exporter: Exporter object containing objects relevant for this export.
timestamp: Timestamp of the last event attempted to be published.
"""
# build CLI command command based on supplied timestamp and options
command = "rasa export"
if exporter.endpoints_path is not None:
command += f" --endpoints {exporter.endpoints_path}"
command += f" --minimum-timestamp {timestamp}"
if exporter.maximum_timestamp is not None:
command += f" --maximum-timestamp {exporter.maximum_timestamp}"
if exporter.requested_conversation_ids:
command += (
f" --conversation-ids {','.join(exporter.requested_conversation_ids)}"
)
return command
```
#### File: core/brokers/pika.py
```python
import json
import logging
import os
import time
import typing
from collections import deque
from contextlib import contextmanager
from threading import Thread
from typing import (
Callable,
Deque,
Dict,
Optional,
Text,
Union,
Any,
List,
Tuple,
Generator,
)
from rasa.constants import (
DEFAULT_LOG_LEVEL_LIBRARIES,
ENV_LOG_LEVEL_LIBRARIES,
DOCS_URL_PIKA_EVENT_BROKER,
)
from rasa.core.brokers.broker import EventBroker
import rasa.shared.utils.io
from rasa.utils.endpoints import EndpointConfig
from rasa.shared.utils.io import DEFAULT_ENCODING
if typing.TYPE_CHECKING:
from pika.adapters.blocking_connection import BlockingChannel
from pika import SelectConnection, BlockingConnection, BasicProperties
from pika.channel import Channel
import pika
from pika.connection import Parameters, Connection
logger = logging.getLogger(__name__)
RABBITMQ_EXCHANGE = "rasa-exchange"
DEFAULT_QUEUE_NAME = "rasa_core_events"
def initialise_pika_connection(
host: Text,
username: Text,
password: Text,
port: Union[Text, int] = 5672,
connection_attempts: int = 20,
retry_delay_in_seconds: float = 5,
) -> "BlockingConnection":
"""Create a Pika `BlockingConnection`.
Args:
host: Pika host
username: username for authentication with Pika host
password: password for authentication with Pika host
port: port of the Pika host
connection_attempts: number of channel attempts before giving up
retry_delay_in_seconds: delay in seconds between channel attempts
Returns:
`pika.BlockingConnection` with provided parameters
"""
import pika
with _pika_log_level(logging.CRITICAL):
parameters = _get_pika_parameters(
host, username, password, port, connection_attempts, retry_delay_in_seconds
)
return pika.BlockingConnection(parameters)
@contextmanager
def _pika_log_level(temporary_log_level: int) -> Generator[None, None, None]:
"""Change the log level of the `pika` library.
The log level will remain unchanged if the current log level is 10 (`DEBUG`) or
lower.
Args:
temporary_log_level: Temporary log level for pika. Will be reverted to
previous log level when context manager exits.
"""
pika_logger = logging.getLogger("pika")
old_log_level = pika_logger.level
is_debug_mode = logging.root.level <= logging.DEBUG
if not is_debug_mode:
pika_logger.setLevel(temporary_log_level)
yield
pika_logger.setLevel(old_log_level)
def _get_pika_parameters(
host: Text,
username: Text,
password: Text,
port: Union[Text, int] = 5672,
connection_attempts: int = 20,
retry_delay_in_seconds: float = 5,
) -> "Parameters":
"""Create Pika `Parameters`.
Args:
host: Pika host
username: username for authentication with Pika host
password: password for authentication with Pika host
port: port of the Pika host
connection_attempts: number of channel attempts before giving up
retry_delay_in_seconds: delay in seconds between channel attempts
Returns:
`pika.ConnectionParameters` which can be used to create a new connection to a
broker.
"""
import pika
if host.startswith("amqp"):
# user supplied an AMQP URL containing all the info
parameters = pika.URLParameters(host)
parameters.connection_attempts = connection_attempts
parameters.retry_delay = retry_delay_in_seconds
if username:
parameters.credentials = pika.PlainCredentials(username, password)
else:
# host seems to be just the host, so we use our parameters
parameters = pika.ConnectionParameters(
host,
port=port,
credentials=pika.PlainCredentials(username, password),
connection_attempts=connection_attempts,
# Wait between retries since
# it can take some time until
# RabbitMQ comes up.
retry_delay=retry_delay_in_seconds,
ssl_options=create_rabbitmq_ssl_options(host),
)
return parameters
def initialise_pika_select_connection(
parameters: "Parameters",
on_open_callback: Callable[["SelectConnection"], None],
on_open_error_callback: Callable[["SelectConnection", Text], None],
) -> "SelectConnection":
"""Create a non-blocking Pika `SelectConnection`.
Args:
parameters: Parameters which should be used to connect.
on_open_callback: Callback which is called when the connection was established.
on_open_error_callback: Callback which is called when connecting to the broker
failed.
Returns:
A callback-based connection to the RabbitMQ event broker.
"""
import pika
return pika.SelectConnection(
parameters,
on_open_callback=on_open_callback,
on_open_error_callback=on_open_error_callback,
)
def initialise_pika_channel(
host: Text,
queue: Text,
username: Text,
password: Text,
port: Union[Text, int] = 5672,
connection_attempts: int = 20,
retry_delay_in_seconds: float = 5,
) -> "BlockingChannel":
"""Initialise a Pika channel with a durable queue.
Args:
host: Pika host.
queue: Pika queue to declare.
username: Username for authentication with Pika host.
password: Password for authentication with Pika host.
port: port of the Pika host.
connection_attempts: Number of channel attempts before giving up.
retry_delay_in_seconds: Delay in seconds between channel attempts.
Returns:
Pika `BlockingChannel` with declared queue.
"""
connection = initialise_pika_connection(
host, username, password, port, connection_attempts, retry_delay_in_seconds
)
return _declare_pika_channel_with_queue(connection, queue)
def _declare_pika_channel_with_queue(
connection: "BlockingConnection", queue: Text
) -> "BlockingChannel":
"""Declare a durable queue on Pika channel."""
channel = connection.channel()
channel.queue_declare(queue, durable=True)
return channel
def close_pika_channel(
channel: "Channel",
attempts: int = 1000,
time_between_attempts_in_seconds: float = 0.001,
) -> None:
"""Attempt to close Pika channel and wait until it is closed.
Args:
channel: Pika `Channel` to close.
attempts: How many times to try to confirm that the channel has indeed been
closed.
time_between_attempts_in_seconds: Wait time between attempts to confirm closed
state.
"""
from pika.exceptions import AMQPError
try:
channel.close()
logger.debug("Successfully initiated closing of Pika channel.")
except AMQPError:
logger.exception("Failed to initiate closing of Pika channel.")
while attempts:
if channel.is_closed:
logger.debug("Successfully closed Pika channel.")
return None
time.sleep(time_between_attempts_in_seconds)
attempts -= 1
logger.exception("Failed to close Pika channel.")
def close_pika_connection(connection: "Connection") -> None:
"""Attempt to close Pika connection."""
from pika.exceptions import AMQPError
try:
connection.close()
logger.debug("Successfully closed Pika connection with host.")
except AMQPError:
logger.exception("Failed to close Pika connection with host.")
class PikaEventBroker(EventBroker):
"""Pika-based event broker for publishing messages to RabbitMQ."""
def __init__(
self,
host: Text,
username: Text,
password: Text,
port: Union[int, Text] = 5672,
queues: Union[List[Text], Tuple[Text], Text, None] = None,
should_keep_unpublished_messages: bool = True,
raise_on_failure: bool = False,
log_level: Union[Text, int] = os.environ.get(
ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES
),
**kwargs: Any,
):
"""Initialise RabbitMQ event broker.
Args:
host: Pika host.
username: Username for authentication with Pika host.
password: Password for authentication with Pika host.
port: port of the Pika host.
queues: Pika queues to declare and publish to.
should_keep_unpublished_messages: Whether or not the event broker should
maintain a queue of unpublished messages to be published later in
case of errors.
raise_on_failure: Whether to raise an exception if publishing fails. If
`False`, keep retrying.
log_level: Logging level.
"""
logging.getLogger("pika").setLevel(log_level)
self.host = host
self.username = username
self.password = password
self.port = port
self.channel: Optional["Channel"] = None
self.queues = self._get_queues_from_args(queues)
self.should_keep_unpublished_messages = should_keep_unpublished_messages
self.raise_on_failure = raise_on_failure
# List to store unpublished messages which hopefully will be published later
self._unpublished_messages: Deque[Text] = deque()
self._run_pika()
def __del__(self) -> None:
if self.channel:
close_pika_channel(self.channel)
close_pika_connection(self.channel.connection)
def close(self) -> None:
"""Close the pika channel and connection."""
self.__del__()
@property
def rasa_environment(self) -> Optional[Text]:
"""Get value of the `RASA_ENVIRONMENT` environment variable."""
return os.environ.get("RASA_ENVIRONMENT")
@staticmethod
def _get_queues_from_args(
queues_arg: Union[List[Text], Tuple[Text], Text, None]
) -> Union[List[Text], Tuple[Text]]:
"""Get queues for this event broker.
The preferred argument defining the RabbitMQ queues the `PikaEventBroker` should
publish to is `queues` (as of Rasa Open Source version 1.8.2). This method
can be removed in the future, and `self.queues` should just receive the value of
the `queues` kwarg in the constructor.
Args:
queues_arg: Value of the supplied `queues` argument.
Returns:
Queues this event broker publishes to.
Raises:
`ValueError` if no valid `queues` argument was found.
"""
if queues_arg and isinstance(queues_arg, (list, tuple)):
return queues_arg
if queues_arg and isinstance(queues_arg, str):
logger.debug(
f"Found a string value under the `queues` key of the Pika event broker "
f"config. Please supply a list of queues under this key, even if it is "
f"just a single one. See {DOCS_URL_PIKA_EVENT_BROKER}"
)
return [queues_arg]
rasa.shared.utils.io.raise_warning(
f"No `queues` argument provided. It is suggested to "
f"explicitly specify a queue as described in "
f"{DOCS_URL_PIKA_EVENT_BROKER}. "
f"Using the default queue '{DEFAULT_QUEUE_NAME}' for now."
)
return [DEFAULT_QUEUE_NAME]
@classmethod
def from_endpoint_config(
cls, broker_config: Optional["EndpointConfig"]
) -> Optional["PikaEventBroker"]:
"""Initialise `PikaEventBroker` from `EndpointConfig`.
Args:
broker_config: `EndpointConfig` to read.
Returns:
`PikaEventBroker` if `broker_config` was supplied, else `None`.
"""
if broker_config is None:
return None
return cls(broker_config.url, **broker_config.kwargs)
def _run_pika(self) -> None:
parameters = _get_pika_parameters(
self.host, self.username, self.password, self.port
)
self._pika_connection = initialise_pika_select_connection(
parameters, self._on_open_connection, self._on_open_connection_error
)
# Run Pika io loop in extra thread so it's not blocking
self._run_pika_io_loop_in_thread()
def _on_open_connection(self, connection: "SelectConnection") -> None:
logger.debug(f"RabbitMQ connection to '{self.host}' was established.")
connection.channel(on_open_callback=self._on_channel_open)
def _on_open_connection_error(self, _, error: Text) -> None:
logger.warning(
f"Connecting to '{self.host}' failed with error '{error}'. Trying again."
)
def _on_channel_open(self, channel: "Channel") -> None:
logger.debug("RabbitMQ channel was opened. Declaring fanout exchange.")
# declare exchange of type 'fanout' in order to publish to multiple queues
# (https://www.rabbitmq.com/tutorials/amqp-concepts.html#exchange-fanout)
channel.exchange_declare(RABBITMQ_EXCHANGE, exchange_type="fanout")
for queue in self.queues:
channel.queue_declare(queue=queue, durable=True)
channel.queue_bind(exchange=RABBITMQ_EXCHANGE, queue=queue)
self.channel = channel
while self._unpublished_messages:
# Send unpublished messages
message = self._unpublished_messages.popleft()
self._publish(message)
logger.debug(
f"Published message from queue of unpublished messages. "
f"Remaining unpublished messages: {len(self._unpublished_messages)}."
)
def _run_pika_io_loop_in_thread(self) -> None:
thread = Thread(target=self._run_pika_io_loop, daemon=True)
thread.start()
def _run_pika_io_loop(self) -> None:
# noinspection PyUnresolvedReferences
self._pika_connection.ioloop.start()
def is_ready(
self, attempts: int = 1000, wait_time_between_attempts_in_seconds: float = 0.01
) -> bool:
"""Spin until the pika channel is open.
It typically takes 50 ms or so for the pika channel to open. We'll wait up
to 10 seconds just in case.
Args:
attempts: Number of retries.
wait_time_between_attempts_in_seconds: Wait time between retries.
Returns:
`True` if the channel is available, `False` otherwise.
"""
while attempts:
if self.channel:
return True
time.sleep(wait_time_between_attempts_in_seconds)
attempts -= 1
return False
def publish(
self,
event: Dict[Text, Any],
retries: int = 60,
retry_delay_in_seconds: int = 5,
headers: Optional[Dict[Text, Text]] = None,
) -> None:
"""Publish `event` into Pika queue.
Args:
event: Serialised event to be published.
retries: Number of retries if publishing fails
retry_delay_in_seconds: Delay in seconds between retries.
headers: Message headers to append to the published message (key-value
dictionary). The headers can be retrieved in the consumer from the
`headers` attribute of the message's `BasicProperties`.
"""
body = json.dumps(event)
while retries:
try:
self._publish(body, headers)
return
except Exception as e:
logger.error(
f"Could not open Pika channel at host '{self.host}'. "
f"Failed with error: {e}"
)
self.channel = None
if self.raise_on_failure:
raise e
retries -= 1
time.sleep(retry_delay_in_seconds)
logger.error(f"Failed to publish Pika event on host '{self.host}':\n{body}")
def _get_message_properties(
self, headers: Optional[Dict[Text, Text]] = None
) -> "BasicProperties":
"""Create RabbitMQ message `BasicProperties`.
The `app_id` property is set to the value of `self.rasa_environment` if
present, and the message delivery mode is set to 2 (persistent). In
addition, the `headers` property is set if supplied.
Args:
headers: Message headers to add to the message properties of the
published message (key-value dictionary). The headers can be retrieved in
the consumer from the `headers` attribute of the message's
`BasicProperties`.
Returns:
`pika.spec.BasicProperties` with the `RASA_ENVIRONMENT` environment variable
as the properties' `app_id` value, `delivery_mode`=2 and `headers` as the
properties' headers.
"""
from pika.spec import BasicProperties
# make message persistent
kwargs = {"delivery_mode": 2}
if self.rasa_environment:
kwargs["app_id"] = self.rasa_environment
if headers:
kwargs["headers"] = headers
return BasicProperties(**kwargs)
def _basic_publish(
self, body: Text, headers: Optional[Dict[Text, Text]] = None
) -> None:
self.channel.basic_publish(
exchange=RABBITMQ_EXCHANGE,
routing_key="",
body=body.encode(DEFAULT_ENCODING),
properties=self._get_message_properties(headers),
)
logger.debug(
f"Published Pika events to exchange '{RABBITMQ_EXCHANGE}' on host "
f"'{self.host}':\n{body}"
)
def _publish(self, body: Text, headers: Optional[Dict[Text, Text]] = None) -> None:
if self._pika_connection.is_closed:
# Try to reset connection
self._run_pika()
self._basic_publish(body, headers)
elif not self.channel and self.should_keep_unpublished_messages:
logger.warning(
f"RabbitMQ channel has not been assigned. Adding message to "
f"list of unpublished messages and trying to publish them "
f"later. Current number of unpublished messages is "
f"{len(self._unpublished_messages)}."
)
self._unpublished_messages.append(body)
else:
self._basic_publish(body, headers)
def create_rabbitmq_ssl_options(
rabbitmq_host: Optional[Text] = None,
) -> Optional["pika.SSLOptions"]:
"""Create RabbitMQ SSL options.
Requires the following environment variables to be set:
RABBITMQ_SSL_CLIENT_CERTIFICATE - path to the SSL client certificate (required)
RABBITMQ_SSL_CLIENT_KEY - path to the SSL client key (required)
RABBITMQ_SSL_CA_FILE - path to the SSL CA file for verification (optional)
RABBITMQ_SSL_KEY_PASSWORD - SSL private key password (optional)
Details on how to enable RabbitMQ TLS support can be found here:
https://www.rabbitmq.com/ssl.html#enabling-tls
Args:
rabbitmq_host: RabbitMQ hostname
Returns:
Pika SSL context of type `pika.SSLOptions` if
the RABBITMQ_SSL_CLIENT_CERTIFICATE and RABBITMQ_SSL_CLIENT_KEY
environment variables are valid paths, else `None`.
"""
client_certificate_path = os.environ.get("RABBITMQ_SSL_CLIENT_CERTIFICATE")
client_key_path = os.environ.get("RABBITMQ_SSL_CLIENT_KEY")
if client_certificate_path and client_key_path:
import pika
import rasa.server
logger.debug(f"Configuring SSL context for RabbitMQ host '{rabbitmq_host}'.")
ca_file_path = os.environ.get("RABBITMQ_SSL_CA_FILE")
key_password = os.environ.get("RABBITMQ_SSL_KEY_PASSWORD")
ssl_context = rasa.server.create_ssl_context(
client_certificate_path, client_key_path, ca_file_path, key_password
)
return pika.SSLOptions(ssl_context, rabbitmq_host)
else:
return None
```
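A hedged sketch of publishing a single event with the `PikaEventBroker` defined above; host and credentials are placeholders, and a RabbitMQ server must actually be running for the channel to open:
```python
from rasa.core.brokers.pika import PikaEventBroker

broker = PikaEventBroker(
    host="localhost",
    username="guest",
    password="guest",
    queues=["rasa_core_events"],
)
if broker.is_ready():
    broker.publish({"event": "user", "text": "hello"})
broker.close()
```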
#### File: training_data/converters/nlu_markdown_to_yaml_converter.py
```python
from pathlib import Path
from typing import Dict, Text, Any
from rasa.cli.utils import print_success
from rasa.nlu.utils.pattern_utils import read_lookup_table_file
from rasa.shared.nlu.training_data.formats import MarkdownReader
from rasa.shared.nlu.training_data.formats.rasa_yaml import RasaYAMLWriter
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.utils.converter import TrainingDataConverter
class NLUMarkdownToYamlConverter(TrainingDataConverter):
@classmethod
def filter(cls, source_path: Path) -> bool:
"""Checks if the given training data file contains NLU data in `Markdown` format
and can be converted to `YAML`.
Args:
source_path: Path to the training data file.
Returns:
`True` if the given file can be converted, `False` otherwise
"""
return MarkdownReader.is_markdown_nlu_file(source_path)
@classmethod
def convert_and_write(cls, source_path: Path, output_path: Path) -> None:
"""Converts the given training data file and saves it to the output directory.
Args:
source_path: Path to the training data file.
output_path: Path to the output directory.
"""
output_nlu_path = cls.generate_path_for_converted_training_data_file(
source_path, output_path
)
yaml_training_data = MarkdownReader().read(source_path)
RasaYAMLWriter().dump(output_nlu_path, yaml_training_data)
for lookup_table in yaml_training_data.lookup_tables:
cls._write_nlu_lookup_table_yaml(lookup_table, output_path)
print_success(f"Converted NLU file: '{source_path}' >> '{output_nlu_path}'.")
@classmethod
def _write_nlu_lookup_table_yaml(
cls, lookup_table: Dict[Text, Any], output_dir_path: Path
) -> None:
"""Converts and writes lookup tables examples from `txt` to `YAML` format.
Args:
lookup_table: Lookup tables items.
output_dir_path: Path to the target output directory.
"""
lookup_table_file = lookup_table.get("elements")
if not lookup_table_file or not isinstance(lookup_table_file, str):
return
examples_from_file = read_lookup_table_file(lookup_table_file)
target_filename = cls.generate_path_for_converted_training_data_file(
Path(lookup_table_file), output_dir_path
)
entity_name = Path(lookup_table_file).stem
RasaYAMLWriter().dump(
target_filename,
TrainingData(
lookup_tables=[{"name": entity_name, "elements": examples_from_file}]
),
)
```
#### File: shared/utils/test_common.py
```python
import rasa.shared.utils.common
def test_all_subclasses():
class TestClass:
pass
subclasses = [type(f"Subclass{i}", (TestClass,), {}) for i in range(10)]
sub_subclasses = [
type(f"Sub-subclass_{subclass.__name__}", (subclass,), {})
for subclass in subclasses
]
expected = subclasses + sub_subclasses
assert rasa.shared.utils.common.all_subclasses(TestClass) == expected
def test_sort_dicts_by_keys():
test_data = [{"Z": 1}, {"A": 10}]
expected = [{"A": 10}, {"Z": 1}]
actual = rasa.shared.utils.common.sort_list_of_dicts_by_first_key(test_data)
assert actual == expected
``` |
{
"source": "JoeriHermans/amortized-experimental-design",
"score": 2
} |
#### File: JoeriHermans/amortized-experimental-design/ratio_estimation.py
```python
import hypothesis as h
import torch
import numpy as np
from hypothesis.nn import build_ratio_estimator
from hypothesis.nn.ratio_estimation import BaseRatioEstimator
from hypothesis.util.data import NamedDataset
from hypothesis.util.data import NumpyDataset
from torch.utils.data import TensorDataset
class RatioEstimator(BaseRatioEstimator):
def __init__(self, denominator):
random_variables = {
"configs": (1,),
"inputs": (1,),
"outputs": (1,)}
Class = build_ratio_estimator("mlp", random_variables, denominator=denominator)
activation = torch.nn.SELU
trunk = [128] * 3
r = Class(activation=activation, trunk=trunk)
super(RatioEstimator, self).__init__(r=r)
self._r = r
def log_ratio(self, configs, inputs, outputs, **kwargs):
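        # Roughly center and scale the design variable before feeding it to the
        # network; the constants below are presumably dataset-specific.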
configs = configs - 45
configs = configs / 5
return self._r.log_ratio(configs=configs, inputs=inputs, outputs=outputs)
class RatioEstimatorTypeI(RatioEstimator):
def __init__(self):
denominator = "inputs|outputs,configs"
super(RatioEstimatorTypeI, self).__init__(denominator)
class RatioEstimatorTypeII(RatioEstimator):
def __init__(self):
denominator = "inputs,outputs|configs"
super(RatioEstimatorTypeII, self).__init__(denominator)
class DatasetTrain(NamedDataset):
def __init__(self):
inputs = np.load("data/train/inputs.npy")
configs = np.load("data/train/configs.npy")
outputs = np.load("data/train/outputs.npy")
inputs = TensorDataset(torch.from_numpy(inputs))
configs = TensorDataset(torch.from_numpy(configs))
outputs = TensorDataset(torch.from_numpy(outputs))
super(DatasetTrain, self).__init__(
configs=configs,
inputs=inputs,
outputs=outputs)
class DatasetTest(NamedDataset):
def __init__(self):
inputs = np.load("data/test/inputs.npy")
configs = np.load("data/test/configs.npy")
outputs = np.load("data/test/outputs.npy")
inputs = TensorDataset(torch.from_numpy(inputs))
configs = TensorDataset(torch.from_numpy(configs))
outputs = TensorDataset(torch.from_numpy(outputs))
super(DatasetTest, self).__init__(
configs=configs,
inputs=inputs,
outputs=outputs)
``` |
{
"source": "JoeriHermans/awflow",
"score": 2
} |
#### File: awflow/examples/pi.py
```python
import argparse
import awflow
import glob
import sys
import numpy as np
import os
from awflow import after, ensure, job, schedule
# Prepare argument parser
parser = argparse.ArgumentParser('awflow π demo.')
parser.add_argument('--backend', type=str, default='local', help='Compute backend (default: local).')
parser.add_argument('--partition', type=str, default=None, help='Partition to deploy the jobs on and can only be specified through the Slurm backend (default: none).')
arguments, _ = parser.parse_known_args()
## BEGIN Workflow definition ###################################################
# Workflow parameters
n = 10000
tasks = 25
@ensure(lambda i: os.path.exists(f'pi-{i}.npy'))
@job(cpus='4', memory='4GB', array=tasks)
def estimate(i):
print(f'Executing task {i + 1} / {tasks}.')
x = np.random.random(n)
y = np.random.random(n)
pi_estimate = (x**2 + y**2 <= 1)
np.save(f'pi-{i}.npy', pi_estimate)
@after(estimate)
@ensure(lambda: len(glob.glob('pi-*.npy')) == tasks, when='before') # Check the precondition at runtime before starting
@ensure(lambda: os.path.exists('pi.npy')) # Postcondition
@job(cpus='4', name='merge_and_show') # Ability to overwrite job name
def merge():
files = glob.glob('pi-*.npy')
stack = np.vstack([np.load(f) for f in files])
pi_estimate = stack.sum() / (n * tasks) * 4
print('π ≅', pi_estimate)
np.save('pi.npy', pi_estimate)
# Schedule the jobs for execution
schedule(merge, backend=arguments.backend)
if arguments.backend == 'slurm':
print('Jobs have been submitted!')
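# Example invocation (illustrative): `python pi.py --backend local` runs the tasks
# locally, while `python pi.py --backend slurm --partition <name>` submits the jobs
# to a Slurm cluster.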
``` |
{
"source": "JoeriHermans/constraining-dark-matter-with-stellar-streams-and-ml",
"score": 2
} |
#### File: experiments/experiment-inference/coverage.py
```python
import argparse
import hypothesis
import matplotlib.pyplot as plt
import numpy as np
import torch
from hypothesis.stat import highest_density_level
from util import MarginalizedAgePrior
from util import Prior
from scipy.stats import chi2
from util import load_ratio_estimator
@torch.no_grad()
def main(arguments):
# Load the ratio estimator
ratio_estimator = load_ratio_estimator(arguments.model)
# Load the densities
densities = torch.from_numpy(np.load(arguments.data + "/density-contrasts-cut-noised.npy")).float()
# Check if the non-marginalized model has been specified
resolution = arguments.resolution
if "not-marginalized" in arguments.model:
prior = Prior()
degrees_of_freedom = 2
masses = torch.from_numpy(np.load(arguments.data + "/masses.npy")).view(-1, 1).float()
ages = torch.from_numpy(np.load(arguments.data + "/ages.npy")).view(-1, 1).float()
nominals = torch.cat([masses, ages], dim=1)
masses = torch.linspace(prior.low[0], prior.high[0] - 0.01, resolution).view(-1, 1)
masses = masses.to(hypothesis.accelerator)
ages = torch.linspace(prior.low[1], prior.high[1] - 0.01, resolution).view(-1, 1)
ages = ages.to(hypothesis.accelerator)
grid_masses, grid_ages = torch.meshgrid(masses.view(-1), ages.view(-1))
inputs = torch.cat([grid_masses.reshape(-1,1), grid_ages.reshape(-1, 1)], dim=1)
else:
prior = MarginalizedAgePrior()
degrees_of_freedom = 1
# Prepare inputs
nominals = torch.from_numpy(np.load(arguments.data + "/masses.npy")).view(-1, 1).float()
masses = torch.linspace(prior.low, prior.high - 0.01, resolution).view(-1, 1)
masses = masses.to(hypothesis.accelerator)
inputs = masses
# Prepare the diagnostic
nominals = nominals.to(hypothesis.accelerator)
densities = densities.to(hypothesis.accelerator)
results = []
indices = np.random.randint(0, len(densities), size=arguments.n)
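    # Empirical coverage: for each sampled observation, check whether the nominal
    # (data-generating) parameter falls inside the estimated credible region or
    # confidence interval at the requested level.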
for index in indices:
# Get current density and nominal value
nominal = nominals[index].view(1, -1)
density = densities[index].view(1, -1)
# Prepare the outputs
outputs = density.repeat(len(inputs), 1)
# Check if we have to compute Bayesian credible regions
if not arguments.frequentist:
# Compute Bayesian credible region
# Compute the posterior pdf
log_ratios = ratio_estimator.log_ratio(inputs=inputs, outputs=outputs)
log_pdf = log_ratios # Uniform prior
pdf = log_pdf.exp()
norms = (inputs - nominal).norm(dim=1).cpu().numpy()
nominal_index = np.argmin(norms)
nominal_pdf = pdf[nominal_index].item()
level = highest_density_level(pdf, arguments.level, bias=arguments.bias)
if nominal_pdf >= level:
covered = True
else:
covered = False
else:
# Compute Frequentist confidence interval based on Wilks' theorem.
# Compute the maximum theta
log_ratios = ratio_estimator.log_ratio(inputs=inputs, outputs=outputs)
max_ratio = log_ratios[log_ratios.argmax()]
test_statistic = -2 * (log_ratios - max_ratio)
test_statistic -= test_statistic.min()
x = chi2.isf(1 - arguments.level, df=degrees_of_freedom)
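            # By Wilks' theorem the test statistic is asymptotically chi-squared
            # distributed, so compare its value at the nominal parameter against
            # the chi2 quantile corresponding to the requested confidence level.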
norms = (inputs - nominal).norm(dim=1).cpu().numpy()
nominal_index = np.argmin(norms)
if test_statistic[nominal_index].item() <= x:
covered = True
else:
covered = False
results.append(covered)
# Save the results of the diagnostic.
np.save(arguments.out, results)
def parse_arguments():
parser = argparse.ArgumentParser("Emperical coverage estimation")
parser.add_argument("--bias", type=float, default=0.0, help="Bias-term to for high-density-level estimation (default: 0.0)")
parser.add_argument("--data", type=str, default=None, help="Path of the data directory (default: none).")
parser.add_argument("--frequentist", action="store_true", help="Flag to compute frequentist confidence intervals instead of Bayesian credible regions (default: false).")
parser.add_argument("--level", type=float, default=0.95, help="Credible level (default: 0.997 - 3 sigma.)")
parser.add_argument("--model", type=str, default=None, help="Will load all ratio estimators matching this path query (default: none).")
parser.add_argument("--n", type=int, default=1000, help="Number of times to repeat the experiment (default: 1000).")
parser.add_argument("--out", type=str, default=None, help="Path of the output file (default: none).")
parser.add_argument("--resolution", type=int, default=100, help="Resolution for every variable (default: 100).")
arguments, _ = parser.parse_known_args()
return arguments
if __name__ == "__main__":
arguments = parse_arguments()
main(arguments)
```
#### File: experiments/experiment-inference/diagnose-ratio.py
```python
import argparse
import hypothesis
import numpy as np
import pickle
import torch
from ratio_estimation import Classifier
from sklearn import svm, datasets
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import label_binarize
from util import load_ratio_estimator
@torch.no_grad()
def main(arguments):
marginal_samples = np.load("../experiment-simulations/data/train/density-contrasts-cut-noised.npy")
model = load_ratio_estimator(arguments.model)
result = {}
result["auc"] = []
result["fpr"] = []
result["tpr"] = []
for _ in range(arguments.repeat):
nominal, likelihood_samples = load_experiment(arguments.experiment)
reweighted_samples = reweigh_samples(marginal_samples, likelihood_samples, nominal, model, batch_size=1024)
likelihood_samples = torch.tensor(likelihood_samples)
reweighted_samples = torch.tensor(reweighted_samples)
x = torch.cat([reweighted_samples, likelihood_samples], dim=0)
n = len(likelihood_samples)
ones = torch.ones(n).view(-1, 1)
zeros = torch.zeros(n).view(-1, 1)
y = torch.cat([ones, zeros], dim=0)
x = x.numpy()
y = y.numpy()
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, shuffle=True)
classifier = MLPClassifier(early_stopping=True, hidden_layer_sizes=(128, 128,))
classifier.fit(x_train, y_train.reshape(-1))
y_score = classifier.predict_proba(x_test)
fpr, tpr, _ = roc_curve(y_test, y_score[:, 1])
roc_auc = auc(fpr, tpr)
result["auc"].append(roc_auc)
result["fpr"].append(fpr)
result["tpr"].append(tpr)
# Save the results.
if arguments.out is not None:
with open(arguments.out, "wb") as fd:
pickle.dump(result, fd)
@torch.no_grad()
def reweigh_samples(marginal_samples, likelihood_samples, nominal, model, batch_size=1):
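    # Importance weights are proportional to the estimated likelihood-to-evidence
    # ratio evaluated at the nominal parameter, so resampling the marginal samples
    # with these weights approximates sampling from the likelihood at that parameter.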
weights = np.zeros(len(marginal_samples))
inputs = torch.from_numpy(nominal).view(1, -1).float()
ins = inputs.to(hypothesis.accelerator)
inputs = ins.repeat(batch_size, 1)
index = 0
n = len(marginal_samples)
with tqdm(total=n) as pbar:
while index < n:
if (n - index) < batch_size:
batch_size = n - index
inputs = ins.repeat(batch_size, 1)
density = torch.from_numpy(marginal_samples[index:index + batch_size,:]).view(batch_size, -1).float()
density = density.to(hypothesis.accelerator)
weight = model.log_ratio(inputs=inputs, outputs=density).exp().view(-1).cpu().numpy()
weights[index:index + batch_size] = weight
index += batch_size
pbar.update(batch_size)
weights /= np.sum(weights)
sampled_indices = np.random.choice(np.arange(len(weights)), size=len(likelihood_samples), replace=False, p=weights)
reweighted_samples = []
for index in sampled_indices:
reweighted_samples.append(marginal_samples[index].reshape(1, -1))
reweighted_samples = np.vstack(reweighted_samples).astype(np.float32)
return reweighted_samples
def load_experiment(index):
suffix = str(index).zfill(5)
base = "../experiment-simulations/data/nominal/block-" + suffix
likelihood_samples = np.load(base + "/density-contrasts-cut-noised.npy").astype(np.float32)
nominal = np.array([np.load(base + "/masses.npy")[0]]).reshape(1, -1).astype(np.float32)
return nominal, likelihood_samples
def parse_arguments():
parser = argparse.ArgumentParser("Quality of the ratio approximation")
parser.add_argument("--experiment", type=int, default=0, help="Experiment index (default: 0).")
parser.add_argument("--model", type=str, default=None, help="Query path to the model weights (default: none).")
parser.add_argument("--out", type=str, default=None, help="Path of the output file (default: none).")
parser.add_argument("--repeat", type=int, default=10, help="Repitition of the training and subsampling of the data (default: 10).")
arguments, _ = parser.parse_known_args()
return arguments
if __name__ == "__main__":
arguments = parse_arguments()
main(arguments)
```
#### File: experiments/experiment-inference/ratio_estimation.py
```python
import hypothesis
import torch
from hypothesis.auto.training import LikelihoodToEvidenceRatioEstimatorTrainer as Trainer
from hypothesis.nn import MultiLayeredPerceptron as MLP
from hypothesis.nn import ResNetHead
from hypothesis.nn.amortized_ratio_estimation import BaseLikelihoodToEvidenceRatioEstimator
from hypothesis.nn.amortized_ratio_estimation import BaseRatioEstimator
from hypothesis.nn.amortized_ratio_estimation import LikelihoodToEvidenceRatioEstimatorMLP
from hypothesis.nn.util import compute_dimensionality
### Defaults ###################################################################
activation = torch.nn.SELU
batchnorm = True
resnet_depth = 101
dropout = float(0.0)
shape_inputs = (1,)
shape_outputs = (62,) # 39 for smaller range, 62 for wide.
trunk = [512] * 3
### Models #####################################################################
class RatioEstimator(BaseRatioEstimator):
def __init__(self,
activation=activation,
batchnorm=True,
depth=resnet_depth,
dim_inputs=1,
dropout=dropout,
normalize_inputs=False):
super(RatioEstimator, self).__init__()
shape = shape_outputs
self.output_elements = shape[0]
# Create the ResNet head
self.head = ResNetHead(
depth=depth,
activation=activation,
batchnorm=batchnorm,
dilate=True,
channels=1,
shape_xs=shape_outputs)
self.dimensionality = self.head.embedding_dimensionality()
# Ratio estimator trunk
dimensionality = self.dimensionality + dim_inputs
self.trunk = MLP(
activation=activation,
dropout=dropout,
layers=trunk,
shape_xs=(dimensionality,),
shape_ys=(1,),
transform_output=None)
if normalize_inputs:
raise NotImplementedError
else:
self._normalizer = self._normalize_identity
def _normalize_identity(self, inputs):
return inputs
def forward(self, inputs, outputs):
log_ratios = self.log_ratio(inputs=inputs, outputs=outputs)
return log_ratios.sigmoid(), log_ratios
def log_ratio(self, inputs, outputs):
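        # Embed the observed density with the ResNet head, concatenate the embedding
        # with the (optionally normalized) parameters, and map the joint feature
        # vector to a scalar log-ratio with the MLP trunk.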
inputs = self._normalizer(inputs)
outputs = outputs.view(-1, 1, self.output_elements) # Reshape outputs
z = self.head(outputs).view(-1, self.dimensionality)
features = torch.cat([inputs, z], dim=1)
return self.trunk(features)
class LSTMRatioEstimator(BaseRatioEstimator):
def __init__(self,
activation=activation,
dim_inputs=1,
dropout=dropout,
hidden_size=128,
normalize_inputs=False):
super(LSTMRatioEstimator, self).__init__()
# MLP to initialize the hidden and cell state of the LSTM based on the inputs.
self.conditioner_mlp_hidden = MLP(
activation=activation,
dropout=dropout,
layers=[hidden_size * 2],
shape_xs=(dim_inputs,),
shape_ys=(hidden_size,),
transform_output=None)
self.conditioner_mlp_cell = MLP(
activation=activation,
dropout=dropout,
layers=[hidden_size * 2],
shape_xs=(dim_inputs,),
shape_ys=(hidden_size,),
transform_output=None)
# MLP which transforms the final hidden state into a log-ratio.
self.mlp = MLP(
activation=activation,
dropout=dropout,
layers=[hidden_size * 2] * 3,
shape_xs=(hidden_size + dim_inputs,),
shape_ys=(1,),
transform_output=None)
# LSTM which compresses the inputs and outputs to a latent code.
self.num_stacked = 1
self.lstm = torch.nn.LSTM(
input_size=1, # Expected features of the observable
hidden_size=hidden_size, # Hidden state
num_layers = self.num_stacked,
batch_first=True,
dropout=dropout)
# Check if the inputs have to be normalized.
if normalize_inputs:
raise NotImplementedError
else:
self._normalizer = self._normalize_identity
def _normalize_identity(self, inputs):
return inputs
def log_ratio(self, inputs, outputs):
inputs = self._normalizer(inputs)
outputs = outputs.view(-1, shape_outputs[0], 1)
# Initialize the hidden state
initial_hidden_state = self.conditioner_mlp_hidden(inputs).unsqueeze(0).repeat(self.num_stacked, 1, 1)
initial_cell_state = self.conditioner_mlp_cell(inputs).unsqueeze(0).repeat(self.num_stacked, 1, 1)
h, _ = self.lstm(outputs, (initial_hidden_state, initial_cell_state))
        h = h[:, -1, :]  # Hidden state of the final timestep.
z = torch.cat([h, inputs], dim=1)
log_ratios = self.mlp(z)
return log_ratios
class MLPRatioEstimator(LikelihoodToEvidenceRatioEstimatorMLP):
def __init__(self,
activation=activation,
batchnorm=batchnorm,
dropout=dropout,
dim_inputs=1,
normalize_inputs=True):
super(MLPRatioEstimator, self).__init__(
shape_inputs=(dim_inputs,),
shape_outputs=shape_outputs,
activation=activation,
layers=trunk,
dropout=dropout)
dim = dim_inputs + shape_outputs[0]
if normalize_inputs:
self.bn_inputs = torch.nn.BatchNorm1d(dim_inputs)
if batchnorm:
self.bn_outputs = torch.nn.BatchNorm1d(shape_outputs[0])
self.batchnorm = batchnorm
self.normalize_inputs = normalize_inputs
def log_ratio(self, inputs, outputs):
if self.normalize_inputs:
inputs = self.bn_inputs(inputs)
if self.batchnorm:
outputs = self.bn_outputs(outputs)
return super().log_ratio(inputs=inputs, outputs=outputs)
class SingleRatioEstimator(RatioEstimator):
def __init__(self,
activation=activation,
batchnorm=batchnorm,
depth=resnet_depth,
dropout=dropout):
super(SingleRatioEstimator, self).__init__(
activation=activation,
batchnorm=batchnorm,
depth=depth,
dim_inputs=1,
dropout=dropout)
class DoubleRatioEstimator(RatioEstimator):
def __init__(self,
activation=activation,
batchnorm=batchnorm,
depth=resnet_depth,
dropout=dropout):
super(DoubleRatioEstimator, self).__init__(
activation=activation,
batchnorm=batchnorm,
depth=depth,
dim_inputs=2,
dropout=dropout)
class Classifier(torch.nn.Module):
def __init__(self, activation=activation,
batchnorm=batchnorm,
depth=resnet_depth,
dropout=dropout):
super(Classifier, self).__init__()
# Create the ResNet head
self.head = ResNetHead(
depth=depth,
activation=activation,
batchnorm=batchnorm,
dilate=False,
channels=1,
shape_xs=shape_outputs)
self.dimensionality = self.head.embedding_dimensionality()
# Ratio estimator trunk
dimensionality = self.dimensionality
self.trunk = MLP(
activation=activation,
dropout=dropout,
layers=trunk,
shape_xs=(dimensionality,),
shape_ys=(1,),
transform_output=None)
def forward(self, x):
        x = x.view(-1, 1, shape_outputs[0])  # Reshape to (batch, channels, observable length).
z = self.head(x).view(-1, self.dimensionality)
z = self.trunk(z)
return z.sigmoid()
```
#### File: experiments/experiment-simulations/impacts.py
```python
import argparse
import numpy as np
def main(arguments):
impacts = np.load(arguments.input)
has_impacts = impacts > 0
np.save(arguments.output, has_impacts)
def parse_arguments():
parser = argparse.ArgumentParser("Impacts preparation")
parser.add_argument("--input", type=str, default=None, help="Path to the inputs file (default: none)")
parser.add_argument("--output", type=str, default=None, help="Path to the outputs file (default: none)")
arguments, _ = parser.parse_known_args()
return arguments
if __name__ == "__main__":
arguments = parse_arguments()
main(arguments)
```
#### File: experiments/experiment-simulations/simulators.py
```python
import galpy
import gd1_util
import glob
import hypothesis
import numpy as np
import os
import pickle
import torch
import torch.multiprocessing as multiprocessing
from galpy.util import bovy_conversion
from hypothesis.simulation import Simulator as BaseSimulator
from util import compute_obs_density
from util import compute_obs_density_no_interpolation
from util import lb_to_phi12
from util import simulate_subhalos_mwdm
class GD1StreamSimulator(BaseSimulator):
def __init__(self, hernquist_profile=True, max_subhalo_impacts=64):
super(GD1StreamSimulator, self).__init__()
self.hernquist_profile = hernquist_profile
self.isob = 0.45
self.chunk_size = 64
self.length_factor = float(1)
self.max_subhalo_impacts = int(max_subhalo_impacts)
self.new_orb_lb = [188.04928416766532, 51.848594007807456, 7.559027173643999, 12.260258757214746, -5.140630283489461, 7.162732847549563]
def _compute_impact_times(self, age):
impact_times = []
time_in_gyr = bovy_conversion.time_in_Gyr(vo=float(220), ro=float(8))
for time in np.arange(1, self.max_subhalo_impacts + 1) / (self.max_subhalo_impacts):
impact_times.append(time / time_in_gyr)
return impact_times
def _simulate_stream(self, age):
sigv_age = (0.3 * 3.2) / age
impacts = self._compute_impact_times(age)
# Smooth stream.
# stream_smooth_leading = gd1_util.setup_gd1model(age=age,
# isob=self.isob,
# leading=True,
# new_orb_lb=self.new_orb_lb,
# sigv=sigv_age)
stream_smooth_trailing = gd1_util.setup_gd1model(age=age,
isob=self.isob,
leading=False,
new_orb_lb=self.new_orb_lb,
sigv=sigv_age)
# Leading stream.
# stream_leading = gd1_util.setup_gd1model(age=age,
# hernquist=self.hernquist_profile,
# isob=self.isob,
# leading=True,
# length_factor=self.length_factor,
# new_orb_lb=self.new_orb_lb,
# sigv=sigv_age,
# timpact=impacts)
# Trailing stream.
stream_trailing = gd1_util.setup_gd1model(age=age,
hernquist=self.hernquist_profile,
isob=self.isob,
leading=False,
length_factor=self.length_factor,
new_orb_lb=self.new_orb_lb,
sigv=sigv_age,
timpact=impacts)
# return stream_smooth_leading, stream_smooth_trailing, stream_leading, stream_trailing
return None, stream_smooth_trailing, None, stream_trailing
def forward(self, inputs):
outputs = []
for input in inputs:
success = False
while not success:
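                # Retry until the stream model is built successfully; any exception
                # raised during setup is swallowed and the call is retried.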
try:
outputs.append(self._simulate_stream(input.item()))
success = True
except:
pass
return outputs
class WDMSubhaloSimulator(BaseSimulator):
def __init__(self, streams, resolution=0.01, record_impacts=False, allow_no_impacts=True):
super(WDMSubhaloSimulator, self).__init__()
# Simulator parameters.
self.record_impacts = record_impacts
self.allow_no_impacts = allow_no_impacts # True -> allow observations with no impacts.
# Subhalo parameters.
self.Xrs = float(5)
self.ravg = float(20)
# Streams.
self.streams = streams
# Precomputed smooth stream properties.
self.apars = np.arange(0.01, 1., resolution)
self.dens_unp_leading = None
self.omega_unp_leading = None
self.dens_unp_trailing = None
self.omega_unp_trailing = None
self._precompute_smooth_stream_properties()
def _precompute_smooth_stream_properties(self):
# self._precompute_smooth_stream_leading()
self._precompute_smooth_stream_trailing()
def _precompute_smooth_stream_leading(self):
stream_smooth = self.streams[0]
self.dens_unp_leading = [stream_smooth._density_par(a) for a in self.apars]
self.omega_unp_leading = [stream_smooth.meanOmega(a, oned=True) for a in self.apars]
def _precompute_smooth_stream_trailing(self):
stream_smooth = self.streams[1]
self.dens_unp_trailing = [stream_smooth._density_par(a) for a in self.apars]
self.omega_unp_trailing = [stream_smooth.meanOmega(a, oned=True) for a in self.apars]
def _simulate_observation(self, wdm_mass, leading):
if leading:
return self._simulate_observation_leading(wdm_mass)
else:
return self._simulate_observation_trailing(wdm_mass)
def _simulate_observation_leading(self, wdm_mass):
success = False
stream = self.streams[2] # Leading peppered stream
dens_unp = self.dens_unp_leading
omega_unp = self.omega_unp_leading
while not success: # This is done to cover errors in the subhalo simulation code.
try:
output = self._simulate(wdm_mass, stream, dens_unp, omega_unp)
success = True
except Exception as e:
print(e)
return output
def _simulate_observation_trailing(self, wdm_mass):
success = False
stream = self.streams[3] # Trailing peppered stream
dens_unp = self.dens_unp_trailing
omega_unp = self.omega_unp_trailing
while not success:
try:
output = self._simulate(wdm_mass, stream, dens_unp, omega_unp)
success = True
except Exception as e:
print(e)
return output
def _simulate(self, wdm_mass, stream, dens_unp, omega_unp):
outputs = simulate_subhalos_mwdm(stream, m_wdm=wdm_mass, r=self.ravg, Xrs=self.Xrs)
impact_angles = outputs[0]
impactbs = outputs[1]
subhalovels = outputs[2]
timpacts = outputs[3]
GMs = outputs[4]
rss = outputs[5]
# Check if subhalo hits were detected.
num_impacts = len(GMs)
has_impacts = num_impacts > 0
if not self.allow_no_impacts and not has_impacts:
raise ValueError("Only observations with impacts are allowed!")
if has_impacts:
stream.set_impacts(impactb=impactbs, subhalovel=subhalovels,
impact_angle=impact_angles, timpact=timpacts, rs=rss, GM=GMs)
densOmega = np.array([stream._densityAndOmega_par_approx(a) for a in self.apars]).T
mO = densOmega[1]
else:
mO = omega_unp
mT = stream.meanTrack(self.apars, _mO=mO, coord="lb")
phi = lb_to_phi12(mT[0], mT[1], degree=True)[:, 0]
phi[phi > 180] -= 360
if has_impacts:
density = compute_obs_density(phi, self.apars, densOmega[0], densOmega[1])
else:
density = compute_obs_density(phi, self.apars, dens_unp, omega_unp)
# Check if the computed density has a nan-value.
if np.isnan(density).sum() > 0:
raise ValueError("nan values have been computed.")
# Check if the impacts need to be recorded.
if self.record_impacts:
output = num_impacts, phi, density
else:
output = phi, density
return output
def _simulate_observations(self, wdm_mass):
#leading = self._simulate_observation(wdm_mass, leading=True)
trailing = self._simulate_observation(wdm_mass, leading=False)
#output = leading, trailing
#return output
return trailing
def forward(self, inputs):
outputs = []
inputs = inputs.view(-1, 1)
for input in inputs:
observation = self._simulate_observations(input.item())
outputs.append(observation)
return outputs
class PresimulatedStreamsWDMSubhaloSimulator(WDMSubhaloSimulator):
def __init__(self, datadir, stream_model_index, resolution=0.01):
streams = self._load_streams(datadir, stream_model_index)
super(PresimulatedStreamsWDMSubhaloSimulator, self).__init__(
streams, resolution=resolution)
def _load_streams(self, datadir, index):
# Stream models metadata.
stream_models_path_query = datadir + "/stream-models/streams-*"
directories = glob.glob(stream_models_path_query)
directories.sort()
stream_directories = directories
stream_blocks = len(directories)
streams_per_block = len(np.load(stream_directories[0] + "/inputs.npy"))
streams_total = stream_blocks * streams_per_block
# Load the streams.
block_index = int(index / streams_per_block)
stream_index_in_block = index % streams_per_block
stream_directory = stream_directories[block_index]
path = stream_directory + "/outputs.pickle"
with open(path, "rb") as f:
streams = pickle.load(f)[stream_index_in_block]
return streams
```
#### File: experiments/experiment-simulations/util.py
```python
import MWPotential2014Likelihood
import astropy.units as u
import gd1_util
import hypothesis
import numpy
import numpy as np
import pickle
import torch
from galpy.orbit import Orbit
from galpy.potential import MWPotential2014, turn_physical_off, vcirc
from galpy.util import bovy_conversion, bovy_coords, save_pickles, bovy_plot
from gd1_util_MWhaloshape import lb_to_phi12
from scipy import integrate, interpolate
from scipy.integrate import quad
from torch.distributions.uniform import Uniform
def allocate_prior_stream_age():
lower = torch.tensor(3).float().to(hypothesis.accelerator)
upper = torch.tensor(7).float().to(hypothesis.accelerator)
return Uniform(lower, upper)
def allocate_prior_wdm_mass():
lower = torch.tensor(1).float().to(hypothesis.accelerator)
upper = torch.tensor(50).float().to(hypothesis.accelerator)
return Uniform(lower, upper)
def load_observed_gd1(path, phi, degree=1):
data = np.genfromtxt(path, names=True)
phi_max = max(phi) + 5 # For stability in fitting the splines
phi_min = min(phi) - 5 # For stability in fitting the splines
phi_data = data["phi1mid"]
if phi_min < min(phi_data) or phi_max > max(phi_data):
raise ValueError("Angles not supported by observation.")
indices = (phi_data <= phi_max) & (phi_data >= phi_min)
phi_data = phi_data[indices]
linear_density = data["lindens"][indices]
error = data["e_lindens"][indices]
trend = np.polyfit(phi_data, linear_density, deg=degree)
fitted = np.poly1d(trend)(phi_data)
error /= fitted
linear_density /= fitted
# Fit a spline and extract the requested values
l = np.array(linear_density)
fit_density = interpolate.InterpolatedUnivariateSpline(phi_data, linear_density)
fit_error = interpolate.InterpolatedUnivariateSpline(phi_data, error)
linear_density = fit_density(phi)
trend = np.polyfit(phi, linear_density, deg=degree)
fitted = np.poly1d(trend)(phi)
linear_density /= fitted
error = fit_error(phi)
error /= fitted
return linear_density, error
h=0.6774
ro=8.
vo=220.
def parse_times(times,age):
if 'sampling' in times:
nsam= int(times.split('sampling')[0])
return [float(ti)/bovy_conversion.time_in_Gyr(vo,ro)
for ti in np.arange(1,nsam+1)/(nsam+1.)*age]
return [float(ti)/bovy_conversion.time_in_Gyr(vo,ro)
for ti in times.split(',')]
def parse_mass(mass):
return [float(m) for m in mass.split(',')]
def nsubhalo(m):
return 0.3*(10.**6.5/m)
def rs(m,plummer=False,rsfac=1.):
if plummer:
#print ('Plummer')
return 1.62*rsfac/ro*(m/10.**8.)**0.5
else:
return 1.05*rsfac/ro*(m/10.**8.)**0.5
def alpha(m_wdm):
return (0.048/h)*(m_wdm)**(-1.11) #in Mpc , m_wdm in keV
def lambda_hm(m_wdm):
nu=1.12
return 2*numpy.pi*alpha(m_wdm)/(2**(nu/5.) - 1.)**(1/(2*nu))
def M_hm(m_wdm):
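    # Half-mode mass: mean matter density times the volume of a sphere of radius
    # lambda_hm / 2, the scale below which the WDM power spectrum is suppressed.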
Om_m=0.3089
rho_c=1.27*10**11 #Msun/Mpc^3
rho_bar=Om_m*rho_c
return (4*numpy.pi/3)*rho_bar*(lambda_hm(m_wdm)/2.)**3
def Einasto(r):
al=0.678 #alpha_shape
rm2=199 #kpc, see Erkal et al 1606.04946 for scaling to M^1/3
return numpy.exp((-2./al)*((r/rm2)**al -1.))*4*numpy.pi*(r**2)
def dndM_cdm(M,c0kpc=2.02*10**(-13),mf_slope=-1.9):
#c0kpc=2.02*10**(-13) #Msun^-1 kpc^-3 from Denis' paper
m0=2.52*10**7 #Msun from Denis' paper
return c0kpc*((M/m0)**mf_slope)
def fac(M,m_wdm):
beta=-0.99
gamma=2.7
return (1.+gamma*(M_hm(m_wdm)/M))**beta
def dndM_wdm(M,m_wdm,c0kpc=2.02*10**(-13),mf_slope=-1.9):
return fac(M,m_wdm)*dndM_cdm(M,c0kpc=c0kpc,mf_slope=mf_slope)
def nsub_cdm(M1,M2,r=20.,c0kpc=2.02*10**(-13),mf_slope=-1.9):
#number density of subhalos in kpc^-3
m1=10**(M1)
m2=10**(M2)
return integrate.quad(dndM_cdm,m1,m2,args=(c0kpc,mf_slope))[0]*integrate.quad(Einasto,0.,r)[0]*(8.**3.)/(4*numpy.pi*(r**3)/3) #in Galpy units
def nsub_wdm(M1,M2,m_wdm,r=20.,c0kpc=2.02*10**(-13),mf_slope=-1.9):
m1=10**(M1)
m2=10**(M2)
return integrate.quad(dndM_wdm,m1,m2,args=(m_wdm,c0kpc,mf_slope))[0]*integrate.quad(Einasto,0.,r)[0]*(8.**3)/(4*numpy.pi*(r**3)/3) #in Galpy units
def simulate_subhalos_mwdm(sdf_pepper,m_wdm,mf_slope=-1.9,c0kpc=2.02*10**(-13),r=20.,Xrs=5.,sigma=120./220.):
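    # Draw the number of subhalo impacts from a Poisson distribution whose rate is
    # set by the WDM-suppressed subhalo mass function, then sample impact times,
    # angles, impact parameters, velocities, masses and scale radii for each hit.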
Mbin_edge=[5.,6.,7.,8.,9.]
Nbins=len(Mbin_edge)-1
#compute number of subhalos in each mass bin
nden_bin=np.empty(Nbins)
rate_bin=np.empty(Nbins)
for ll in range(Nbins):
nden_bin[ll]=nsub_wdm(Mbin_edge[ll],Mbin_edge[ll+1],m_wdm=m_wdm,r=r,c0kpc=c0kpc,mf_slope=mf_slope)
Mmid=10**(0.5*(Mbin_edge[ll]+Mbin_edge[ll+1]))
rate_bin[ll]=sdf_pepper.subhalo_encounters(sigma=sigma,nsubhalo=nden_bin[ll],bmax=Xrs*rs(Mmid,plummer=True))
rate = np.sum(rate_bin)
Nimpact= numpy.random.poisson(rate)
norm= 1./quad(lambda M : fac(M,m_wdm)*((M)**(mf_slope +0.5)),10**(Mbin_edge[0]),10**(Mbin_edge[Nbins]))[0]
def cdf(M):
return quad(lambda M : norm*fac(M,m_wdm)*(M)**(mf_slope +0.5),10**Mbin_edge[0],M)[0]
MM=numpy.linspace(Mbin_edge[0],Mbin_edge[Nbins],10000)
cdfl=[cdf(i) for i in 10**MM]
icdf= interpolate.InterpolatedUnivariateSpline(cdfl,10**MM,k=1)
timpact_sub= numpy.array(sdf_pepper._uniq_timpact)[numpy.random.choice(len(sdf_pepper._uniq_timpact),size=Nimpact,
p=sdf_pepper._ptimpact)]
# Sample angles from the part of the stream that existed then
impact_angle_sub= numpy.array([sdf_pepper._icdf_stream_len[ti](numpy.random.uniform())
for ti in timpact_sub])
sample_GM=lambda: icdf(numpy.random.uniform())/bovy_conversion.mass_in_msol(vo,ro)
GM_sub= numpy.array([sample_GM() for a in impact_angle_sub])
rs_sub= numpy.array([rs(gm*bovy_conversion.mass_in_msol(vo,ro)) for gm in GM_sub])
# impact b
impactb_sub= (2.*numpy.random.uniform(size=len(impact_angle_sub))-1.)*Xrs*rs_sub
# velocity
subhalovel_sub= numpy.empty((len(impact_angle_sub),3))
for ii in range(len(timpact_sub)):
subhalovel_sub[ii]=sdf_pepper._draw_impact_velocities(timpact_sub[ii],sigma,impact_angle_sub[ii],n=1)[0]
# Flip angle sign if necessary
if not sdf_pepper._gap_leading: impact_angle_sub*= -1.
return impact_angle_sub,impactb_sub,subhalovel_sub,timpact_sub,GM_sub,rs_sub
def compute_obs_density_no_interpolation(phi1, apars, dens_apar):
apar_edge=[]
phi1_edge=[]
abw0=apars[1]-apars[0]
apar_edge.append(apars[0]-(abw0/2.))
phi1bw0=phi1[1]-phi1[0]
phi1_edge.append(phi1[0]-(phi1bw0/2.))
for ii in range(len(apars)-1):
abw=apars[ii+1]-apars[ii]
phi1bw=phi1[ii+1]-phi1[ii]
apar_edge.append(apars[ii]+abw/2.)
phi1_edge.append(phi1[ii]+phi1bw/2.)
abw_last=apars[len(apars)-1]-apars[len(apars)-2]
apar_edge.append(apars[len(apars)-1]+(abw_last/2.))
phi1bw_last=phi1[len(phi1)-1]-phi1[len(phi1)-2]
phi1_edge.append(phi1[len(phi1)-1]+(phi1bw_last/2.))
#compute the Jacobian d(apar)/d(phi1) using finite difference method
dapar_dphi1=np.fabs(numpy.diff(apar_edge)/numpy.diff(phi1_edge))
density = dens_apar * dapar_dphi1
return density
def compute_obs_density(phi1,apars,dens_apar,Omega):
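    #Convert the density along the stream angle apar into an observed density along
    #phi1 by multiplying with the Jacobian |d(apar)/d(phi1)| estimated from bin edges.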
apar_edge=[]
phi1_edge=[]
abw0=apars[1]-apars[0]
apar_edge.append(apars[0]-(abw0/2.))
phi1bw0=phi1[1]-phi1[0]
phi1_edge.append(phi1[0]-(phi1bw0/2.))
for ii in range(len(apars)-1):
abw=apars[ii+1]-apars[ii]
phi1bw=phi1[ii+1]-phi1[ii]
apar_edge.append(apars[ii]+abw/2.)
phi1_edge.append(phi1[ii]+phi1bw/2.)
abw_last=apars[len(apars)-1]-apars[len(apars)-2]
apar_edge.append(apars[len(apars)-1]+(abw_last/2.))
phi1bw_last=phi1[len(phi1)-1]-phi1[len(phi1)-2]
phi1_edge.append(phi1[len(phi1)-1]+(phi1bw_last/2.))
#compute the Jacobian d(apar)/d(phi1) using finite difference method
dapar_dphi1=np.fabs(numpy.diff(apar_edge)/numpy.diff(phi1_edge))
#print (dapar_dphi1)
#Interpolate dens(apar)
ipdens_apar= interpolate.InterpolatedUnivariateSpline(apars,dens_apar)
#Interpolate apar(phi1)
if phi1[1] < phi1[0] : # ad-hoc way of checking whether increasing or decreasing
ipphi1= interpolate.InterpolatedUnivariateSpline(phi1[::-1],apars[::-1])
#Interpolate Jacobian
ipdapar_dphi1=interpolate.InterpolatedUnivariateSpline(phi1[::-1],dapar_dphi1[::-1])
#Interpolate density(phi1) by multiplying by jacobian
dens_phi1=interpolate.InterpolatedUnivariateSpline(phi1[::-1],ipdens_apar(ipphi1(phi1[::-1]))*ipdapar_dphi1(phi1[::-1]))
else :
ipphi1= interpolate.InterpolatedUnivariateSpline(phi1,apars)
#Interpolate Jacobian
ipdapar_dphi1=interpolate.InterpolatedUnivariateSpline(phi1,dapar_dphi1)
#Interpolate density(phi1) by multiplying by jacobian
dens_phi1=interpolate.InterpolatedUnivariateSpline(phi1,ipdens_apar(ipphi1(phi1))*ipdapar_dphi1(phi1))
return (dens_phi1(phi1))
```
#### File: constraining-dark-matter-with-stellar-streams-and-ml/notebooks/util.py
```python
import glob
import hypothesis
import numpy as np
import os
import requests
import torch
from hypothesis.nn.amortized_ratio_estimation import RatioEstimatorEnsemble
from ratio_estimation import DoubleRatioEstimator
from ratio_estimation import MLPRatioEstimator
from ratio_estimation import RatioEstimator
from ratio_estimation import SingleRatioEstimator
from ratio_estimation import resnet_depth
from torch.distributions.uniform import Uniform
def load(model, marginalized=True):
model = model.lower()
if marginalized:
t = "marginalized"
else:
t = "not-marginalized"
mapping = {
"all": "models/4096/" + t + "/selu/*-batchnorm-0*/best-model.th",
"all-bn": "models/4096/" + t + "/selu/*-batchnorm-1*/best-model.th",
"mlp": "models/4096/" + t + "/selu/ratio-estimator-mlp-*-batchnorm-0*/best-model.th",
"mlp-bn": "models/4096/" + t + "/selu/ratio-estimator-mlp-*-batchnorm-1*/best-model.th",
"resnet-18": "models/4096/" + t + "/selu/ratio-estimator-resnet-18-*-batchnorm-0*/best-model.th",
"resnet-18-bn": "models/4096/" + t + "/selu/ratio-estimator-resnet-18-*-batchnorm-1*/best-model.th",
"resnet-50": "models/4096/" + t + "/selu/ratio-estimator-resnet-50-*-batchnorm-0*/best-model.th",
"resnet-50-bn": "models/4096/" + t + "/selu/ratio-estimator-resnet-50-*-batchnorm-1*/best-model.th"}
if model not in mapping.keys():
raise ValueError("Unknown model!")
return load_ratio_estimator(mapping[model])
@torch.no_grad()
def MarginalizedAgePrior():
lower = torch.tensor(1).float()
lower = lower.to(hypothesis.accelerator)
upper = torch.tensor(50.01).float()
upper = upper.to(hypothesis.accelerator)
return Uniform(lower, upper)
@torch.no_grad()
def Prior():
lower = torch.tensor([1, 3]).float()
lower = lower.to(hypothesis.accelerator)
upper = torch.tensor([50.01, 7]).float()
upper = upper.to(hypothesis.accelerator)
return Uniform(lower, upper)
def load_activation(activation):
activations = {
"elu": torch.nn.ELU,
"leakyrelu": torch.nn.LeakyReLU,
"prelu": torch.nn.PReLU,
"relu": torch.nn.ReLU,
"prelu": torch.nn.PReLU,
"selu": torch.nn.SELU,
"tanh": torch.nn.Tanh}
if activation not in activations.keys():
raise ValueError("Activation", activation, "is not available.")
return activations[activation]
def load_ratio_estimator(path, normalize_inputs=False):
if '*' in path:
estimator = load_ensemble_ratio_estimator(path, normalize_inputs)
else:
estimator = load_single_ratio_estimator(path, normalize_inputs)
# Move to the default Hypothesis accelerator
estimator.to(hypothesis.accelerator)
estimator.eval()
return estimator
def load_ensemble_ratio_estimator(query, normalize_inputs=False):
paths = glob.glob(query)
estimators = []
for path in paths:
estimators.append(load_single_ratio_estimator(path, normalize_inputs))
    if len(estimators) == 0:
raise ValueError("No ratio estimators found! Verify the specified path.")
return RatioEstimatorEnsemble(estimators)
def load_single_ratio_estimator(path, normalize_inputs=False):
# Check if the path exists.
if not os.path.exists(path):
raise ValueError("Path " + path + " does not exist.")
weights = torch.load(path)
dirname = os.path.dirname(path)
segments = path.split('/')
# Check what activation to use
activation = load_activation(path.split('/')[-3])
segments = dirname.split('-')
# Extract the dropout setting
index = segments.index("dropout")
dropout = float(segments[index + 1])
# Extract the batch normalization setting
index = segments.index("batchnorm")
batchnorm = bool(int(segments[index + 1]))
# Check if it's the marginalized model.
if "not-marginalized" in path:
inputs_dim = 2
else:
inputs_dim = 1
# Extract the ResNet depth configuration
try:
index = segments.index("resnet")
depth = int(segments[index + 1])
mlp = False
    except (ValueError, IndexError):  # No ResNet depth segment present; fall back to the MLP estimator.
mlp = True
# Load the MLP
if not mlp:
# Allocate the ratio estimator
ratio_estimator = RatioEstimator(
activation=activation,
batchnorm=batchnorm,
depth=depth,
dim_inputs=inputs_dim,
dropout=dropout,
normalize_inputs=normalize_inputs)
# Backward compatibility
if "_normalizer.weight" in weights.keys():
weights["bn_inputs.weight"] = weights["_normalizer.weight"]
del weights["_normalizer.weight"]
weights["bn_inputs.bias"] = weights["_normalizer.bias"]
del weights["_normalizer.bias"]
weights["bn_inputs.running_mean"] = weights["_normalizer.running_mean"]
del weights["_normalizer.running_mean"]
weights["bn_inputs.running_var"] = weights["_normalizer.running_var"]
del weights["_normalizer.running_var"]
weights["bn_inputs.num_batches_tracked"] = weights["_normalizer.num_batches_tracked"]
del weights["_normalizer.num_batches_tracked"]
else:
ratio_estimator = MLPRatioEstimator(
activation=activation,
batchnorm=batchnorm,
dim_inputs=inputs_dim,
dropout=dropout,
normalize_inputs=normalize_inputs)
ratio_estimator.load_state_dict(weights)
ratio_estimator = ratio_estimator.eval()
return ratio_estimator
def download(id, destination="."):
r"""Adapted from:
https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
"""
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
``` |
{
"source": "JoeriHermans/hypothesis",
"score": 2
} |
#### File: bin/ratio_estimation/train.py
```python
import argparse
import hypothesis
import importlib
import numpy as np
import os
import torch
from hypothesis.auto.training import LikelihoodToEvidenceRatioEstimatorTrainer as Trainer
from hypothesis.auto.training import create_trainer
from hypothesis.nn.amortized_ratio_estimation import BaseConservativeCriterion
from hypothesis.nn.amortized_ratio_estimation import BaseCriterion
from hypothesis.nn.amortized_ratio_estimation import BaseExperimentalCriterion
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import TensorDataset
from tqdm import tqdm
def main(arguments):
# Allocate the datasets
dataset_test = allocate_dataset_test(arguments)
dataset_train = allocate_dataset_train(arguments)
# Allocate the ratio estimator
estimator = allocate_estimator(arguments)
# Check if the gradients have to be clipped.
if arguments.clip_grad != 0.0:
for p in estimator.parameters():
p.register_hook(lambda grad: torch.clamp(grad, -arguments.clip_grad, arguments.clip_grad))
# Allocate the optimizer
optimizer = torch.optim.AdamW(
estimator.parameters(),
amsgrad=arguments.amsgrad,
lr=arguments.lr,
weight_decay=arguments.weight_decay)
# Prepare the training criterion
if arguments.conservativeness > 0.0:
criterion = BaseConservativeCriterion(
batch_size=arguments.batch_size,
beta=arguments.conservativeness,
denominator=arguments.denominator,
estimator=estimator,
logits=arguments.logits)
else:
criterion = BaseCriterion(
batch_size=arguments.batch_size,
denominator=arguments.denominator,
estimator=estimator,
logits=arguments.logits)
# Check if the experimental settings have to be activated
if arguments.experimental:
criterion = BaseExperimentalCriterion(
batch_size=arguments.batch_size,
denominator=arguments.denominator,
estimator=estimator,
logits=arguments.logits)
# Allocate the learning rate scheduler, if requested.
if arguments.lrsched:
if arguments.lrsched_every is None or arguments.lrsched_gamma is None:
lr_scheduler = ReduceLROnPlateau(optimizer, verbose=True)
else:
lr_scheduler = StepLR(optimizer, step_size=arguments.lrsched_every, gamma=arguments.lrsched_gamma)
else:
lr_scheduler = None
# Allocate the trainer
Trainer = create_trainer(criterion, arguments.denominator)
trainer = Trainer(
accelerator=hypothesis.accelerator,
batch_size=arguments.batch_size,
criterion=criterion,
dataset_test=dataset_test,
dataset_train=dataset_train,
epochs=arguments.epochs,
estimator=estimator,
lr_scheduler=lr_scheduler,
shuffle=(not arguments.dont_shuffle),
optimizer=optimizer,
workers=arguments.workers)
# Register the callbacks
if arguments.show:
# Callbacks
progress_bar = tqdm(total=arguments.epochs)
def report_test_loss(caller):
trainer = caller
current_epoch = trainer.current_epoch
test_loss = trainer.losses_test[-1]
progress_bar.set_description("Test loss %s" % test_loss)
progress_bar.update(1)
trainer.add_event_handler(trainer.events.epoch_complete, report_test_loss)
# Run the optimization procedure
summary = trainer.fit()
if arguments.show:
# Cleanup the progress bar
progress_bar.close()
print(summary)
if arguments.out is None:
return # No output directory has been specified, exit.
# Create the directory if it does not exist.
if not os.path.exists(arguments.out):
os.mkdir(arguments.out)
best_model_weights = summary.best_model()
final_model_weights = summary.final_model()
train_losses = summary.train_losses()
test_losses = summary.test_losses()
# Save the results.
np.save(arguments.out + "/losses-train.npy", train_losses)
np.save(arguments.out + "/losses-test.npy", test_losses)
torch.save(best_model_weights, arguments.out + "/best-model.th")
torch.save(final_model_weights, arguments.out + "/model.th")
summary.save(arguments.out + "/result.summary")
@torch.no_grad()
def allocate_dataset_train(arguments):
return load_class(arguments.data_train)()
@torch.no_grad()
def allocate_dataset_test(arguments):
if arguments.data_test is not None:
dataset = load_class(arguments.data_test)()
else:
dataset = None
return dataset
@torch.no_grad()
def allocate_estimator(arguments):
estimator = load_class(arguments.estimator)()
# Check if we are able to allocate a data parallel model.
if torch.cuda.device_count() > 1 and arguments.data_parallel:
estimator = torch.nn.DataParallel(estimator)
estimator = estimator.to(hypothesis.accelerator)
return estimator
def load_class(full_classname):
if full_classname is None:
raise ValueError("The specified classname cannot be `None`.")
module_name, class_name = full_classname.rsplit('.', 1)
module = __import__(module_name, fromlist=[class_name])
return getattr(module, class_name)
def parse_arguments():
parser = argparse.ArgumentParser("Amortised Approximate Ratio Estimator training")
# General settings
parser.add_argument("--data-parallel", action="store_true", help="Enable data-parallel training if multiple GPU's are available (default: false).")
parser.add_argument("--disable-gpu", action="store_true", help="Disable the usage of the GPU, not recommended. (default: false).")
parser.add_argument("--out", type=str, default=None, help="Output directory (default: none).")
parser.add_argument("--show", action="store_true", help="Show the progress and the final result (default: false).")
parser.add_argument("--dont-shuffle", action="store_true", help="Disables shuffling of the batch loader (default: false).")
parser.add_argument("--denominator", type=str, default="inputs|outputs", help="Random variables in the denominator and their (in)dependence relation (default: 'inputs|outputs').")
# Optimization settings
parser.add_argument("--amsgrad", action="store_true", help="Use AMSGRAD version of Adam (default: false).")
parser.add_argument("--batch-size", type=int, default=64, help="Batch size (default: 64).")
parser.add_argument("--conservativeness", type=float, default=0.0, help="Conservative term (default: 0.0).")
parser.add_argument("--clip-grad", type=float, default=0.0, help="Value to clip the gradients with (default: 0.0 or no clipping).")
parser.add_argument("--epochs", type=int, default=1, help="Number of epochs (default: 1).")
parser.add_argument("--logits", action="store_true", help="Use the logit-trick for the minimization criterion (default: false).")
parser.add_argument("--lr", type=float, default=0.001, help="Learning rate (default: 0.001).")
parser.add_argument("--lrsched", action="store_true", help="Enable learning rate scheduling (default: false).")
parser.add_argument("--lrsched-every", type=int, default=None, help="Schedule the learning rate every n epochs (default: none).")
parser.add_argument("--lrsched-gamma", type=float, default=None, help="Learning rate scheduling stepsize (default: none).")
parser.add_argument("--weight-decay", type=float, default=0.0, help="Weight decay (default: 0.0).")
parser.add_argument("--workers", type=int, default=2, help="Number of concurrent data loaders (default: 2).")
# Data settings
parser.add_argument("--data-test", type=str, default=None, help="Full classname of the testing dataset (default: none, optional).")
parser.add_argument("--data-train", type=str, default=None, help="Full classname of the training dataset (default: none).")
# Ratio estimator settings
parser.add_argument("--estimator", type=str, default=None, help="Full classname of the ratio estimator (default: none).")
# Experimental settings
parser.add_argument("--experimental", action="store_true", help="Enable experimental settings (default: false).")
arguments, _ = parser.parse_known_args()
return arguments
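# Example invocation (module paths and values are illustrative only):
#   python train.py --data-train my_package.DatasetTrain --data-test my_package.DatasetTest \
#       --estimator my_package.RatioEstimator --epochs 50 --batch-size 256 --lr 0.001 --out output/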
if __name__ == "__main__":
arguments = parse_arguments()
main(arguments)
```
#### File: hypothesis/hypothesis/__init__.py
```python
r"""Hypothesis is a python module for statistical inference and the
mechanization of science.
The package contains (approximate) inference algorithms to solve statistical
problems. Utilities are provided for data loading, efficient
simulation, visualization, fire-and-forget inference, and validation.
"""
__version__ = "0.0.3"
__author__ = [
"<NAME>"]
__email__ = [
"<EMAIL>"]
################################################################################
# Global variables
################################################################################
import multiprocessing
import torch
cpu_count = multiprocessing.cpu_count()
"""int: Number of available processor cores.
Variable will be initialized when ``hypothesis`` is loaded for the first time.
"""
workers = cpu_count
"""int: Number of default workers.
Default number of workers in Hypothesis.
"""
def set_workers(n):
r"""Sets the number of default parallel hypothesis workers."""
assert(n >= 1)
hypothesis.workers = n
accelerator = torch.device("cuda" if torch.cuda.is_available() else "cpu")
a = accelerator # Short name
"""torch.device: PyTorch device describing the accelerator backend.
The variable will be initialized when ``hypothesis`` is loaded for the first
time. It will check for the availability of a CUDA device. If a CUDA-enabled
device is present, ``hypothesis`` will select the CUDA device defined in the
``CUDA_VISIBLE_DEVICES`` environment variable. If no such device is specified,
the variable will default to GPU 0.
"""
def disable_gpu():
r"""Disables GPU acceleration. Hypothesis' accelerator will have been
set to 'cpu'."""
hypothesis.accelerator = "cpu"
hypothesis.a = hypothesis.accelerator
def enable_gpu():
r"""Tries to enable GPU acceleration. If a GPU is present, a CUDA
device will be set, else it will default to 'cpu'."""
hypothesis.accelerator = torch.device("cuda" if torch.cuda.is_available() else "cpu")
hypothesis.a = hypothesis.accelerator
return hypothesis.accelerator
def gpu_available():
r"""Checks if GPU acceleration is available."""
return hypothesis.accelerator != "cpu"
################################################################################
# Hypothesis' defaults
################################################################################
import hypothesis.default
```
#### File: nn/amortized_ratio_estimation/likelihood_to_evidence.py
```python
import hypothesis
import hypothesis.nn
import torch
from .base import BaseCriterion
from .base import BaseConservativeCriterion
from .base import BaseRatioEstimator
DENOMINATOR = "inputs|outputs"
class LikelihoodToEvidenceCriterion(BaseCriterion):
def __init__(self,
estimator,
batch_size=hypothesis.default.batch_size,
logits=False):
super(LikelihoodToEvidenceCriterion, self).__init__(
batch_size=batch_size,
denominator=DENOMINATOR,
estimator=estimator,
logits=logits)
class ConservativeLikelihoodToEvidenceCriterion(BaseConservativeCriterion):
def __init__(self,
estimator,
beta=0.001,
batch_size=hypothesis.default.batch_size,
logits=False):
        super(ConservativeLikelihoodToEvidenceCriterion, self).__init__(
            batch_size=batch_size,
            beta=beta,
            denominator=DENOMINATOR,
            estimator=estimator,
            logits=logits)
class BaseLikelihoodToEvidenceRatioEstimator(BaseRatioEstimator):
def __init__(self):
super(BaseLikelihoodToEvidenceRatioEstimator, self).__init__()
def forward(self, inputs, outputs):
log_ratios = self.log_ratio(inputs=inputs, outputs=outputs)
return log_ratios.sigmoid(), log_ratios
def log_ratio(self, inputs, outputs):
raise NotImplementedError
```
#### File: nn/amortized_ratio_estimation/util.py
```python
import hypothesis
import numpy as np
import torch
def build_ratio_estimator(architecture, variables, **kwargs):
creator = architectures[architecture]
return creator(architecture, variables, **kwargs)
def build_mlp_ratio_estimator(architecture, variables, **kwargs):
from hypothesis.nn.amortized_ratio_estimation.multi_layered_perceptron import build_ratio_estimator
return build_ratio_estimator(variables)
def build_resnet_ratio_estimator(architecture, variables, **kwargs):
from hypothesis.nn.amortized_ratio_estimation.resnet import build_ratio_estimator
return build_ratio_estimator(variables, **kwargs)
def build_resnet_with_depth_ratio_estimator(architecture, variables, **kwargs):
_, depth = architecture.split('-')
kwargs["depth"] = depth
return build_resnet_ratio_estimator(architecture, variables, **kwargs)
def build_densenet_ratio_estimator(architecture, variables, **kwargs):
from hypothesis.nn.amortized_ratio_estimation.densenet import build_ratio_estimator
return build_ratio_estimator(variables, **kwargs)
def build_densenet_with_depth_ratio_estimator(architecture, variables, **kwargs):
raise NotImplementedError
architectures = {
# Multi-Layered Perceptron
"mlp": build_mlp_ratio_estimator,
# ResNet
"resnet": build_resnet_ratio_estimator,
"resnet-18": build_resnet_with_depth_ratio_estimator,
"resnet-34": build_resnet_with_depth_ratio_estimator,
"resnet-50": build_resnet_with_depth_ratio_estimator,
"resnet-101": build_resnet_with_depth_ratio_estimator,
"resnet-152": build_resnet_with_depth_ratio_estimator,
# DenseNet
"densenet": build_densenet_ratio_estimator,
"densenet-121": build_densenet_with_depth_ratio_estimator,
"densenet-161": build_densenet_with_depth_ratio_estimator,
"densenet-169": build_densenet_with_depth_ratio_estimator,
"densenet-201": build_densenet_with_depth_ratio_estimator}
```
#### File: data/numpy/storage.py
```python
import numpy as np
import os
import torch
class BaseStorage:
def close(self):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
    def __getitem__(self, index):
raise NotImplementedError
def __del__(self):
self.close()
class InMemoryStorage(BaseStorage):
def __init__(self, path):
super(InMemoryStorage, self).__init__()
# Check if the specified path exists.
if path is None or not os.path.exists(path):
raise ValueError("The path", path, "does not exists.")
# Storage properties.
self.path = path
self.data = np.load(path)
def close(self):
if hasattr(self, "data"):
del self.data
self.data = None
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return torch.from_numpy(self.data[index])
class PersistentStorage(BaseStorage):
def __init__(self, path):
super(PersistentStorage, self).__init__()
# Check if the specified path exists.
        if path is None or not os.path.exists(path):
            raise ValueError("The path " + str(path) + " does not exist.")
# Storage properties.
self.path = path
self.fd = open(self.path, "rb")
self.header, self.offset = self._parse_header(self.fd)
self.fd.close()
self.fd = None
self.data_shape = self.header["shape"][1:]
self.data_type = self.header["descr"]
self.data_dimensionality = self._compute_dimensionality(self.data_shape)
self.data_bytes = int(self.data_type[-1]) * self.data_dimensionality
self.size = self.header["shape"][0]
def _retrieve(self, index):
if self.fd is None:
self.fd = open(self.path, "rb")
self.fd.seek(self.offset + index * self.data_bytes)
data = np.fromfile(self.fd, dtype=self.data_type, count=self.data_dimensionality)
return data.reshape(self.data_shape)
def close(self):
if hasattr(self, "fd") and self.fd is not None:
self.fd.close()
self.fd = None
    def __getitem__(self, index):
        data = self._retrieve(index)
        item = torch.from_numpy(data)
        return item
def __len__(self):
return self.size
@staticmethod
def _compute_dimensionality(shape):
dimensionality = 1
for size in shape:
dimensionality *= size
return dimensionality
@staticmethod
def _parse_header(fd):
r"""
Parses the ``numpy`` header of the specified file descriptor.
Note:
* The first 6 bytes are a magic string: exactly \x93NUMPY.
* The next 1 byte is an unsigned byte: the major version number of the file format, e.g. \x01.
* The next 1 byte is an unsigned byte: the minor version number of the file format, e.g. \x00. Note: the version of the file format is not tied to the version of the numpy package.
* The next 2 bytes form a little-endian unsigned short int: the length of the header data HEADER_LEN.
"""
prefix = fd.read(10) # Read fixed header.
header_offset = int.from_bytes(prefix[-2:], byteorder="little")
        header = ast.literal_eval(fd.read(header_offset).decode("latin1"))  # Avoids eval() on the header bytes.
header_offset += 10
return header, header_offset
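# Minimal usage sketch (hypothetical file name): both storage classes index
# individual samples from a .npy file and return them as torch tensors.
#
#   storage = PersistentStorage("samples.npy")
#   first_sample = storage[0]
#   storage.close()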
``` |
{
"source": "JoeriHermans/Intelligent-Automation-System",
"score": 3
} |
#### File: python/cooling_system/cooling_system.py
```python
import sys
import socket
import struct
import time
from threading import Thread
from numpy import *
# Global members, which are required for the communication
# with the remote IAS controller.
gDeviceIdentifier = sys.argv[1]
gControllerAddress = sys.argv[2]
gControllerPort = int(sys.argv[3])
gSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
gSocket.connect((gControllerAddress,gControllerPort))
gRunning = True
# Global state members.
gUpdateInterval = int(sys.argv[4])
gTemperatureResolution = 0.1 # In meters
gCoolingSystemWidth = 2 # In meters
gCoolingSystemDepth = 1 # In meters
gCoolingSystemHeight = 1.9 # In meters
gTemperatureMap = None
## COOLING SYSTEM FUNCTIONS ####################################################
def initializeTemperatureMap():
    global gTemperatureResolution
    global gCoolingSystemWidth
    global gCoolingSystemDepth
    global gCoolingSystemHeight
    global gTemperatureMap
    # Placeholder: allocate a 3D grid covering the cooling system volume at the configured resolution.
    gTemperatureMap = zeros((int(gCoolingSystemWidth / gTemperatureResolution),
                             int(gCoolingSystemDepth / gTemperatureResolution),
                             int(gCoolingSystemHeight / gTemperatureResolution)))
def updateTemperatureMap():
print("TODO Implement.")
## CORE FUNCTIONS ##############################################################
def updateState( stateIdentifier , newValue ):
global gSocket
stateIdentifierLength = len(stateIdentifier)
newValueLength = len(newValue)
data = struct.pack("!BBB",0x01,stateIdentifierLength,newValueLength);
data += str.encode(stateIdentifier)
data += str.encode(newValue)
gSocket.sendall(data)
def authenticate():
global gDeviceIdentifier
global gSocket;
identifierLength = len(gDeviceIdentifier)
message = struct.pack("!BB",0x00,identifierLength) + bytes(gDeviceIdentifier.encode("ascii"));
gSocket.sendall(message);
def processFeature(featureIdentifier,parameter):
# TODO Implement
print("Executing " + featureIdentifier + " with " + parameter)
def processCommand():
global gSocket
global gRunning
data = gSocket.recv(3);
data = struct.unpack("!BBB",data)
if( data[0] != 0x01 ):
gRunning = False
return
featureIdentifierLength = data[1]
parameterLength = data[2]
featureIdentifier = gSocket.recv(featureIdentifierLength)
featureIdentifier = featureIdentifier.decode("ascii")
if( parameterLength > 0 ):
parameter = gSocket.recv(parameterLength)
parameter = parameter.decode("ascii")
else:
parameter = ""
processFeature(featureIdentifier,parameter)
def processCommands():
global gRunning
while( gRunning ):
try:
processCommand()
        except Exception:
gRunning = False
def monitor():
global gRunning
global gUpdateInterval
while( gRunning ):
updateTemperatureMap()
time.sleep(gUpdateInterval)
def main():
authenticate()
thread = Thread(target = monitor)
initializeTemperatureMap()
thread.start()
processCommands()
thread.join()
if( __name__ == "__main__" ):
main()
```
#### File: scripts/python/framework.py
```python
import sys
import socket
import struct
# Global members, which are required for the communication
# with the remote IAS controller.
gDeviceIdentifier = sys.argv[1]
gControllerAddress = sys.argv[2]
gControllerPort = int(sys.argv[3])
gSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
gSocket.connect((gControllerAddress,gControllerPort))
gRunning = True
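# Wire format produced by updateState("temperature", "21.5") below (example values):
#   0x01 | len(stateIdentifier) | len(newValue) | b"temperature" | b"21.5"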
def updateState( stateIdentifier , newValue ):
global gSocket
stateIdentifierLength = len(stateIdentifier)
newValueLength = len(newValue)
data = struct.pack("!BBB",0x01,stateIdentifierLength,newValueLength);
data += str.encode(stateIdentifier)
data += str.encode(newValue)
gSocket.sendall(data)
def authenticate():
global gDeviceIdentifier
global gSocket;
identifierLength = len(gDeviceIdentifier)
message = struct.pack("!BB",0x00,identifierLength) + bytes(gDeviceIdentifier.encode("ascii"));
gSocket.sendall(message);
def processFeature(featureIdentifier,parameter):
# TODO Implement
print("Executing " + featureIdentifier + " with " + parameter)
def processCommand():
global gSocket
global gRunning
data = gSocket.recv(3);
data = struct.unpack("!BBB",data)
if( data[0] != 0x01 ):
gRunning = False
return
featureIdentifierLength = data[1]
parameterLength = data[2]
featureIdentifier = gSocket.recv(featureIdentifierLength)
featureIdentifier = featureIdentifier.decode("ascii")
if( parameterLength > 0 ):
parameter = gSocket.recv(parameterLength)
parameter = parameter.decode("ascii")
else:
parameter = ""
processFeature(featureIdentifier,parameter)
def processCommands():
global gRunning
while( gRunning ):
try:
processCommand()
        except Exception:
gRunning = False
def main():
authenticate()
processCommands()
if( __name__ == "__main__" ):
main()
``` |
{
"source": "JoeRimsky/Slackmojis-Webscraper",
"score": 3
} |
#### File: JoeRimsky/Slackmojis-Webscraper/Webscraper.py
```python
import argparse
import os
import requests
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from shutil import rmtree
from slack_emoji_upload import Slack
from urllib.parse import urlparse
class Webscraper():
def __init__(self, chromedriver_path):
options = Options()
options.headless = True
self.driver = webdriver.Chrome(executable_path=chromedriver_path, options=options)
self.destination = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'GIFs')
self.current_emojis = []
def start(self, links, slack):
def scrape():
for element in gif_elements:
url = element.get_attribute('src')
response = requests.get(url=url)
full_path = urlparse(url)
base_path = os.path.basename(full_path.path)
emoji_name = os.path.splitext(base_path)[0]
if emoji_name not in self.current_emojis:
with open((os.path.join(self.destination,emoji_name+'.gif')), 'ab+') as gif_out:
gif_out.write(response.content)
gif_out.seek(0)
image = {'image': gif_out}
slack.upload_emoji(name=emoji_name, image=image)
if not os.path.exists(self.destination):
os.makedirs(self.destination)
        if links:
            self.driver.get(links.pop(0))
            gif_elements = self.driver.find_elements_by_css_selector('img[loading="lazy"]')
            if gif_elements:
                scrape()
            # Keep working through the remaining category pages rather than stopping after the first match.
            if links:
                self.start(links, slack)
# Can be used to remove emojis
# Currently set to remove all current workspace emojis
def remove_emojis(self, slack):
for emoji in self.current_emojis:
slack.remove_emoji(emoji)
def cleanup(self):
self.driver.quit()
rmtree(self.destination)
if os.path.exists('debug.log'):
os.remove('debug.log')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='driver_path', help='Path to chromedriver.exe', required=True)
parser.add_argument('-w', dest='workspace', help='Slack workspace/team', required=True)
parser.add_argument('-t', dest='token', help='Slack token', required=True)
args = parser.parse_args()
return args.driver_path, args.workspace, args.token
def main():
chromedriver_path, workspace, token = parse_args()
scraper = Webscraper(chromedriver_path)
slack = Slack(workspace=workspace, token=token)
scraper.current_emojis = slack.get_current_list()
see_more_elements = scraper.driver.find_elements_by_class_name('seemore')
links = ['https://slackmojis.com/',]
for element in see_more_elements:
links.append(element.get_attribute('href'))
scraper.start(links, slack)
scraper.cleanup()
if __name__ == '__main__':
main()
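# Example invocation (hypothetical paths and token):
#   python Webscraper.py -d C:\tools\chromedriver.exe -w my-workspace -t my-slack-token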
``` |
{
"source": "JoerivanEngelen/Nile_Delta_post",
"score": 2
} |
#### File: JoerivanEngelen/Nile_Delta_post/determine_steady_state.py
```python
import xarray as xr
import pandas as pd
from glob import glob
import os
import numpy as np
from collections import defaultdict
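# Return the first time index at which `condition` holds; if it never holds,
# fall back to the second-to-last time step instead.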
def get_first_true(df, condition):
time = df[condition].iloc[0:1].index.values
if time.size == 0:
time = df.iloc[-2:-1].index.values
return(time)
#%%Path management
fw_path = r"./plots/FW_volumes/*-S_fw.nc"
fw_paths = glob(fw_path)
or_path = r"./plots/FW_volumes/*-S_origins.csv"
or_paths = glob(or_path)
#%%Read fresh water volumes
d_fw = {}
open_opt = dict(decode_times=False,
drop_variables = ["total_fw_pumpable", "total_onshore"])
for p in fw_paths:
name = os.path.basename(p).split("_fw.nc")[0]
d_fw[name] = xr.open_dataset(p, **open_opt)
#%%Differentiate
for name, ds in d_fw.items():
ds["fw_norm_diff"] = (
ds["total_fw"]/ds["total_fw"].max()
# ds["total_fw"]/8734.5725
).isel(time=slice(None, -7)).differentiate("time")
#%%time to reach steady state fw_vol
diff = xr.merge(
[ds["fw_norm_diff"].rename(name) for name, ds in d_fw.items()]
).drop(["dx", "dy"]).to_dataframe()
diff = np.log10(np.abs(diff))
time_steady={}
for name in diff.columns:
time_steady[name]=get_first_true(diff[name], diff[name] < -6)
#%%Read origins
colnames = []
d_or = defaultdict()
for csv in or_paths:
name = os.path.basename(csv).split("_origins.csv")[0]
d_or[name] = pd.read_csv(csv, header=0).set_index("time").drop(columns=["dx", "dy"])
colnames.extend([(name, var) for var in d_or[name].columns])
d_or = pd.concat(d_or, axis=1)
#%%Differentiate
#Use xarray to differentiate, as it automatically differentiates properly
tot_vol = d_or.loc[:, ("C-F-B-S", slice(None))].sum(axis=1).iloc[0]
diff_or = xr.Dataset(d_or/tot_vol).differentiate("time").to_dataframe()
diff_or = np.log10(np.abs(diff_or))
time_steady_or={}
for name in diff_or.columns:
time_steady_or[name]=get_first_true(diff_or[name], diff_or[name] < -6.25)
#All this stacking, resetting and dropping is to get the table in the right format
time_steady_or=pd.DataFrame(time_steady_or).stack().reset_index(level=[0]).drop(columns="level_0")
mx_time_steady_or = time_steady_or[time_steady_or.index=="River"].max(axis=0)
mx_time_steady_or.to_csv(os.path.join(os.path.dirname(or_path), "time_to_steady.csv"))
#%%
```
#### File: JoerivanEngelen/Nile_Delta_post/plot_isoline_distances.py
```python
import pandas as pd
import seaborn as sns
from glob import glob
import os
import matplotlib.pyplot as plt
import numpy as np
#%%Estimator
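# Median that returns NaN for bins with fewer than `lim_count` samples, so sparsely
# populated depths are dropped when this is used as the pointplot estimator.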
def median_count(arr, lim_count=100):
if len(arr) < lim_count:
return(np.nan)
else:
return(np.median(arr))
#%%Path Management
plotfolder = r"/plots/"
res_folder = os.path.join(plotfolder, "distance_isolines")
isoval=[3.0, 10., 20., 30.]
sns.set_style("darkgrid", {"axes.facecolor": ".65", "ytick.left": True})
colors = [[0.5703125, 0.7734375, 0.87109375],
[0.8671875, 0.9375, 0.8125],
[0.99609375, 0.87109375, 0.6015625],
[0.9609375, 0.5625, 0.32421875]]
color_pal = dict(i for i in zip(isoval, colors))
#%%Combination plot
fig, axes = plt.subplots(1, 4, figsize=(8, 4))
codes = ["H-T", "hO-F-T", "C-N-T", "hO-N-T",]
files = [os.path.join(res_folder, code+r".csv") for code in codes]
codes = ["O-N-T", "H-F-T", "C-N-T", "H-N-T"]
estimator = np.median
#estimator = median_count
for i, f in enumerate(files):
dist = pd.read_csv(f)
sns_plot = sns.pointplot(ax = axes[i], x = "distance (km)", y = "z (m)", data = dist, hue = "TDS (g/l)",
orient = "h", ci = None, estimator=estimator, markers=".", palette = color_pal)
#Make sure pointplot is rendered on top of stripplot.
plt.setp(sns_plot.lines, zorder=100)
plt.setp(sns_plot.collections, zorder=100, label="")
sns_plot = sns.stripplot(ax = axes[i], x = "distance (km)", y = "z (m)", data = dist, hue = "TDS (g/l)",
orient = "h", alpha = 0.05, jitter=True, dodge=True, palette = color_pal, size=2.5)
sns_plot.invert_yaxis()
sns_plot.set_xlim(-15, 15)
sns_plot.set_title(codes[i])
    set_d = dict(xlabel=r'$\omega$ (km)')
#Correct yticklabels
if i == 0:
nu_labels = sns_plot.get_yticklabels()[::2]
sns_plot.set_yticks(sns_plot.get_yticks()[::2])
sns_plot.set_yticklabels(nu_labels)
set_d["ylabel"]="z (m)"
else:
sns_plot.set_yticks([])
set_d["ylabel"]=""
axes[i].set(**set_d)
if f == files[-1]:
handles, labels = sns_plot.get_legend_handles_labels()
l = sns_plot.legend(handles[:len(isoval)], labels[:len(isoval)], title="TDS (g/l)",
handletextpad=0, columnspacing=1,
loc="lower right", ncol=1, frameon=True)
for text in l.get_texts():
text.set_color("white")
l.get_title().set_color("white")
else:
sns_plot.legend_.remove()
plt.tight_layout()
plt.savefig(os.path.join(plotfolder, r"distance_isolines", "distance_isolines_three_model.png"), dpi=300)
plt.savefig(os.path.join(plotfolder, r"distance_isolines", "distance_isolines_three_model.svg"))
plt.close()
``` |
{
"source": "joerivrij/harpokratos",
"score": 3
} |
#### File: samplestack/python-example/main.py
```python
from typing import List, Optional
from fastapi import FastAPI
import uvicorn
from pydantic import BaseModel
app = FastAPI()
class Item(BaseModel):
name: str
description: Optional[str] = None
price: float
tax: Optional[float] = None
tags: List[str] = []
@app.get("/api/v1/item", response_model=Item)
async def get_item():
item = Item(name="pong", price=5.0, tags=["some", "endpoint"], tax=21.0, description="one")
return item
if __name__ == '__main__':
uvicorn.run(app, port=5000, host='0.0.0.0')
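# Quick smoke test once the server is running (hypothetical local setup):
#   curl http://localhost:5000/api/v1/item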
``` |
{
"source": "joerivrij/lexiko",
"score": 2
} |
#### File: xerxes/locustfiles/sokrates-locustfile.py
```python
from locust import HttpUser, task, between
class Sokrates(HttpUser):
"""runs the loadtests for Sokrates api"""
wait_time = between(1, 5)
@task(1)
def health(self):
self.client.get("/sokrates/v1/health")
@task(1)
def ping(self):
self.client.get("/sokrates/v1/ping")
@task(6)
def create_nomina_question(self):
with self.client.get("/sokrates/v1/chapters/nomina", catch_response=True) as response:
chapters = response.json()['lastChapter']
for i in range(chapters):
self.client.get(f"/sokrates/v1/createQuestion?category=nomina&chapter={i+1}")
@task(6)
def create_verba_question(self):
with self.client.get("/sokrates/v1/chapters/verba", catch_response=True) as response:
chapters = response.json()['lastChapter']
for i in range(chapters):
self.client.get(f"/sokrates/v1/createQuestion?category=verba&chapter={i+1}")
@task(6)
def create_misc_question(self):
with self.client.get("/sokrates/v1/chapters/misc", catch_response=True) as response:
chapters = response.json()['lastChapter']
for i in range(chapters):
self.client.get(f"/sokrates/v1/createQuestion?category=misc&chapter={i+1}")
@task(8)
def check_answer(self):
body = {"answerProvided": "godin", "quizWord": "θεός", "category": "nomina"}
self.client.post("/sokrates/v1/answer", json=body)
``` |
{
"source": "JoeriWissink/OnlineCoalitionGame",
"score": 2
} |
#### File: OnlineCoalitionGame/Online_Coalition_Game_Introduction/views.py
```python
from otree.api import Currency as c, currency_range
from ._builtin import Page, WaitPage
from .models import Constants
class Overview(Page):
def vars_for_template(self):
return self.player.vars_for_template()
def is_displayed(self):
if self.participant.vars['kicked'] == False:
return True
class GeneralInstructions(Page):
def vars_for_template(self):
return self.player.vars_for_template()
def is_displayed(self):
if self.participant.vars['kicked'] == False:
return True
class InstructionsSeats(Page):
def vars_for_template(self):
return self.player.vars_for_template()
def is_displayed(self):
if self.participant.vars['kicked'] == False:
return True
class InstructionsPhases(Page):
def vars_for_template(self):
return self.player.vars_for_template()
def is_displayed(self):
if self.participant.vars['kicked'] == False:
return True
class Testinstructions(Page):
def vars_for_template(self):
return self.player.vars_for_template()
def is_displayed(self):
if self.session.config['earned'] == True and self.participant.vars['kicked'] == False:
return True
class PracticeFields(Page):
form_model = 'player'
form_fields = ['practice']
def vars_for_template(self):
return self.player.vars_for_template()
def practice_error_message(self, value):
if value != 150:
return 'This is incorrect. To make an offer of 150 million you need to insert 150 in the text box. Please try again.'
def is_displayed(self):
if self.participant.vars['kicked'] == False:
return True
class Testslider(Page):
def vars_for_template(self):
return self.player.vars_for_template()
def is_displayed(self):
if self.session.config['earned'] == True and self.participant.vars['kicked'] == False:
return True
def get_form_fields(self):
return ['tslider{}'.format(i) for i in range(1, 22)]
form_model = 'player'
def before_next_page(self):
self.player.tscore = 0
tsliders = [self.player.tslider1, self.player.tslider2, self.player.tslider3, self.player.tslider4,
self.player.tslider5, self.player.tslider6, self.player.tslider7, self.player.tslider8,
self.player.tslider9, self.player.tslider10, self.player.tslider11, self.player.tslider12,
self.player.tslider13, self.player.tslider14, self.player.tslider15, self.player.tslider16,
self.player.tslider17, self.player.tslider18, self.player.tslider19, self.player.tslider20,
self.player.tslider21]
for tslider in tsliders:
if tslider == 50:
self.player.tscore += 1
def get_timeout_seconds(self):
return self.session.config['slider_time']
class Testresults(Page):
def vars_for_template(self):
vars = self.player.vars_for_template()
vars.update({'tscore': self.player.tscore})
return vars
def is_displayed(self):
if self.session.config['earned'] == True and self.participant.vars['kicked'] == False:
return True
class Groupassignment(Page):
def is_displayed(self):
if self.participant.vars['kicked'] == False:
return True
def vars_for_template(self):
return self.player.vars_for_template()
def before_next_page(self):
import time
self.participant.vars['wait_page_arrival'] = time.time()
class Kicked(Page):
def is_displayed(self):
if self.participant.vars['kicked'] == True:
return True
else:
return False
def vars_for_template(self):
return self.player.vars_for_template()
page_sequence = [
Overview,
GeneralInstructions,
InstructionsSeats,
InstructionsPhases,
PracticeFields,
Testinstructions,
Testslider,
Testresults,
Groupassignment,
Kicked,
]
``` |
{
"source": "joerkig/simple-ohshapes-requests",
"score": 2
} |
#### File: joerkig/simple-ohshapes-requests/chatbot.py
```python
import sys
import os
import irc.bot
import requests
import subprocess
import zipfile
import re
class TwitchBot(irc.bot.SingleServerIRCBot):
def __init__(self, username, client_id, token, channel):
self.client_id = client_id
self.token = token
self.channel = '#' + channel
# Get the channel id, we will need this for v5 API calls
url = 'https://api.twitch.tv/kraken/users?login=' + channel
headers = {'Client-ID': client_id, 'Accept': 'application/vnd.twitchtv.v5+json'}
r = requests.get(url, headers=headers).json()
self.channel_id = r['users'][0]['_id']
# Create IRC bot connection
server = 'irc.chat.twitch.tv'
port = 6667
print ('Connecting to ' + server + ' on port ' + str(port) + '...')
irc.bot.SingleServerIRCBot.__init__(self, [(server, port, 'oauth:'+token)], username, username)
def on_welcome(self, c, e):
print ('Joining ' + self.channel)
# You must request specific capabilities before you can use them
c.cap('REQ', ':twitch.tv/membership')
c.cap('REQ', ':twitch.tv/tags')
c.cap('REQ', ':twitch.tv/commands')
c.join(self.channel)
message = "/me bot connected"
c.privmsg(self.channel, message)
def on_pubmsg(self, c, e):
# If a chat message starts with an exclamation point, try to run it as a command
print (e)
print (e.arguments[0])
if e.arguments[0][:1] == '!':
cmd = e.arguments[0].split(' ')[0][1:]
try:
arg1 = e.arguments[0].split(' ')[1][0:]
except IndexError:
arg1 = None
try:
sender = e.source.split('!')[0]
except IndexError:
sender = None
try:
mod = e.tags[8]['value']
except IndexError:
mod = None
print ('Received command: ' + cmd)
self.do_command(e, cmd, arg1, sender, mod)
return
def do_command(self, e, cmd, arg1, sender, mod):
try:
c = self.connection
howto = "To request a map find the key of it on http://ohshapes.com and put it behind this command"
notfound = "A map with that key was not found, recheck key on http://ohshapes.com"
# # Poll the API to get current game.
# if cmd == "game":
# url = 'https://api.twitch.tv/kraken/channels/' + self.channel_id
# headers = {'Client-ID': self.client_id, 'Accept': 'application/vnd.twitchtv.v5+json'}
# r = requests.get(url, headers=headers).json()
# c.privmsg(self.channel, r['display_name'] + ' is currently playing ' + r['game'])
#
# # Poll the API the get the current status of the stream
# elif cmd == "title":
# url = 'https://api.twitch.tv/kraken/channels/' + self.channel_id
# headers = {'Client-ID': self.client_id, 'Accept': 'application/vnd.twitchtv.v5+json'}
# r = requests.get(url, headers=headers).json()
# c.privmsg(self.channel, r['display_name'] + ' channel title is currently ' + r['status'])
#
# Responds with most recently uploaded map
if cmd == "oslatest":
url = 'http://ohshapes.com/api/maps/latest/0?'
r = requests.get(url).json()
c.privmsg(self.channel, 'Most recently uploaded was ' + r['docs'][0]['metadata']['songName'] + ' by ' + r['docs'][0]['metadata']['songAuthorName'] + ' uploaded by ' + r['docs'][0]['uploader']['username'] + " (" + r['docs'][0]['key'] + ")" )
# Request a map from http://OhShapes.com and put it in current directory
elif cmd == "osr":
if arg1 == None:
c.privmsg(self.channel, howto)
elif arg1 == "?":
c.privmsg(self.channel, howto)
elif arg1 == "help":
c.privmsg(self.channel, howto)
elif arg1 == "howto":
c.privmsg(self.channel, howto)
else:
url = 'http://ohshapes.com/api/maps/detail/'
try:
with open('blocklist.txt') as f:
blocked = [line.rstrip() for line in f]
if arg1 in blocked :
c.privmsg(self.channel, arg1 + ' was blocked')
else:
r = requests.get(url + arg1).json()
if len(str(r['stats']['rating'])[2:4]) == 1:
rating = str(r['stats']['rating'])[2:4] + "0"
else:
rating = str(r['stats']['rating'])[2:4]
c.privmsg(self.channel, r['metadata']['songName'] + ' ' + str(rating) + '% (' + r['key'] + ') was added to requests.')
                            # Downloading map
                            r2 = requests.get('http://ohshapes.com' + r['directDownload'])
                            # Build a filesystem-safe "<key> (<song> - <mapper>)" name for the zip and folder.
                            safe_song = re.sub('[^A-Za-z0-9 ]+', '', r['metadata']['songName'])
                            safe_author = re.sub('[^A-Za-z0-9 ]+', '', r['metadata']['levelAuthorName'])
                            map_name = r['key'] + ' (' + safe_song + ' - ' + safe_author + ')'
                            with open(map_name + '.zip', 'xb') as zip_out:
                                zip_out.write(r2.content)
                            os.mkdir(os.path.join(os.getcwd(), map_name))
                            # Unzipping
                            with zipfile.ZipFile(map_name + '.zip') as map_zip:
                                map_zip.extractall(os.path.join(os.getcwd(), map_name))
except FileExistsError:
print("File already exists")
except ValueError:
c.privmsg(self.channel, notfound)
# Block a map from being requested
elif cmd == "block":
if self.channel[1:] == sender or mod == "1" :
if arg1 == None:
c.privmsg(self.channel, 'Specify a key to block')
else:
# Open the file in append & read mode ('a+')
with open('blocklist.txt') as f:
blocked = [line.rstrip() for line in f]
if arg1 in blocked :
c.privmsg(self.channel, arg1 + ' is already blocked')
else:
with open("blocklist.txt", "a+") as file_object:
# Move read cursor to the start of file.
file_object.seek(0)
data = file_object.read(100)
# Append text at the end of file
file_object.write(arg1)
file_object.write("\n")
c.privmsg(self.channel, arg1 + ' has been blocked')
else:
c.privmsg(self.channel, 'Only mods can block')
# Unblock a map so it can be requested again
elif cmd == "unblock":
                if self.channel[1:] == sender or mod == "1":
with open('blocklist.txt') as f:
blocked = [line.rstrip() for line in f]
if arg1 == None:
c.privmsg(self.channel, 'Specify a key to block')
elif arg1 in blocked :
#read input file
fin = open("blocklist.txt", "rt")
#read file contents to string
data = fin.read()
#replace all occurrences of the required string
data = data.replace(arg1 + "\n" , '')
#close the input file
fin.close()
#open the input file in write mode
fin = open("blocklist.txt", "wt")
#overrite the input file with the resulting data
fin.write(data)
#close the file
fin.close()
c.privmsg(self.channel, arg1 + ' in now unblocked')
else:
c.privmsg(self.channel, arg1 + ' was not blocked')
else:
c.privmsg(self.channel, 'Only mods can unblock')
# The command was not recognized
else:
print("Did not understand command: " + cmd + " " + arg1 )
except:
c.privmsg(self.channel, 'Something went wrong, please contact joerkig#1337 on Discord')
def main():
if len(sys.argv) != 5:
print("Usage: twitchbot <username> <client id> <token> <channel>")
sys.exit(1)
if os.path.exists('blocklist.txt') == False :
open('blocklist.txt', 'a').close()
username = sys.argv[1]
client_id = sys.argv[2]
token = sys.argv[3]
channel = sys.argv[4]
bot = TwitchBot(username, client_id, token, channel)
bot.start()
if __name__ == "__main__":
main()
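# Example invocation matching the usage string above (hypothetical credentials):
#   python chatbot.py mybot my-client-id my-oauth-token mychannel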
``` |
{
"source": "joerlane/meerk40t",
"score": 2
} |
#### File: meerk40t/balormk/main.py
```python
import os
from meerk40t.balor.command_list import CommandList
from meerk40t.balormk.BalorDriver import BalorDriver
from meerk40t.core.spoolers import Spooler
from meerk40t.core.units import Length, ViewPort
from meerk40t.kernel import Service
from meerk40t.svgelements import Angle, Path, Point, Polygon
class BalorDevice(Service, ViewPort):
"""
The BalorDevice is a MeerK40t service for the device type. It should be the main method of interacting with
the rest of meerk40t. It defines how the scene should look and contains a spooler which meerk40t will give jobs
to. This class additionally defines commands which exist as console commands while this service is activated.
"""
def __init__(self, kernel, path, *args, **kwargs):
Service.__init__(self, kernel, path)
self.name = "balor"
_ = kernel.translation
self.register(
"format/op cut",
"{enabled}{pass}{element_type} {speed}mm/s @{power} {frequency}kHz",
)
self.register(
"format/op engrave",
"{enabled}{pass}{element_type} {speed}mm/s @{power} {frequency}kHz",
)
self.register(
"format/op hatch",
"{enabled}{penpass}{pass}{element_type} {speed}mm/s @{power} {frequency}kHz",
)
self.register(
"format/op raster",
"{enabled}{pass}{element_type}{direction}{speed}mm/s @{power} {frequency}kHz",
)
self.register(
"format/op image",
"{enabled}{penvalue}{pass}{element_type}{direction}{speed}mm/s @{power} {frequency}kHz",
)
self.register(
"format/op dots",
"{enabled}{pass}{element_type} {dwell_time}ms dwell {frequency}kHz",
)
self.register("format/op console", "{enabled}{command}")
choices = [
{
"attr": "label",
"object": self,
"default": "balor-device",
"type": str,
"label": _("Label"),
"tip": _("What is this device called."),
},
{
"attr": "corfile_enabled",
"object": self,
"default": False,
"type": bool,
"label": _("Enable Correction File"),
"tip": _("Use correction file?"),
},
{
"attr": "corfile",
"object": self,
"default": None,
"type": str,
"style": "file",
"wildcard": "*.cor",
"conditional": (self, "corfile_enabled"),
"label": _("Correction File"),
"tip": _("Provide a correction file for the machine"),
},
{
"attr": "lens_size",
"object": self,
"default": "110mm",
"type": Length,
"label": _("Width"),
"tip": _("Lens Size"),
},
{
"attr": "offset_x",
"object": self,
"default": "0mm",
"type": Length,
"label": _("Offset X"),
"tip": _("Offset in the X axis"),
},
{
"attr": "offset_y",
"object": self,
"default": "0mm",
"type": Length,
"label": _("Offset Y"),
"tip": _("Offset in the Y axis"),
},
{
"attr": "offset_angle",
"object": self,
"default": "0",
"type": Angle.parse,
"label": _("Angle"),
"tip": _("Angle to adjust fiber laser to match red laser"),
},
{
"attr": "scale_x",
"object": self,
"default": "0",
"type": float,
"label": _("Scale X"),
"tip": _("Scale the X axis"),
},
{
"attr": "scale_y",
"object": self,
"default": "0",
"type": float,
"label": _("Scale Y"),
"tip": _("Scale the Y axis"),
},
{
"attr": "flip_x",
"object": self,
"default": False,
"type": bool,
"label": _("Flip X"),
"tip": _("Flip the X axis for the Balor device"),
},
{
"attr": "flip_y",
"object": self,
"default": True,
"type": bool,
"label": _("Flip Y"),
"tip": _("Flip the Y axis for the Balor device"),
},
{
"attr": "interpolate",
"object": self,
"default": 50,
"type": int,
"label": _("Curve Interpolation"),
"tip": _("Number of curve interpolation points"),
},
{
"attr": "mock",
"object": self,
"default": False,
"type": bool,
"label": _("Run mock-usb backend"),
"tip": _(
"This starts connects to fake software laser rather than real one for debugging."
),
},
{
"attr": "machine_index",
"object": self,
"default": 0,
"type": int,
"label": _("Machine index to select"),
"tip": _(
"Which machine should we connect to? -- Leave at 0 if you have 1 machine."
),
},
]
self.register_choices("balor", choices)
choices = [
{
"attr": "redlight_speed",
"object": self,
"default": "8000",
"type": int,
"label": _("Redlight travel speed"),
"tip": _("Speed of the galvo when using the red laser."),
},
{
"attr": "redlight_offset_x",
"object": self,
"default": "0mm",
"type": Length,
"label": _("Redlight X Offset"),
"tip": _("Offset the redlight positions by this amount in x"),
},
{
"attr": "redlight_offset_y",
"object": self,
"default": "0mm",
"type": Length,
"label": _("Redlight Y Offset"),
"tip": _("Offset the redlight positions by this amount in y"),
},
]
self.register_choices("balor-redlight", choices)
choices = [
{
"attr": "default_power",
"object": self,
"default": 50.0,
"type": float,
"label": _("Laser Power"),
"tip": _("How what power level do we cut at?"),
},
{
"attr": "default_speed",
"object": self,
"default": 100.0,
"type": float,
"label": _("Cut Speed"),
"tip": _("How fast do we cut?"),
},
{
"attr": "default_frequency",
"object": self,
"default": 30.0,
"type": float,
"label": _("Q Switch Frequency"),
"tip": _("QSwitch Frequency value"),
},
{
"attr": "default_rapid_speed",
"object": self,
"default": 2000.0,
"type": float,
"label": _("Travel Speed"),
"tip": _("How fast do we travel when not cutting?"),
},
]
self.register_choices("balor-global", choices)
choices = [
{
"attr": "delay_laser_on",
"object": self,
"default": 100.0,
"type": float,
"label": _("Laser On Delay"),
"tip": _("Delay for the start of the laser"),
},
{
"attr": "delay_laser_off",
"object": self,
"default": 100.0,
"type": float,
"label": _("Laser Off Delay"),
"tip": _("Delay amount for the end of the laser"),
},
{
"attr": "delay_polygon",
"object": self,
"default": 100.0,
"type": float,
"label": _("Polygon Delay"),
"tip": _("Delay amount between different points in the path travel."),
},
{
"attr": "delay_end",
"object": self,
"default": 300.0,
"type": float,
"label": _("End Delay"),
"tip": _("Delay amount for the end TC"),
},
]
self.register_choices("balor-global-timing", choices)
choices = [
{
"attr": "first_pulse_killer",
"object": self,
"default": 200,
"type": int,
"label": _("First Pulse Killer"),
"tip": _("Unknown"),
},
{
"attr": "pwm_half_period",
"object": self,
"default": 125,
"type": int,
"label": _("PWM Half Period"),
"tip": _("Unknown"),
},
{
"attr": "pwm_pulse_width",
"object": self,
"default": 125,
"type": int,
"label": _("PWM Pulse Width"),
"tip": _("Unknown"),
},
{
"attr": "standby_param_1",
"object": self,
"default": 2000,
"type": int,
"label": _("Standby Parameter 1"),
"tip": _("Unknown"),
},
{
"attr": "standby_param_2",
"object": self,
"default": 20,
"type": int,
"label": _("Standby Parameter 2"),
"tip": _("Unknown"),
},
{
"attr": "timing_mode",
"object": self,
"default": 1,
"type": int,
"label": _("Timing Mode"),
"tip": _("Unknown"),
},
{
"attr": "delay_mode",
"object": self,
"default": 1,
"type": int,
"label": _("Delay Mode"),
"tip": _("Unknown"),
},
{
"attr": "laser_mode",
"object": self,
"default": 1,
"type": int,
"label": _("Laser Mode"),
"tip": _("Unknown"),
},
{
"attr": "control_mode",
"object": self,
"default": 0,
"type": int,
"label": _("Control Mode"),
"tip": _("Unknown"),
},
{
"attr": "fpk2_p1",
"object": self,
"default": 0xFFB,
"type": int,
"label": _("First Pulse Killer, Parameter 1"),
"tip": _("Unknown"),
},
{
"attr": "fpk2_p2",
"object": self,
"default": 1,
"type": int,
"label": _("First Pulse Killer, Parameter 2"),
"tip": _("Unknown"),
},
{
"attr": "fpk2_p3",
"object": self,
"default": 409,
"type": int,
"label": _("First Pulse Killer, Parameter 3"),
"tip": _("Unknown"),
},
{
"attr": "fpk2_p4",
"object": self,
"default": 100,
"type": int,
"label": _("First Pulse Killer, Parameter 4"),
"tip": _("Unknown"),
},
{
"attr": "fly_res_p1",
"object": self,
"default": 0,
"type": int,
"label": _("Fly Res, Parameter 1"),
"tip": _("Unknown"),
},
{
"attr": "fly_res_p2",
"object": self,
"default": 99,
"type": int,
"label": _("Fly Res, Parameter 2"),
"tip": _("Unknown"),
},
{
"attr": "fly_res_p3",
"object": self,
"default": 1000,
"type": int,
"label": _("Fly Res, Parameter 3"),
"tip": _("Unknown"),
},
{
"attr": "fly_res_p4",
"object": self,
"default": 25,
"type": int,
"label": _("Fly Res, Parameter 4"),
"tip": _("Unknown"),
},
]
self.register_choices("balor-extra", choices)
self.state = 0
unit_size = float(Length(self.lens_size))
galvo_range = 0xFFFF
units_per_galvo = unit_size / galvo_range
ViewPort.__init__(
self,
self.lens_size,
self.lens_size,
native_scale_x=units_per_galvo,
native_scale_y=units_per_galvo,
origin_x=1.0 if self.flip_x else 0.0,
origin_y=1.0 if self.flip_y else 0.0,
show_origin_x=0.5,
show_origin_y=0.5,
flip_x=self.flip_x,
flip_y=self.flip_y,
)
self.spooler = Spooler(self)
self.driver = BalorDriver(self)
self.spooler.driver = self.driver
self.add_service_delegate(self.spooler)
self.viewbuffer = ""
@self.console_command(
"spool",
help=_("spool <command>"),
regex=True,
input_type=(None, "plan", "device", "balor"),
output_type="spooler",
)
def spool(
command, channel, _, data=None, data_type=None, remainder=None, **kwgs
):
"""
Registers the spool command for the Balor driver.
"""
spooler = self.spooler
if data is not None:
if data_type == "balor":
spooler.job(("balor_job", data))
return "spooler", spooler
# If plan data is in data, then we copy that and move on to next step.
spooler.jobs(data.plan)
channel(_("Spooled Plan."))
self.signal("plan", data.name, 6)
if remainder is None:
channel(_("----------"))
channel(_("Spoolers:"))
for d, d_name in enumerate(self.match("device", suffix=True)):
channel("%d: %s" % (d, d_name))
channel(_("----------"))
channel(_("Spooler on device %s:" % str(self.label)))
for s, op_name in enumerate(spooler.queue):
channel("%d: %s" % (s, op_name))
channel(_("----------"))
return "spooler", spooler
@self.console_option(
"travel_speed", "t", type=float, help="Set the travel speed."
)
@self.console_option("power", "p", type=float, help="Set the power level")
@self.console_option(
"frequency", "q", type=float, help="Set the device's qswitch frequency"
)
@self.console_option(
"cut_speed", "s", type=float, help="Set the cut speed of the device"
)
@self.console_option("power", "p", type=float, help="Set the power level")
@self.console_option(
"laser_on_delay", "n", type=float, help="Sets the device's laser on delay"
)
@self.console_option(
"laser_off_delay", "f", type=float, help="Sets the device's laser off delay"
)
@self.console_option(
"polygon_delay",
"n",
type=float,
help="Sets the device's laser polygon delay",
)
@self.console_option(
"quantization",
"Q",
type=int,
default=500,
help="Number of line segments to break this path into",
)
@self.console_command(
"mark",
input_type="elements",
output_type="balor",
help=_("runs mark on path."),
)
def mark(
command,
channel,
_,
data=None,
travel_speed=None,
power=None,
frequency=None,
cut_speed=None,
laser_on_delay=None,
laser_off_delay=None,
polygon_delay=None,
quantization=500,
**kwgs,
):
"""
Mark takes in element types from element* or circle or hull and applies the mark settings, and outputs
a Balor job type. These could be spooled, looped, debugged or whatever else might be wanted/needed.
"""
channel("Creating mark job out of elements.")
paths = data
job = CommandList()
job.set_mark_settings(
travel_speed=self.default_rapid_speed
if travel_speed is None
else travel_speed,
power=self.default_power if power is None else power,
frequency=self.default_frequency if frequency is None else frequency,
cut_speed=self.default_speed if cut_speed is None else cut_speed,
laser_on_delay=self.delay_laser_on
if laser_on_delay is None
else laser_on_delay,
laser_off_delay=self.delay_laser_off
if laser_off_delay is None
else laser_off_delay,
polygon_delay=self.delay_polygon
if polygon_delay is None
else polygon_delay,
)
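            # Each selected path is approximated by `quantization` straight mark segments,
            # with every point converted from scene coordinates to device (galvo) coordinates.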
for e in paths:
x, y = e.point(0)
x, y = self.scene_to_device_position(x, y)
job.goto(x, y)
for i in range(1, quantization + 1):
x, y = e.point(i / float(quantization))
x, y = self.scene_to_device_position(x, y)
job.mark(x, y)
return "balor", job
@self.console_option(
"speed",
"s",
type=bool,
action="store_true",
help="Run this light job at slow speed for the parts that would have been cuts.",
)
@self.console_option(
"travel_speed", "t", type=float, help="Set the travel speed."
)
@self.console_option(
"simulation_speed",
"m",
type=float,
help="sets the simulation speed for this operation",
)
@self.console_option(
"quantization",
"Q",
type=int,
default=500,
help="Number of line segments to break this path into",
)
@self.console_command(
"light",
input_type="shapes",
output_type="balor",
help=_("runs light on events."),
)
def light(
command,
channel,
_,
speed=False,
travel_speed=None,
simulation_speed=None,
quantization=500,
data=None,
**kwgs,
):
"""
Creates a light job out of elements. If speed is set then
"""
channel("Creating light job out of elements.")
paths = data
if simulation_speed is not None:
# Simulation_speed implies speed
speed = True
if travel_speed is None:
travel_speed = self.default_rapid_speed
if speed:
# Travel at simulation speed.
if simulation_speed is None:
# if simulation speed was not set travel at cut_speed
simulation_speed = self.default_speed
job = CommandList(light_speed=simulation_speed, goto_speed=travel_speed)
else:
# Travel at redlight speed
job = CommandList(
light_speed=self.redlight_speed, goto_speed=travel_speed
)
for e in paths:
x, y = e.point(0)
x, y = self.scene_to_device_position(x, y)
job.light(x, y, False, jump_delay=200)
for i in range(1, quantization + 1):
x, y = e.point(i / float(quantization))
x, y = self.scene_to_device_position(x, y)
job.light(x, y, True, jump_delay=0)
job.light_off()
return "balor", job
@self.console_command(
"stop",
help=_("stops the idle running job"),
input_type=(None),
)
def stoplight(command, channel, _, data=None, remainder=None, **kwgs):
channel("Stopping idle job")
self.spooler.set_idle(None)
self.driver.connection.abort()
@self.console_command(
"estop",
help=_("stops the current job, deletes the spooler"),
input_type=(None),
)
def estop(command, channel, _, data=None, remainder=None, **kwgs):
channel("Stopping idle job")
self.spooler.set_idle(None)
self.spooler.clear_queue()
self.driver.connection.abort()
@self.console_command(
"pause",
help=_("Pauses the currently running job"),
)
def pause(command, channel, _, data=None, remainder=None, **kwgs):
if self.driver.paused:
channel("Resuming current job")
else:
channel("Pausing current job")
self.driver.pause()
@self.console_command(
"resume",
help=_("Resume the currently running job"),
)
def resume(command, channel, _, data=None, remainder=None, **kwgs):
channel("Resume the current job")
self.driver.resume()
@self.console_command(
"usb_connect",
help=_("connect usb"),
)
def usb_connect(command, channel, _, data=None, remainder=None, **kwgs):
self.driver.connect()
@self.console_command(
"usb_disconnect",
help=_("connect usb"),
)
def usb_connect(command, channel, _, data=None, remainder=None, **kwgs):
self.driver.disconnect()
@self.console_command(
"print",
help=_("print balor info about generated job"),
input_type="balor",
output_type="balor",
)
def balor_print(command, channel, _, data=None, remainder=None, **kwgs):
for d in data:
print(d)
return "balor", data
@self.console_argument("filename", type=str, default="balor.png")
@self.console_command(
"png",
help=_("save image of balor write data"),
input_type="balor",
output_type="balor",
)
def balor_png(command, channel, _, data=None, filename="balor.png", **kwargs):
from PIL import Image, ImageDraw
data.scale_x = 1.0
data.scale_y = 1.0
data.size = "decagalvo"
im = Image.new("RGB", (0xFFF, 0xFFF), color=0)
data.plot(ImageDraw.Draw(im), 0xFFF)
im.save(filename, format="png")
return "balor", data
@self.console_command(
"debug",
help=_("debug balor job block"),
input_type="balor",
output_type="balor",
)
def balor_debug(command, channel, _, data=None, **kwargs):
c = CommandList()
for packet in data.packet_generator():
c.add_packet(packet)
for operation in c:
print(operation.text_debug(show_tracking=True))
return "balor", data
@self.console_argument("filename", type=str, default="balor.bin")
@self.console_command(
"save",
help=_("print balor info about generated job"),
input_type="balor",
output_type="balor",
)
def balor_save(
command, channel, _, data=None, filename="balor.bin", remainder=None, **kwgs
):
with open(filename, "wb") as f:
for d in data:
f.write(d)
channel("Saved file {filename} to disk.".format(filename=filename))
return "balor", data
        @self.console_argument(
            "repeats", type=int, help="Number of times to duplicate the job", default=1
        )
@self.console_command(
"duplicate",
help=_("loop the selected job forever"),
input_type="balor",
output_type="balor",
)
def balor_dup(
command, channel, _, data=None, repeats=1, remainder=None, **kwgs
):
data.duplicate(1, None, repeats)
channel("Job duplicated")
return "balor", data
@self.console_command(
"loop",
help=_("loop the selected job forever"),
input_type="balor",
output_type="balor",
)
def balor_loop(command, channel, _, data=None, remainder=None, **kwgs):
self.driver.connect_if_needed()
channel("Looping job: {job}".format(job=str(data)))
self.spooler.set_idle(("light", data))
return "balor", data
@self.console_argument("x", type=float, default=0.0)
@self.console_argument("y", type=float, default=0.0)
@self.console_command(
"goto",
help=_("send laser a goto command"),
)
def balor_goto(command, channel, _, x=None, y=None, remainder=None, **kwgs):
if x is not None and y is not None:
rx = int(0x8000 + x) & 0xFFFF
ry = int(0x8000 + y) & 0xFFFF
self.driver.connect_if_needed()
self.driver.connection.set_xy(rx, ry)
@self.console_argument("off", type=str)
@self.console_command(
"red",
help=_("Turns redlight on/off"),
)
def balor_on(command, channel, _, off=None, remainder=None, **kwgs):
if off == "off":
self.driver.connect_if_needed()
reply = self.driver.connection.light_off()
self.driver.redlight_preferred = False
channel("Turning off redlight.")
else:
self.driver.connect_if_needed()
reply = self.driver.connection.light_on()
channel("Turning on redlight.")
self.driver.redlight_preferred = True
@self.console_command(
"status",
help=_("Sends status check"),
)
def balor_status(command, channel, _, remainder=None, **kwgs):
self.driver.connect_if_needed()
reply = self.driver.connection.read_port()
channel("Command replied: {reply}".format(reply=str(reply)))
for index, b in enumerate(reply):
channel(
"Bit {index}: {bits}".format(
index="{0:x}".format(index), bits="{0:b}".format(b)
)
)
@self.console_command(
"lstatus",
help=_("Checks the list status."),
)
        def balor_lstatus(command, channel, _, remainder=None, **kwgs):
self.driver.connect_if_needed()
reply = self.driver.connection.raw_get_list_status()
channel("Command replied: {reply}".format(reply=str(reply)))
for index, b in enumerate(reply):
channel(
"Bit {index}: {bits}".format(
index="{0:x}".format(index), bits="{0:b}".format(b)
)
)
@self.console_command(
"serial_number",
help=_("Checks the serial number."),
)
def balor_serial(command, channel, _, remainder=None, **kwgs):
self.driver.connect_if_needed()
reply = self.driver.connection.raw_get_serial_no()
channel("Command replied: {reply}".format(reply=str(reply)))
for index, b in enumerate(reply):
channel(
"Bit {index}: {bits}".format(
index="{0:x}".format(index), bits="{0:b}".format(b)
)
)
# @self.console_argument("filename", type=str, default=None)
# @self.console_command(
# "calibrate",
# help=_("set the calibration file"),
# )
# def set_calfile(command, channel, _, filename=None, remainder=None, **kwgs):
# if filename is None:
# calfile = self.calfile
# if calfile is None:
# channel("No calibration file set.")
# else:
# channel(
# "Calibration file is set to: {file}".format(file=self.calfile)
# )
# from os.path import exists
#
# if exists(calfile):
# channel("Calibration file exists!")
# cal = balor.Cal.Cal(calfile)
# if cal.enabled:
# channel("Calibration file successfully loads.")
# else:
# channel("Calibration file does not load.")
# else:
# channel("WARNING: Calibration file does not exist.")
# else:
# from os.path import exists
#
# if exists(filename):
# self.calfile = filename
# else:
# channel(
# "The file at {filename} does not exist.".format(
# filename=os.path.realpath(filename)
# )
# )
# channel("Calibration file was not set.")
@self.console_argument("filename", type=str, default=None)
@self.console_command(
"correction",
help=_("set the correction file"),
)
def set_corfile(command, channel, _, filename=None, remainder=None, **kwgs):
if filename is None:
file = self.corfile
if file is None:
channel("No correction file set.")
else:
channel(
"Correction file is set to: {file}".format(file=self.corfile)
)
from os.path import exists
if exists(file):
channel("Correction file exists!")
else:
channel("WARNING: Correction file does not exist.")
else:
from os.path import exists
if exists(filename):
self.corfile = filename
self.signal("corfile", filename)
else:
channel(
"The file at {filename} does not exist.".format(
filename=os.path.realpath(filename)
)
)
channel("Correction file was not set.")
@self.console_command(
"position",
help=_("give the position of the selection box in galvos"),
)
def galvo_pos(command, channel, _, data=None, args=tuple(), **kwargs):
"""
Draws an outline of the current shape.
"""
bounds = self.elements.selected_area()
if bounds is None:
channel(_("Nothing Selected"))
return
x0, y0 = self.scene_to_device_position(bounds[0], bounds[1])
x1, y1 = self.scene_to_device_position(bounds[2], bounds[3])
channel(
"Top Right: ({cx}, {cy}). Lower, Left: ({mx},{my})".format(
cx=x0, cy=y0, mx=x1, my=y1
)
)
@self.console_argument("lens_size", type=str, default=None)
@self.console_command(
"lens",
help=_("set the lens size"),
)
def galvo_lens(
command, channel, _, data=None, lens_size=None, args=tuple(), **kwargs
):
"""
Sets lens size.
"""
if lens_size is None:
raise SyntaxError
self.bedwidth = lens_size
self.bedheight = lens_size
channel(
"Set Bed Size : ({sx}, {sy}).".format(
sx=self.bedwidth, sy=self.bedheight
)
)
self.signal("bed_size")
@self.console_command(
"box",
help=_("outline the current selected elements"),
output_type="shapes",
)
def element_outline(command, channel, _, data=None, args=tuple(), **kwargs):
"""
Draws an outline of the current shape.
"""
bounds = self.elements.selected_area()
if bounds is None:
channel(_("Nothing Selected"))
return
xmin, ymin, xmax, ymax = bounds
channel("Element bounds: {bounds}".format(bounds=str(bounds)))
points = [
(xmin, ymin),
(xmax, ymin),
(xmax, ymax),
(xmin, ymax),
(xmin, ymin),
]
return "shapes", [Polygon(*points)]
@self.console_command(
"hull",
help=_("convex hull of the current selected elements"),
input_type=(None, "elements"),
output_type="shapes",
)
        def element_hull(command, channel, _, data=None, args=tuple(), **kwargs):
            """
            Computes the convex hull of the currently selected elements.
            """
if data is None:
data = list(self.elements.elems(emphasized=True))
pts = []
for e in data:
if e.type == "elem image":
bounds = e.bounds
pts += [
(bounds[0], bounds[1]),
(bounds[0], bounds[3]),
(bounds[2], bounds[1]),
(bounds[2], bounds[3]),
]
else:
try:
path = abs(Path(e.shape))
except AttributeError:
try:
path = abs(e.path)
except AttributeError:
continue
pts += [q for q in path.as_points()]
hull = [p for p in Point.convex_hull(pts)]
if len(hull) == 0:
channel(_("No elements bounds to trace."))
return
hull.append(hull[0]) # loop
return "shapes", [Polygon(*hull)]
def ant_points(points, steps):
points = list(points)
movement = 1 + int(steps / 10)
forward_steps = steps + movement
pos = 0
size = len(points)
cycles = int(size / movement) + 1
for cycle in range(cycles):
for f in range(pos, pos + forward_steps, 1):
index = f % size
point = points[index]
yield point
pos += forward_steps
for f in range(pos, pos - steps, -1):
index = f % size
point = points[index]
yield point
pos -= steps
@self.console_option(
"quantization",
"q",
default=500,
type=int,
help="Number of segments to break each path into.",
)
@self.console_command(
"ants",
help=_("Marching ants of the given element path."),
input_type=(None, "elements"),
output_type="shapes",
)
def element_ants(command, channel, _, data=None, quantization=500, **kwargs):
"""
Draws an outline of the current shape.
"""
if data is None:
data = list(self.elements.elems(emphasized=True))
points_list = []
points = list()
for e in data:
try:
path = e.as_path()
except AttributeError:
continue
for i in range(0, quantization + 1):
x, y = path.point(i / float(quantization))
points.append((x, y))
points_list.append(list(ant_points(points, int(quantization / 10))))
return "shapes", [Polygon(*p) for p in points_list]
@self.console_command(
"viewport_update",
hidden=True,
help=_("Update balor flips for movement"),
)
def codes_update(**kwargs):
self.realize()
# @self.console_option(
# "raster-x-res",
# help="X resolution (in mm) of the laser.",
# default=0.15,
# type=float,
# )
# @self.console_option(
# "raster-y-res",
# help="X resolution (in mm) of the laser.",
# default=0.15,
# type=float,
# )
# @self.console_option(
# "x",
# "xoffs",
# help="Specify an x offset for the image (mm.)",
# default=0.0,
# type=float,
# )
# @self.console_option(
# "y",
# "yoffs",
# help="Specify an y offset for the image (mm.)",
# default=0.0,
# type=float,
# )
# @self.console_option(
# "d", "dither", help="Configure dithering", default=0.1, type=float
# )
# @self.console_option(
# "s",
# "scale",
# help="Pixels per mm (default 23.62 px/mm - 600 DPI)",
# default=23.622047,
# type=float,
# )
# @self.console_option(
# "t",
# "threshold",
# help="Greyscale threshold for burning (default 0.5, negative inverts)",
# default=0.5,
# type=float,
# )
# @self.console_option(
# "g",
# "grayscale",
# help="Greyscale rastering (power, speed, q_switch_frequency, passes)",
# default=False,
# type=bool,
# )
# @self.console_option(
# "grayscale-min",
# help="Minimum (black=1) value of the gray scale",
# default=None,
# type=float,
# )
# @self.console_option(
# "grayscale-max",
# help="Maximum (white=255) value of the gray scale",
# default=None,
# type=float,
# )
# @self.console_command("balor-raster", input_type="image", output_type="balor")
# def balor_raster(
# command,
# channel,
# _,
# data=None,
# raster_x_res=0.15,
# raster_y_res=0.15,
# xoffs=0.0,
# yoffs=0.0,
# dither=0.1,
# scale=23.622047,
# threshold=0.5,
# grayscale=False,
# grayscale_min=None,
# grayscale_max=None,
# **kwgs,
# ):
# # def raster_render(self, job, cal, in_file, out_file, args):
# if len(data) == 0:
# channel("No image selected.")
# return
# in_file = data[0].image
# width = in_file.size[0] / scale
# height = in_file.size[1] / scale
# x0, y0 = xoffs, yoffs
#
# invert = False
# if threshold < 0:
# invert = True
# threshold *= -1.0
# dither = 0
# passes = 1
# if grayscale:
# gsmin = grayscale_min
# gsmax = grayscale_max
# gsslope = (gsmax - gsmin) / 256.0
# cal = None
# if self.calibration_file is not None:
# try:
# cal = Cal(self.calibration_file)
# except TypeError:
# pass
# job = CommandList(cal=cal)
#
# img = scipy.interpolate.RectBivariateSpline(
# np.linspace(y0, y0 + height, in_file.size[1]),
# np.linspace(x0, x0 + width, in_file.size[0]),
# np.asarray(in_file),
# )
#
# dither = 0
# job.set_mark_settings(
# travel_speed=self.travel_speed,
# power=self.laser_power,
# frequency=self.q_switch_frequency,
# cut_speed=self.cut_speed,
# laser_on_delay=self.delay_laser_on,
# laser_off_delay=self.delay_laser_off,
# polygon_delay=self.delay_polygon,
# )
# y = y0
# count = 0
# burning = False
# old_y = y0
# while y < y0 + height:
# x = x0
# job.goto(x, y)
# old_x = x0
# while x < x0 + width:
# px = img(y, x)[0][0]
# if invert:
# px = 255.0 - px
#
# if grayscale:
# if px > 0:
# gsval = gsmin + gsslope * px
# if grayscale == "power":
# job.set_power(gsval)
# elif grayscale == "speed":
# job.set_cut_speed(gsval)
# elif grayscale == "q_switch_frequency":
# job.set_frequency(gsval)
# elif grayscale == "passes":
# passes = int(round(gsval))
# # Would probably be better to do this over the course of multiple
# # rasters for heat disappation during 2.5D engraving
# # pp = int(round((int(px)/255) * args.laser_power * 40.95))
# # job.change_settings(q_switch_period, pp, cut_speed)
#
# if not burning:
# job.laser_control(True) # laser turn on
# i = passes
# while i > 1:
# job.mark(x, y)
# job.mark(old_x, old_y)
# i -= 2
# job.mark(x, y)
# burning = True
#
# else:
# if burning:
# # laser turn off
# job.laser_control(False)
# job.goto(x, y)
# burning = False
# else:
#
# if px + dither > threshold:
# if not burning:
# job.laser_control(True) # laser turn on
# job.mark(x, y)
# burning = True
# dither = 0.0
# else:
# if burning:
# # laser turn off
# job.laser_control(False)
# job.goto(x, y)
# dither += abs(px + dither - threshold) * dither
# burning = False
# old_x = x
# x += raster_x_res
# if burning:
# # laser turn off
# job.laser_control(False)
# burning = False
#
# old_y = y
# y += raster_y_res
# count += 1
# if not (count % 20):
# print("\ty = %.3f" % y, file=sys.stderr)
#
# return "balor", job
# @self.console_option(
# "travel_speed", "t", type=float, help="Set the travel speed."
# )
# @self.console_option("power", "p", type=float, help="Set the power level")
# @self.console_option(
# "frequency", "q", type=float, help="Set the device's qswitch frequency"
# )
# @self.console_option(
# "cut_speed", "s", type=float, help="Set the cut speed of the device"
# )
# @self.console_option("power", "p", type=float, help="Set the power level")
# @self.console_option(
# "laser_on_delay", "n", type=float, help="Sets the device's laser on delay"
# )
# @self.console_option(
# "laser_off_delay", "f", type=float, help="Sets the device's laser off delay"
# )
# @self.console_option(
# "polygon_delay",
# "n",
# type=float,
# help="Sets the device's laser polygon delay",
# )
# @self.console_option(
# "angle", "a", type=Angle.parse, default=0, help=_("Angle of the fill")
# )
# @self.console_option(
# "distance", "d", type=str, default="1mm", help=_("distance between rungs")
# )
# @self.console_command(
# "hatch",
# help=_("hatch <angle> <distance>"),
# output_type="balor",
# )
# def hatch(
# command,
# channel,
# _,
# angle=None,
# distance=None,
# travel_speed=None,
# power=None,
# frequency=None,
# cut_speed=None,
# laser_on_delay=None,
# laser_off_delay=None,
# polygon_delay=None,
# **kwargs,
# ):
# from meerk40t.balor.Cal import Cal
#
# cal = None
# if self.calibration_file is not None:
# try:
# cal = Cal(self.calibration_file)
# except TypeError:
# pass
# job = CommandList(cal=cal)
# job.set_mark_settings(
# travel_speed=self.travel_speed
# if travel_speed is None
# else travel_speed,
# power=self.laser_power if power is None else power,
# frequency=self.q_switch_frequency if frequency is None else frequency,
# cut_speed=self.cut_speed if cut_speed is None else cut_speed,
# laser_on_delay=self.delay_laser_on
# if laser_on_delay is None
# else laser_on_delay,
# laser_off_delay=self.delay_laser_off
# if laser_off_delay is None
# else laser_off_delay,
# polygon_delay=self.delay_polygon
# if polygon_delay is None
# else polygon_delay,
# )
# job.light_on()
# elements = self.elements
# channel(_("Hatch Filling"))
# if distance is not None:
# distance = "1mm"
# distance = float(Length(distance))
# transformed_vector = self._matrix.transform_vector([0, distance])
# distance = abs(complex(transformed_vector[0], transformed_vector[1]))
#
# efill = EulerianFill(distance)
# for element in elements.elems(emphasized=True):
# if not isinstance(element, Shape):
# continue
# e = abs(Path(element))
# e *= self._matrix
# if angle is not None:
# e *= Matrix.rotate(angle)
#
# pts = [abs(e).point(i / 100.0, error=1e-4) for i in range(101)]
# efill += pts
#
# points = efill.get_fill()
#
# def split(points):
# pos = 0
# for i, pts in enumerate(points):
# if pts is None:
# yield points[pos : i - 1]
# pos = i + 1
# if pos != len(points):
# yield points[pos : len(points)]
#
# for s in split(points):
# for p in s:
# if p.value == "RUNG":
# job.mark(p.x, p.y)
# if p.value == "EDGE":
# job.goto(p.x, p.y)
# return "balor", job
@property
def current(self):
"""
@return: the location in nm for the current known position.
"""
# return float(self.driver.native_x / self.width) * 0xFFF
return self.device_to_scene_position(
self.driver.native_x,
self.driver.native_y,
)
@property
def calibration_file(self):
return None
```
#### File: meerk40t/balormk/plugin.py
```python
def plugin(kernel, lifecycle):
if lifecycle == "plugins":
from meerk40t.balormk.gui import gui
return [gui.plugin]
if lifecycle == "invalidate":
try:
import numpy
except ImportError:
return True
if lifecycle == "register":
from meerk40t.balormk.main import BalorDevice
kernel.register("provider/device/balor", BalorDevice)
elif lifecycle == "preboot":
suffix = "balor"
for d in kernel.settings.derivable(suffix):
kernel.root(
"service device start -p {path} {suffix}\n".format(
path=d, suffix=suffix
)
)
```
#### File: meerk40t/core/elements.py
```python
import functools
import os.path
import re
from datetime import datetime
from copy import copy
from math import cos, gcd, isinf, pi, sin, sqrt, tau
from os.path import realpath
from random import randint, shuffle
from numpy import linspace
from meerk40t.core.exceptions import BadFileError
from meerk40t.kernel import CommandSyntaxError, Service, Settings
from ..svgelements import Angle, Color, Matrix, Path, SVGElement, Viewbox, SVG_RULE_EVENODD, SVG_RULE_NONZERO
from .cutcode import CutCode
from .element_types import *
from .node.elem_image import ImageNode
from .node.node import Node, Linecap, Linejoin, Fillrule
from .node.op_console import ConsoleOperation
from .node.op_cut import CutOpNode
from .node.op_dots import DotsOpNode
from .node.op_engrave import EngraveOpNode
from .node.op_hatch import HatchOpNode
from .node.op_image import ImageOpNode
from .node.op_raster import RasterOpNode
from .node.rootnode import RootNode
from .wordlist import Wordlist
from .units import UNITS_PER_PIXEL, Length
def plugin(kernel, lifecycle=None):
_ = kernel.translation
if lifecycle == "register":
kernel.add_service("elements", Elemental(kernel))
# kernel.add_service("elements", Elemental(kernel,1))
elif lifecycle == "postboot":
elements = kernel.elements
choices = [
{
"attr": "operation_default_empty",
"object": elements,
"default": True,
"type": bool,
"label": _("Default Operation Empty"),
"tip": _(
"Leave empty operations or default Other/Red/Blue"
),
},
{
"attr": "classify_reverse",
"object": elements,
"default": False,
"type": bool,
"label": _("Classify Reversed"),
"tip": _(
"Classify elements into operations in reverse order e.g. to match Inkscape's Object List"
),
},
{
"attr": "legacy_classification",
"object": elements,
"default": False,
"type": bool,
"label": _("Legacy Classify"),
"tip": _(
"Use the legacy classification algorithm rather than the modern classification algorithm."
),
},
]
kernel.register_choices("preferences", choices)
elif lifecycle == "prestart":
if hasattr(kernel.args, "input") and kernel.args.input is not None:
# Load any input file
elements = kernel.elements
try:
elements.load(realpath(kernel.args.input.name))
except BadFileError as e:
kernel._console_channel(_("File is Malformed") + ": " + str(e))
elif lifecycle == "poststart":
if hasattr(kernel.args, "output") and kernel.args.output is not None:
# output the file you have at this point.
elements = kernel.elements
elements.save(realpath(kernel.args.output.name))
def reversed_enumerate(collection: list):
for i in range(len(collection) - 1, -1, -1):
yield i, collection[i]
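# Added note (illustrative): reversed_enumerate walks a list back to front while
# preserving the original indices, e.g.
#   list(reversed_enumerate(["a", "b", "c"])) -> [(2, "c"), (1, "b"), (0, "a")]
# which lets callers remove items by index without shifting the positions still to visit.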
OP_PRIORITIES = ["op dots", "op image", "op raster", "op engrave", "op cut", "op hatch"]
# def is_dot(element):
# if not isinstance(element, Shape):
# return False
# if isinstance(element, Path):
# path = element
# else:
# path = element.segments()
#
# if len(path) == 2 and isinstance(path[0], Move):
# if isinstance(path[1], Close):
# return True
# if isinstance(path[1], Line) and path[1].length() == 0:
# return True
# return False
# def is_straight_line(element):
# if not isinstance(element, Shape):
# return False
# if isinstance(element, Path):
# path = element
# else:
# path = element.segments()
#
# if len(path) == 2 and isinstance(path[0], Move):
# if isinstance(path[1], Line) and path[1].length() > 0:
# return True
# return False
class Elemental(Service):
"""
The elemental service governs all interactions with the various elements,
operations, and filenodes, handling structure changes as well as selection,
emphasis, and highlighting changes. The goal of this module is to make sure that
the life cycle of the elements is strictly enforced. For example, every element
that is removed must have had its .cache deleted, and anything selecting an
element must propagate that information out to inform other interested modules.
"""
def __init__(self, kernel, index=None, *args, **kwargs):
Service.__init__(
self, kernel, "elements" if index is None else "elements%d" % index
)
self._clipboard = {}
self._clipboard_default = "0"
self.note = None
self._emphasized_bounds = None
self._emphasized_bounds_dirty = True
self._tree = RootNode(self)
self.setting(bool, "classify_reverse", False)
self.setting(bool, "legacy_classification", False)
self.setting(bool, "auto_note", True)
self.setting(bool, "uniform_svg", False)
self.setting(float, "svg_ppi", 96.0)
self.setting(bool, "operation_default_empty", True)
self.op_data = Settings(self.kernel.name, "operations.cfg")
self.pen_data = Settings(self.kernel.name, "penbox.cfg")
self.penbox = {}
self.load_persistent_penbox()
self.wordlists = {"version": [1, self.kernel.version]}
self._init_commands(kernel)
self._init_tree(kernel)
direct = os.path.dirname(self.op_data._config_file)
self.mywordlist = Wordlist(self.kernel.version, direct)
self.load_persistent_operations("previous")
ops = list(self.ops())
if not len(ops) and not self.operation_default_empty:
self.load_default()
def load_persistent_penbox(self):
settings = self.pen_data
pens = settings.read_persistent_string_dict("pens", suffix=True)
for pen in pens:
length = int(pens[pen])
box = list()
for i in range(length):
penbox = dict()
settings.read_persistent_string_dict(f'{pen} {i}', penbox, suffix=True)
box.append(penbox)
self.penbox[pen] = box
def save_persistent_penbox(self):
sections = {}
for section in self.penbox:
sections[section] = len(self.penbox[section])
self.pen_data.write_persistent_dict("pens", sections)
for section in self.penbox:
for i, p in enumerate(self.penbox[section]):
self.pen_data.write_persistent_dict(f'{section} {i}', p)
def wordlist_fetch(self, key):
try:
wordlist = self.wordlists[key]
except KeyError:
return None
try:
wordlist[0] += 1
return wordlist[wordlist[0]]
except IndexError:
wordlist[0] = 1
return wordlist[wordlist[0]]
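# Added note (illustrative): each wordlist entry is stored as [cursor, value, value, ...].
# wordlist_fetch advances the cursor on every call and returns the value at the new
# position, wrapping back to slot 1 when it runs past the end. For a hypothetical
# entry self.wordlists["names"] = [1, "alpha", "beta"], successive calls would return
# "beta", "alpha", "beta", ...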
def index_range(self, index_string):
"""
Parses index ranges in the form <idx>,<idx>-<idx>,<idx>
@param index_string:
@return:
"""
indexes = list()
for s in index_string.split(","):
q = list(s.split("-"))
if len(q) == 1:
indexes.append(int(q[0]))
else:
start = int(q[0])
end = int(q[1])
if start > end:
for q in range(end, start + 1):
indexes.append(q)
else:
for q in range(start, end + 1):
indexes.append(q)
return indexes
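# Added note (illustrative): index_range expands comma-separated indices and ranges,
# e.g. index_range("0,2-4,7") -> [0, 2, 3, 4, 7]; a reversed range such as "5-3"
# still yields the ascending list [3, 4, 5].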
def length(self, v):
return float(Length(v))
def length_x(self, v):
return float(Length(v, relative_length=self.device.width))
def length_y(self, v):
return float(Length(v, relative_length=self.device.height))
def area(self, v):
llx = Length(v, relative_length=self.device.width)
lx = float(llx)
if "%" in v:
lly = Length(v, relative_length=self.device.height)
else:
lly = Length("1{unit}".format(unit=llx._preferred_units))
ly = float(lly)
return lx * ly
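# Added note (illustrative): area() treats a percentage against both bed axes, so
# "50%" evaluates to (0.5 * device width) * (0.5 * device height), a quarter of the
# bed, while a plain length such as "10mm" is treated as 10mm x 1mm; results are in
# native units squared.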
def _init_commands(self, kernel):
_ = kernel.translation
@self.console_argument("filename")
@self.console_command(
"load",
help=_("loads file from working directory"),
input_type=None,
output_type="file",
)
def load(channel, _, filename=None, **kwargs):
import os
if filename is None:
channel(_("No file specified."))
return
new_file = os.path.join(self.kernel.current_directory, filename)
if not os.path.exists(new_file):
channel(_("No such file."))
return
try:
result = self.load(new_file)
if result:
channel(_("loading..."))
except AttributeError:
raise CommandSyntaxError(_("Loading files was not defined"))
return "file", new_file
# ==========
# WORDLISTS COMMANDS
# ==========
@self.console_command(
"wordlist",
help=_("Wordlist base operation"),
output_type="wordlist",
)
def wordlist(command, channel, _, remainder = None, **kwargs):
return "wordlist", ""
@self.console_argument("key", help=_("Wordlist value"))
@self.console_argument("value", help=_("Content"))
@self.console_command(
"add",
help=_("add value to wordlist"),
input_type="wordlist",
output_type="wordlist",
)
def wordlist_add(
command, channel, _, key=None, value=None, **kwargs
):
if key is not None:
if value is None:
value = ""
self.mywordlist.add(key, value)
return "wordlist", key
@self.console_argument("key", help=_("Wordlist value"))
@self.console_argument("value", help=_("Content"))
@self.console_command(
"addcounter",
help=_("add numeric counter to wordlist"),
input_type="wordlist",
output_type="wordlist",
)
def wordlist_addcounter(
command, channel, _, key=None, value=None, **kwargs
):
if key is not None:
if value is None:
value = 1
else:
try:
value = int(value)
except ValueError:
value = 1
self.mywordlist.add(key, value, 2)
return "wordlist", key
@self.console_argument("key", help=_("Wordlist value"))
@self.console_argument("index", help=_("index to use"))
@self.console_command(
"get",
help=_("get current value from wordlist"),
input_type="wordlist",
output_type="wordlist",
)
def wordlist_get(
command, channel, _, key=None, index=None, **kwargs
):
if key is not None:
result = self.mywordlist.fetch_value(skey=key, idx=index)
channel(str(result))
else:
channel(_("Missing key"))
result = ""
return "wordlist", result
@self.console_argument("key", help=_("Wordlist value"))
@self.console_argument("value", help=_("Wordlist value"))
@self.console_argument("index", help=_("index to use"))
@self.console_command(
"set",
help=_("set value to wordlist"),
input_type="wordlist",
output_type="wordlist",
)
def wordlist_set(
command, channel, _, key=None, value=None, index=None, **kwargs
):
if key is not None and value is not None:
self.mywordlist.set_value(skey=key, value=value, idx=index)
else:
channel(_("Not enough parameters given"))
return "wordlist", key
@self.console_argument("key", help=_("Individual wordlist value (use @ALL for all)"))
@self.console_argument("index", help=_("index to use"))
@self.console_command(
"index",
help=_("sets index in wordlist"),
input_type="wordlist",
output_type="wordlist",
)
def wordlist_index(
command, channel, _, key=None, index=None, **kwargs
):
if key is not None and index is not None:
try:
index = int(index)
except ValueError:
index = 0
self.mywordlist.set_index(skey=key,idx=index)
return "wordlist", key
@self.console_argument("filename", help=_("Wordlist file (if empty use mk40-default)"))
@self.console_command(
"restore",
help=_("Loads a previously saved wordlist"),
input_type="wordlist",
output_type="wordlist",
)
def wordlist_restore(
command, channel, _, filename=None, remainder=None, **kwargs
):
new_file = filename
if filename is not None:
new_file = os.path.join(self.kernel.current_directory, filename)
if not os.path.exists(new_file):
channel(_("No such file."))
return
self.mywordlist.load_data(new_file)
return "wordlist", ""
@self.console_argument("filename", help=_("Wordlist file (if empty use mk40-default)"))
@self.console_command(
"backup",
help=_("Saves the current wordlist"),
input_type="wordlist",
output_type="wordlist",
)
def wordlist_backup(
command, channel, _, filename=None, remainder=None, **kwargs
):
new_file = filename
if filename is not None:
new_file = os.path.join(self.kernel.current_directory, filename)
self.mywordlist.save_data(new_file)
return "wordlist", ""
@self.console_argument("key", help=_("Wordlist value"))
@self.console_command(
"list",
help=_("list wordlist values"),
input_type="wordlist",
output_type="wordlist",
)
def wordlist_list(
command, channel, _, key=None, **kwargs
):
channel("----------")
if key is None:
for skey in self.mywordlist.content:
channel(str(skey))
else:
if key in self.mywordlist.content:
wordlist = self.mywordlist.content[key]
channel(_("Wordlist %s (Type=%d, Index=%d)):") % (key, wordlist[0], wordlist[1]-2))
for idx, value in enumerate(wordlist[2:]):
channel("#%d: %s" % (idx, str(value)))
else:
channel(_("There is no such pattern %s") % key )
channel("----------")
return "wordlist", key
@self.console_argument("filename", help=_("CSV file"))
@self.console_command(
"load",
help=_("Attach a csv-file to the wordlist"),
input_type="wordlist",
output_type="wordlist",
)
def wordlist_load(
command, channel, _, filename=None, **kwargs
):
if filename is None:
channel(_("No file specified."))
return
new_file = os.path.join(self.kernel.current_directory, filename)
if not os.path.exists(new_file):
channel(_("No such file."))
return
rows, columns, names = self.mywordlist.load_csv_file(new_file)
channel (_("Rows added: %d") % rows)
channel (_("Values added: %d") % columns)
for name in names:
channel (" " + name)
return "wordlist", names
# ==========
# PENBOX COMMANDS
# ==========
@self.console_argument("key", help=_("Penbox key"))
@self.console_command(
"penbox",
help=_("Penbox base operation"),
input_type=None,
output_type="penbox",
)
def penbox(command, channel, _, key=None, remainder=None, **kwargs):
if remainder is None or key is None:
channel("----------")
if key is None:
for key in self.penbox:
channel(str(key))
else:
try:
for i, value in enumerate(self.penbox[key]):
channel(f"{i}: {str(value)}")
except KeyError:
channel(_("penbox does not exist"))
channel("----------")
return "penbox", key
@self.console_argument("count", help=_("Penbox count"), type=int)
@self.console_command(
"add",
help=_("add pens to the chosen penbox"),
input_type="penbox",
output_type="penbox",
)
def penbox_add(
command, channel, _, count=None, data=None, remainder=None, **kwargs
):
if count is None:
raise CommandSyntaxError
current = self.penbox.get(data)
if current is None:
current = list()
self.penbox[data] = current
current.extend([dict() for _ in range(count)])
return "penbox", data
@self.console_argument("count", help=_("Penbox count"), type=int)
@self.console_command(
"del",
help=_("delete pens to the chosen penbox"),
input_type="penbox",
output_type="penbox",
)
def penbox_del(
command, channel, _, count=None, data=None, remainder=None, **kwargs
):
if count is None:
raise CommandSyntaxError
current = self.penbox.get(data)
if current is None:
current = list()
self.penbox[data] = current
for _ in range(count):
try:
del current[-1]
except IndexError:
break
return "penbox", data
@self.console_argument("index", help=_("Penbox index"), type=self.index_range)
@self.console_argument("key", help=_("Penbox key"), type=str)
@self.console_argument("value", help=_("Penbox key"), type=str)
@self.console_command(
"set",
help=_("set value in penbox"),
input_type="penbox",
output_type="penbox",
)
def penbox_set(
command,
channel,
_,
index=None,
key=None,
value=None,
data=None,
remainder=None,
**kwargs,
):
if not value:
raise CommandSyntaxError
current = self.penbox.get(data)
if current is None:
current = list()
self.penbox[data] = current
rex = re.compile(r"([+-]?[0-9]+)(?:[,-]([+-]?[0-9]+))?")
m = rex.match(value)
if not m:
raise CommandSyntaxError
value = float(m.group(1))
end = m.group(2)
if end:
end = float(end)
if not end:
for i in index:
try:
current[i][key] = value
except IndexError:
pass
else:
r = len(index)
try:
s = (end - value) / (r - 1)
except ZeroDivisionError:
s = 0
d = 0
for i in index:
try:
current[i][key] = value + d
except IndexError:
pass
d += s
return "penbox", data
# ==========
# MATERIALS COMMANDS
# ==========
@self.console_command(
"material",
help=_("material base operation"),
input_type=(None, "ops"),
output_type="materials",
)
def materials(command, channel, _, data=None, remainder=None, **kwargs):
if data is None:
data = list(self.ops(emphasized=True))
if remainder is None:
channel("----------")
channel(_("Materials:"))
for section in self.op_data.section_set():
channel(section)
channel("----------")
return "materials", data
@self.console_argument("name", help=_("Name to save the materials under"))
@self.console_command(
"save",
help=_("Save current materials to persistent settings"),
input_type="materials",
output_type="materials",
)
def save_materials(command, channel, _, data=None, name=None, **kwargs):
if name is None:
raise CommandSyntaxError
self.save_persistent_operations(name)
return "materials", data
@self.console_argument("name", help=_("Name to load the materials from"))
@self.console_command(
"load",
help=_("Load materials from persistent settings"),
input_type="materials",
output_type="ops",
)
def load_materials(name=None, **kwargs):
if name is None:
raise CommandSyntaxError
self.load_persistent_operations(name)
return "ops", list(self.ops())
@self.console_argument("name", help=_("Name to delete the materials from"))
@self.console_command(
"delete",
help=_("Delete materials from persistent settings"),
input_type="materials",
output_type="materials",
)
def delete_materials(name=None, **kwargs):
if name is None:
raise CommandSyntaxError
self.clear_persistent_operations(name)
return "materials", list(self.ops())
@self.console_argument("name", help=_("Name to display the materials from"))
@self.console_command(
"list",
help=_("Show information about materials"),
input_type="materials",
output_type="materials",
)
def materials_list(channel, _, data=None, name=None, **kwargs):
channel("----------")
channel(_("Materials Current:"))
for section in self.op_data.section_set():
for subsect in self.op_data.derivable(section):
label = self.op_data.read_persistent(str, subsect, "label", "-")
channel(
"{subsection}: {label}".format(
section=section, subsection=subsect, label=label
)
)
channel("----------")
# ==========
# PENBOX OPERATION COMMANDS
# ==========
@self.console_argument("key", help=_("Penbox key"))
@self.console_command(
"penbox_pass",
help=_("Set the penbox_pass for the given operation"),
input_type="ops",
output_type="ops",
)
def penbox_pass(command, channel, _, key=None, remainder=None, data=None, **kwargs):
if data is not None:
if key is not None:
for op in data:
try:
op.settings["penbox_pass"] = key
channel(f"{str(op)} penbox_pass changed to {key}.")
except AttributeError:
pass
else:
if key is None:
channel("----------")
for op in data:
try:
key = op.settings.get("penbox_pass")
if key is None:
channel(f"{str(op)} penbox_pass is not set.")
else:
channel(f"{str(op)} penbox_pass is set to {key}.")
except AttributeError:
pass # No op.settings.
channel("----------")
return "ops", data
@self.console_argument("key", help=_("Penbox key"))
@self.console_command(
"penbox_value",
help=_("Set the penbox_value for the given operation"),
input_type="ops",
output_type="ops",
)
def penbox_value(command, channel, _, key=None, remainder=None, data=None, **kwargs):
if data is not None:
if key is not None:
for op in data:
try:
op.settings["penbox_value"] = key
channel(f"{str(op)} penbox_value changed to {key}.")
except AttributeError:
pass
else:
if key is None:
channel("----------")
for op in data:
try:
key = op.settings.get("penbox_value")
if key is None:
channel(f"{str(op)} penbox_value is not set.")
else:
channel(f"{str(op)} penbox_value is set to {key}.")
except AttributeError:
pass # No op.settings.
channel("----------")
return "ops", data
# ==========
# OPERATION BASE
# ==========
@self.console_command("operations", help=_("Show information about operations"))
def element(**kwargs):
self(".operation* list\n")
@self.console_command(
"operation.*", help=_("operation.*: selected operations"), output_type="ops"
)
def operation(**kwargs):
return "ops", list(self.ops(emphasized=True))
@self.console_command(
"operation*", help=_("operation*: all operations"), output_type="ops"
)
def operation(**kwargs):
return "ops", list(self.ops())
@self.console_command(
"operation~",
help=_("operation~: non selected operations."),
output_type="ops",
)
def operation(**kwargs):
return "ops", list(self.ops(emphasized=False))
@self.console_command(
"operation", help=_("operation: selected operations."), output_type="ops"
)
def operation(**kwargs):
return "ops", list(self.ops(emphasized=True))
@self.console_command(
r"operation([0-9]+,?)+",
help=_("operation0,2: operation #0 and #2"),
regex=True,
output_type="ops",
)
def operation(command, channel, _, **kwargs):
arg = command[9:]
op_values = []
for value in arg.split(","):
try:
value = int(value)
except ValueError:
continue
try:
op = self.get_op(value)
op_values.append(op)
except IndexError:
channel(_("index %d out of range") % value)
return "ops", op_values
@self.console_command(
"select",
help=_("Set these values as the selection."),
input_type="ops",
output_type="ops",
)
def operation_select(data=None, **kwargs):
self.set_emphasis(data)
return "ops", data
@self.console_command(
"select+",
help=_("Add the input to the selection"),
input_type="ops",
output_type="ops",
)
def operation_select_plus(data=None, **kwargs):
ops = list(self.ops(emphasized=True))
ops.extend(data)
self.set_emphasis(ops)
return "ops", ops
@self.console_command(
"select-",
help=_("Remove the input data from the selection"),
input_type="ops",
output_type="ops",
)
def operation_select_minus(data=None, **kwargs):
ops = list(self.ops(emphasized=True))
for e in data:
try:
ops.remove(e)
except ValueError:
pass
self.set_emphasis(ops)
return "ops", ops
@self.console_command(
"select^",
help=_("Toggle the input data in the selection"),
input_type="ops",
output_type="ops",
)
def operation_select_xor(data=None, **kwargs):
ops = list(self.ops(emphasized=True))
for e in data:
try:
ops.remove(e)
except ValueError:
ops.append(e)
self.set_emphasis(ops)
return "ops", ops
@self.console_argument("start", type=int, help=_("operation start"))
@self.console_argument("end", type=int, help=_("operation end"))
@self.console_argument("step", type=int, help=_("operation step"))
@self.console_command(
"range",
help=_("Subset existing selection by begin and end indices and step"),
input_type="ops",
output_type="ops",
)
def operation_select_range(data=None, start=None, end=None, step=1, **kwargs):
subops = list()
for e in range(start, end, step):
try:
subops.append(data[e])
except IndexError:
pass
self.set_emphasis(subops)
return "ops", subops
@self.console_argument("filter", type=str, help=_("Filter to apply"))
@self.console_command(
"filter",
help=_("Filter data by given value"),
input_type="ops",
output_type="ops",
)
def operation_filter(channel=None, data=None, filter=None, **kwargs):
"""
Apply a filter string to filter particular operations from the current data.
Operations are evaluated in an infix prioritized stack format without spaces.
Qualified values are speed, power, step, acceleration, passes, color, op, overscan, len
Valid operators are >, >=, <, <=, =, ==, +, -, *, /, &, &&, |, and ||
eg. filter speed>=10, filter speed=5+5, filter speed>power/10, filter speed==2*4+2
eg. filter engrave=op&speed=35|cut=op&speed=10
eg. filter len=0
"""
subops = list()
_filter_parse = [
("SKIP", r"[ ,\t\n\x09\x0A\x0C\x0D]+"),
("OP20", r"(\*|/)"),
("OP15", r"(\+|-)"),
("OP11", r"(<=|>=|==|!=)"),
("OP10", r"(<|>|=)"),
("OP5", r"(&&)"),
("OP4", r"(&)"),
("OP3", r"(\|\|)"),
("OP2", r"(\|)"),
("NUM", r"([-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?)"),
(
"COLOR",
r"(#[0123456789abcdefABCDEF]{6}|#[0123456789abcdefABCDEF]{3})",
),
(
"TYPE",
r"(raster|image|cut|engrave|dots|unknown|command|cutcode|lasercode)",
),
(
"VAL",
r"(speed|power|step|acceleration|passes|color|op|overscan|len)",
),
]
filter_re = re.compile(
"|".join("(?P<%s>%s)" % pair for pair in _filter_parse)
)
operator = list()
operand = list()
def filter_parser(text: str):
pos = 0
limit = len(text)
while pos < limit:
match = filter_re.match(text, pos)
if match is None:
break # No more matches.
kind = match.lastgroup
start = pos
pos = match.end()
if kind == "SKIP":
continue
value = match.group()
yield kind, value, start, pos
def solve_to(order: int):
try:
while len(operator) and operator[0][0] >= order:
_p, op = operator.pop()
v2 = operand.pop()
v1 = operand.pop()
try:
if op == "==" or op == "=":
operand.append(v1 == v2)
elif op == "!=":
operand.append(v1 != v2)
elif op == ">":
operand.append(v1 > v2)
elif op == "<":
operand.append(v1 < v2)
elif op == "<=":
operand.append(v1 <= v2)
elif op == ">=":
operand.append(v1 >= v2)
elif op == "&&" or op == "&":
operand.append(v1 and v2)
elif op == "||" or op == "|":
operand.append(v1 or v2)
elif op == "*":
operand.append(v1 * v2)
elif op == "/":
operand.append(v1 / v2)
elif op == "+":
operand.append(v1 + v2)
elif op == "-":
operand.append(v1 - v2)
except TypeError:
raise CommandSyntaxError("Cannot evaluate expression")
except ZeroDivisionError:
operand.append(float("inf"))
except IndexError:
pass
for e in data:
for kind, value, start, pos in filter_parser(filter):
if kind == "COLOR":
operand.append(Color(value))
elif kind == "VAL":
if value == "dpi":
operand.append(e.dpi)
elif value == "color":
operand.append(e.color)
elif value == "op":
operand.append(e.type.remove("op").strip())
elif value == "len":
operand.append(len(e.children))
else:
operand.append(e.settings.get(value))
elif kind == "NUM":
operand.append(float(value))
elif kind == "TYPE":
operand.append(value)
elif kind.startswith("OP"):
prec = int(kind[2:])
solve_to(prec)
operator.append((prec, value))
solve_to(0)
if len(operand) == 1:
if operand.pop():
subops.append(e)
else:
raise CommandSyntaxError(_("Filter parse failed"))
self.set_emphasis(subops)
return "ops", subops
@self.console_command(
"list",
help=_("Show information about the chained data"),
input_type="ops",
output_type="ops",
)
def operation_list(channel, _, data=None, **kwargs):
channel("----------")
channel(_("Operations:"))
index_ops = list(self.ops())
for op_obj in data:
i = index_ops.index(op_obj)
select_piece = "*" if op_obj.emphasized else " "
name = "%s %d: %s" % (select_piece, i, str(op_obj))
channel(name)
if isinstance(op_obj, list):
for q, oe in enumerate(op_obj):
stroke_piece = (
"None"
if (not hasattr(oe, "stroke") or oe.stroke) is None
else oe.stroke.hex
)
fill_piece = (
"None"
if (not hasattr(oe, "stroke") or oe.fill) is None
else oe.fill.hex
)
ident_piece = str(oe.id)
name = "%s%d: %s-%s s:%s f:%s" % (
"".ljust(5),
q,
str(type(oe).__name__),
ident_piece,
stroke_piece,
fill_piece,
)
channel(name)
channel("----------")
@self.console_option("color", "c", type=Color)
@self.console_option("default", "D", type=bool)
@self.console_option("speed", "s", type=float)
@self.console_option("power", "p", type=float)
@self.console_option("dpi", "d", type=int)
@self.console_option("overscan", "o", type=self.length)
@self.console_option("passes", "x", type=int)
@self.console_option(
"parallel",
"P",
type=bool,
help=_("Creates a new operation for each element given"),
action="store_true",
)
@self.console_option(
"stroke",
"K",
type=bool,
action="store_true",
help=_(
"Set the operation color based on the stroke if the first stroked item added to this operation"
),
)
@self.console_option(
"fill",
"F",
type=bool,
action="store_true",
help=_(
"Set the operation color based on the fill if the first filled item added to this operation"
),
)
@self.console_command(
("cut", "engrave", "raster", "imageop", "dots", "hatch"),
help=_(
"<cut/engrave/raster/imageop/dots/hatch> - group the elements into this operation"
),
input_type=(None, "elements"),
output_type="ops",
)
def makeop(
command,
data=None,
color=None,
default=None,
speed=None,
power=None,
dpi=None,
overscan=None,
passes=None,
parallel=False,
stroke=False,
fill=False,
**kwargs,
):
op_list = []
def make_op():
if command == "cut":
return CutOpNode()
elif command == "engrave":
return EngraveOpNode()
elif command == "raster":
return RasterOpNode()
elif command == "imageop":
return ImageOpNode()
elif command == "dots":
return DotsOpNode()
elif command == "hatch":
return HatchOpNode()
else:
raise ValueError
if parallel:
if data is None:
return "op", []
for item in data:
op = make_op()
if color is not None:
op.color = color
elif fill:
try:
op.color = item.fill
except AttributeError:
continue
elif stroke:
try:
op.color = item.stroke
except AttributeError:
continue
if default is not None:
op.default = default
if speed is not None:
op.speed = speed
if power is not None:
op.power = power
if passes is not None:
op.passes_custom = True
op.passes = passes
if dpi is not None:
op.dpi = dpi
if overscan is not None:
op.overscan = overscan
self.add_op(op)
op.add_reference(item)
op_list.append(op)
else:
op = make_op()
if color is not None:
op.color = color
elif fill:
try:
op.color = data[0].fill
except (AttributeError, IndexError):
pass
elif stroke:
try:
op.color = data[0].stroke
except (AttributeError, IndexError):
pass
if default is not None:
op.default = default
if speed is not None:
op.speed = speed
if power is not None:
op.power = power
if passes is not None:
op.passes_custom = True
op.passes = passes
if dpi is not None:
op.dpi = dpi
if overscan is not None:
op.overscan = overscan
self.add_op(op)
if data is not None:
for item in data:
op.add_reference(item)
op_list.append(op)
return "ops", op_list
@self.console_argument("dpi", type=int, help=_("raster dpi"))
@self.console_command("dpi", help=_("dpi <raster-dpi>"), input_type="ops")
def op_dpi(command, channel, _, data, dpi=None, **kwrgs):
if dpi is None:
found = False
for op in data:
if op.type in ("op raster", "op image"):
dpi = op.dpi
channel(_("Step for %s is currently: %d") % (str(op), dpi))
found = True
if not found:
channel(_("No raster operations selected."))
return
for op in data:
if op.type in ("op raster", "op image"):
op.dpi = dpi
op.notify_update()
return "ops", data
@self.console_option(
"difference",
"d",
type=bool,
action="store_true",
help=_("Change speed by this amount."),
)
@self.console_option(
"progress",
"p",
type=bool,
action="store_true",
help=_("Change speed for each item in order"),
)
@self.console_argument("speed", type=str, help=_("operation speed in mm/s"))
@self.console_command(
"speed", help=_("speed <speed>"), input_type="ops", output_type="ops"
)
def op_speed(
command,
channel,
_,
speed=None,
difference=False,
progress=False,
data=None,
**kwrgs,
):
if speed is None:
for op in data:
old = op.speed
channel(_("Speed for '%s' is currently: %f") % (str(op), old))
return
if speed.endswith("%"):
speed = speed[:-1]
percent = True
else:
percent = False
try:
new_speed = float(speed)
except ValueError:
channel(_("Not a valid speed or percent."))
return
delta = 0
for op in data:
old = op.speed
if percent and difference:
s = old + old * (new_speed / 100.0)
elif difference:
s = old + new_speed
elif percent:
s = old * (new_speed / 100.0)
elif progress:
s = old + delta
delta += new_speed
else:
s = new_speed
if s < 0:
s = 0
op.speed = s
channel(_("Speed for '%s' updated %f -> %f") % (str(op), old, s))
op.notify_update()
return "ops", data
@self.console_argument(
"power", type=int, help=_("power in pulses per inch (ppi, 1000=max)")
)
@self.console_option(
"difference",
"d",
type=bool,
action="store_true",
help=_("Change power by this amount."),
)
@self.console_option(
"progress",
"p",
type=bool,
action="store_true",
help=_("Change power for each item in order"),
)
@self.console_command(
"power", help=_("power <ppi>"), input_type="ops", output_type="ops"
)
def op_power(
command,
channel,
_,
power=None,
difference=False,
progress=False,
data=None,
**kwrgs,
):
if power is None:
for op in data:
old = op.power
channel(_("Power for '%s' is currently: %d") % (str(op), old))
return
delta = 0
for op in data:
old = op.power
if progress:
s = old + delta
delta += power
elif difference:
s = old + power
else:
s = power
if s > 1000:
s = 1000
if s < 0:
s = 0
op.power = s
channel(_("Power for '%s' updated %d -> %d") % (str(op), old, s))
op.notify_update()
return "ops", data
@self.console_argument(
"frequency", type=float, help=_("frequency set for operation")
)
@self.console_option(
"difference",
"d",
type=bool,
action="store_true",
help=_("Change speed by this amount."),
)
@self.console_option(
"progress",
"p",
type=bool,
action="store_true",
help=_("Change speed for each item in order"),
)
@self.console_command(
"frequency", help=_("frequency <kHz>"), input_type="ops", output_type="ops"
)
def op_frequency(
command,
channel,
_,
frequency=None,
difference=False,
progress=False,
data=None,
**kwrgs,
):
if frequency is None:
for op in data:
old = op.frequency
channel(_("Frequency for '%s' is currently: %f") % (str(op), old))
return
delta = 0
for op in data:
old = op.frequency
if progress:
s = old + delta
delta += frequency
elif difference:
s = old + frequency
else:
s = frequency
if s < 0:
s = 0
op.frequency = s
channel(_("Frequency for '%s' updated %f -> %f") % (str(op), old, s))
op.notify_update()
return "ops", data
@self.console_argument("passes", type=int, help=_("Set operation passes"))
@self.console_command(
"passes", help=_("passes <passes>"), input_type="ops", output_type="ops"
)
def op_passes(command, channel, _, passes=None, data=None, **kwrgs):
if passes is None:
for op in data:
old_passes = op.passes
channel(
_("Passes for '%s' is currently: %d") % (str(op), old_passes)
)
return
for op in data:
old_passes = op.passes
op.passes = passes
if passes >= 1:
op.passes_custom = True
channel(
_("Passes for '%s' updated %d -> %d")
% (str(op), old_passes, passes)
)
op.notify_update()
return "ops", data
@self.console_argument(
"distance", type=Length, help=_("Set hatch-distance of operations")
)
@self.console_option(
"difference",
"d",
type=bool,
action="store_true",
help=_("Change hatch-distance by this amount."),
)
@self.console_option(
"progress",
"p",
type=bool,
action="store_true",
help=_("Change hatch-distance for each item in order"),
)
@self.console_command(
"hatch-distance",
help=_("hatch-distance <distance>"),
input_type="ops",
output_type="ops",
)
def op_hatch_distance(
command,
channel,
_,
distance=None,
difference=False,
progress=False,
data=None,
**kwrgs,
):
if distance is None:
for op in data:
old = op.hatch_distance
channel(
_("Hatch Distance for '%s' is currently: %s") % (str(op), old)
)
return
delta = 0
for op in data:
old = Length(op.hatch_distance)
if progress:
s = float(old) + delta
delta += float(distance)
elif difference:
s = float(old) + float(distance)
else:
s = float(distance)
if s < 0:
s = 0
op.hatch_distance = Length(amount=s).length_mm
channel(
_("Hatch Distance for '%s' updated %s -> %s")
% (str(op), old, op.hatch_distance)
)
op.notify_update()
return "ops", data
@self.console_argument(
"angle", type=Angle.parse, help=_("Set hatch-angle of operations")
)
@self.console_option(
"difference",
"d",
type=bool,
action="store_true",
help=_("Change hatch-distance by this amount."),
)
@self.console_option(
"progress",
"p",
type=bool,
action="store_true",
help=_("Change hatch-distance for each item in order"),
)
@self.console_command(
"hatch-angle",
help=_("hatch-angle <angle>"),
input_type="ops",
output_type="ops",
)
def op_hatch_angle(
command,
channel,
_,
angle=None,
difference=False,
progress=False,
data=None,
**kwrgs,
):
if angle is None:
for op in data:
old = f"{Angle.parse(op.hatch_angle).as_turns:.4f}turn"
old_hatch_angle_deg = (
f"{Angle.parse(op.hatch_angle).as_degrees:.4f}deg"
)
channel(
_("Hatch Distance for '%s' is currently: %s (%s)")
% (str(op), old, old_hatch_angle_deg)
)
return
delta = 0
for op in data:
old = Angle.parse(op.hatch_angle)
if progress:
s = old + delta
delta += angle
elif difference:
s = old + angle
else:
s = angle
s = Angle.radians(float(s))
op.hatch_angle = f"{s.as_turns}turn"
new_hatch_angle_turn = f"{s.as_turns:.4f}turn"
new_hatch_angle_deg = f"{s.as_degrees:.4f}deg"
channel(
_("Hatch Angle for '%s' updated %s -> %s (%s)")
% (
str(op),
f"{old.as_turns:.4f}turn",
new_hatch_angle_turn,
new_hatch_angle_deg,
)
)
op.notify_update()
return "ops", data
@self.console_command(
"disable",
help=_("Disable the given operations"),
input_type="ops",
output_type="ops",
)
def op_disable(command, channel, _, data=None, **kwrgs):
for op in data:
op.output = False
channel(_("Operation '%s' disabled.") % str(op))
op.notify_update()
return "ops", data
@self.console_command(
"enable",
help=_("Enable the given operations"),
input_type="ops",
output_type="ops",
)
def op_enable(command, channel, _, data=None, **kwrgs):
for op in data:
op.output = True
channel(_("Operation '%s' enabled.") % str(op))
op.notify_update()
return "ops", data
# ==========
# ELEMENT/OPERATION SUBCOMMANDS
# ==========
@self.console_command(
"copy",
help=_("Duplicate elements"),
input_type=("elements", "ops"),
output_type=("elements", "ops"),
)
def e_copy(data=None, data_type=None, **kwargs):
add_elem = list(map(copy, data))
if data_type == "ops":
self.add_ops(add_elem)
else:
self.add_elems(add_elem)
return data_type, add_elem
@self.console_command(
"delete", help=_("Delete elements"), input_type=("elements", "ops")
)
def e_delete(command, channel, _, data=None, data_type=None, **kwargs):
channel(_("Deleting…"))
if data_type == "elements":
self.remove_elements(data)
else:
self.remove_operations(data)
self.signal("tree_changed")
# ==========
# ELEMENT BASE
# ==========
@self.console_command(
"elements",
help=_("Show information about elements"),
)
def element(**kwargs):
self(".element* list\n")
@self.console_command(
"element*",
help=_("element*, all elements"),
output_type="elements",
)
def element_star(**kwargs):
return "elements", list(self.elems())
@self.console_command(
"element~",
help=_("element~, all non-selected elements"),
output_type="elements",
)
def element_not(**kwargs):
return "elements", list(self.elems(emphasized=False))
@self.console_command(
"element",
help=_("element, selected elements"),
output_type="elements",
)
def element_base(**kwargs):
return "elements", list(self.elems(emphasized=True))
@self.console_command(
r"element([0-9]+,?)+",
help=_("element0,3,4,5: chain a list of specific elements"),
regex=True,
output_type="elements",
)
def element_chain(command, channel, _, **kwargs):
arg = command[7:]
elements_list = []
for value in arg.split(","):
try:
value = int(value)
except ValueError:
continue
try:
e = self.get_elem(value)
elements_list.append(e)
except IndexError:
channel(_("index %d out of range") % value)
return "elements", elements_list
# ==========
# ELEMENT SUBCOMMANDS
# ==========
# @self.console_argument("step_size", type=int, help=_("element step size"))
# @self.console_command(
# "step",
# help=_("step <element step-size>"),
# input_type="elements",
# output_type="elements",
# )
# def step_command(command, channel, _, data, step_size=None, **kwrgs):
# if step_size is None:
# found = False
# for element in data:
# if isinstance(element, SVGImage):
# try:
# step = element.values["raster_step"]
# except KeyError:
# step = 1
# channel(
# _("Image step for %s is currently: %s")
# % (str(element), step)
# )
# found = True
# if not found:
# channel(_("No image element selected."))
# return
# for element in data:
# element.values["raster_step"] = str(step_size)
# m = element.transform
# tx = m.e
# ty = m.f
# element.transform = Matrix.scale(float(step_size), float(step_size))
# element.transform.post_translate(tx, ty)
# if hasattr(element, "node"):
# element.node.modified()
# self.signal("element_property_reload", element)
# return ("elements",)
@self.console_command(
"select",
help=_("Set these values as the selection."),
input_type="elements",
output_type="elements",
)
def element_select_base(data=None, **kwargs):
self.set_emphasis(data)
return "elements", data
@self.console_command(
"select+",
help=_("Add the input to the selection"),
input_type="elements",
output_type="elements",
)
def element_select_plus(data=None, **kwargs):
elems = list(self.elems(emphasized=True))
elems.extend(data)
self.set_emphasis(elems)
return "elements", elems
@self.console_command(
"select-",
help=_("Remove the input data from the selection"),
input_type="elements",
output_type="elements",
)
def element_select_minus(data=None, **kwargs):
elems = list(self.elems(emphasized=True))
for e in data:
try:
elems.remove(e)
except ValueError:
pass
self.set_emphasis(elems)
return "elements", elems
@self.console_command(
"select^",
help=_("Toggle the input data in the selection"),
input_type="elements",
output_type="elements",
)
def element_select_xor(data=None, **kwargs):
elems = list(self.elems(emphasized=True))
for e in data:
try:
elems.remove(e)
except ValueError:
elems.append(e)
self.set_emphasis(elems)
return "elements", elems
@self.console_command(
"list",
help=_("Show information about the chained data"),
input_type="elements",
output_type="elements",
)
def element_list(command, channel, _, data=None, **kwargs):
channel("----------")
channel(_("Graphical Elements:"))
index_list = list(self.elems())
for e in data:
i = index_list.index(e)
name = str(e)
if len(name) > 50:
name = name[:50] + "…"
if e.emphasized:
channel("%d: * %s" % (i, name))
else:
channel("%d: %s" % (i, name))
channel("----------")
return "elements", data
@self.console_argument("start", type=int, help=_("elements start"))
@self.console_argument("end", type=int, help=_("elements end"))
@self.console_argument("step", type=int, help=_("elements step"))
@self.console_command(
"range",
help=_("Subset selection by begin & end indices and step"),
input_type="elements",
output_type="elements",
)
def element_select_range(data=None, start=None, end=None, step=1, **kwargs):
subelem = list()
for e in range(start, end, step):
try:
subelem.append(data[e])
except IndexError:
pass
self.set_emphasis(subelem)
return "elements", subelem
@self.console_command(
"merge",
help=_("merge elements"),
input_type="elements",
output_type="elements",
)
def element_merge(data=None, **kwargs):
super_element = Path()
for e in data:
try:
path = e.as_path()
except AttributeError:
continue
if super_element.stroke is None:
super_element.stroke = e.stroke
if super_element.fill is None:
super_element.fill = e.fill
super_element += path
self.remove_elements(data)
node = self.elem_branch.add(path=super_element, type="elem path")
node.emphasized = True
self.classify([node])
return "elements", [node]
@self.console_command(
"subpath",
help=_("break elements"),
input_type="elements",
output_type="elements",
)
def element_subpath(data=None, **kwargs):
if not isinstance(data, list):
data = list(data)
elements_nodes = []
elements = []
for node in data:
group_node = node.replace_node(type="group", label=node.label)
try:
p = node.as_path()
except AttributeError:
continue
for subpath in p.as_subpaths():
subelement = Path(subpath)
elements.append(subelement)
group_node.add(path=subelement, type="elem path")
elements_nodes.append(group_node)
self.classify(elements)
return "elements", elements_nodes
# ==========
# ALIGN SUBTYPE
# Align consist of top level node objects that can be manipulated within the scene.
# ==========
@self.console_command(
"align",
help=_("align selected elements"),
input_type=("elements", None),
output_type="align",
)
def subtype_align(command, channel, _, data=None, remainder=None, **kwargs):
if not remainder:
channel(
"top\nbottom\nleft\nright\ncenter\ncenterh\ncenterv\nspaceh\nspacev\n"
"<any valid svg:Preserve Aspect Ratio, eg xminymin>"
)
return
if data is None:
data = list(self.elems(emphasized=True))
# Element conversion.
d = list()
elem_branch = self.elem_branch
for node in data:
while node.parent and node.parent is not elem_branch:
node = node.parent
if node not in d:
d.append(node)
data = d
return "align", data
@self.console_command(
"top",
help=_("align elements at top"),
input_type="align",
output_type="align",
)
def subtype_align(command, channel, _, data=None, **kwargs):
boundary_points = []
for node in data:
boundary_points.append(node.bounds)
if not len(boundary_points):
return
top_edge = min([e[1] for e in boundary_points])
for node in data:
subbox = node.bounds
top = subbox[1] - top_edge
matrix = "translate(0, %f)" % -top
if top != 0:
for q in node.flat(types=elem_nodes):
try:
q.matrix *= matrix
q.modified()
except AttributeError:
continue
return "align", data
@self.console_command(
"bottom",
help=_("align elements at bottom"),
input_type="align",
output_type="align",
)
def subtype_align(command, channel, _, data=None, **kwargs):
boundary_points = []
for node in data:
boundary_points.append(node.bounds)
if not len(boundary_points):
return
bottom_edge = max([e[3] for e in boundary_points])
for node in data:
subbox = node.bounds
bottom = subbox[3] - bottom_edge
matrix = "translate(0, %f)" % -bottom
if bottom != 0:
for q in node.flat(types=elem_nodes):
try:
q.matrix *= matrix
q.modified()
except AttributeError:
continue
return "align", data
@self.console_command(
"left",
help=_("align elements at left"),
input_type="align",
output_type="align",
)
def subtype_align(command, channel, _, data=None, **kwargs):
boundary_points = []
for node in data:
boundary_points.append(node.bounds)
if not len(boundary_points):
return
left_edge = min([e[0] for e in boundary_points])
for node in data:
subbox = node.bounds
left = subbox[0] - left_edge
matrix = "translate(%f, 0)" % -left
if left != 0:
for q in node.flat(types=elem_nodes):
try:
q.matrix *= matrix
q.modified()
except AttributeError:
continue
return "align", data
@self.console_command(
"right",
help=_("align elements at right"),
input_type="align",
output_type="align",
)
def subtype_align(command, channel, _, data=None, **kwargs):
boundary_points = []
for node in data:
boundary_points.append(node.bounds)
if not len(boundary_points):
return
right_edge = max([e[2] for e in boundary_points])
for node in data:
subbox = node.bounds
right = subbox[2] - right_edge
matrix = "translate(%f, 0)" % -right
if right != 0:
for q in node.flat(types=elem_nodes):
try:
q.matrix *= matrix
q.modified()
except AttributeError:
continue
return "align", data
@self.console_command(
"center",
help=_("align elements at center"),
input_type="align",
output_type="align",
)
def subtype_align(command, channel, _, data=None, **kwargs):
boundary_points = []
for node in data:
boundary_points.append(node.bounds)
if not len(boundary_points):
return
left_edge = min([e[0] for e in boundary_points])
top_edge = min([e[1] for e in boundary_points])
right_edge = max([e[2] for e in boundary_points])
bottom_edge = max([e[3] for e in boundary_points])
for node in data:
subbox = node.bounds
dx = (subbox[0] + subbox[2] - left_edge - right_edge) / 2.0
dy = (subbox[1] + subbox[3] - top_edge - bottom_edge) / 2.0
matrix = "translate(%f, %f)" % (-dx, -dy)
for q in node.flat(types=elem_nodes):
try:
q.matrix *= matrix
q.modified()
except AttributeError:
continue
return "align", data
@self.console_command(
"centerv",
help=_("align elements at center vertical"),
input_type="align",
output_type="align",
)
def subtype_align(command, channel, _, data=None, **kwargs):
boundary_points = []
for node in data:
boundary_points.append(node.bounds)
if not len(boundary_points):
return
left_edge = min([e[0] for e in boundary_points])
right_edge = max([e[2] for e in boundary_points])
for node in data:
subbox = node.bounds
dx = (subbox[0] + subbox[2] - left_edge - right_edge) / 2.0
matrix = "translate(%f, 0)" % -dx
for q in node.flat(types=elem_nodes):
try:
q.matrix *= matrix
q.modified()
except AttributeError:
continue
return "align", data
@self.console_command(
"centerh",
help=_("align elements at center horizontal"),
input_type="align",
output_type="align",
)
def subtype_align(command, channel, _, data=None, **kwargs):
boundary_points = []
for node in data:
boundary_points.append(node.bounds)
if not len(boundary_points):
return
top_edge = min([e[1] for e in boundary_points])
bottom_edge = max([e[3] for e in boundary_points])
for node in data:
subbox = node.bounds
dy = (subbox[1] + subbox[3] - top_edge - bottom_edge) / 2.0
matrix = "translate(0, %f)" % -dy
for q in node.flat(types=elem_nodes):
try:
q.matrix *= matrix
q.modified()
except AttributeError:
continue
return "align", data
@self.console_command(
"spaceh",
help=_("align elements across horizontal space"),
input_type="align",
output_type="align",
)
def subtype_align(command, channel, _, data=None, **kwargs):
boundary_points = []
for node in data:
boundary_points.append(node.bounds)
if not len(boundary_points):
return
if len(data) <= 2: # Cannot distribute 2 or fewer items.
return "align", data
left_edge = min([e[0] for e in boundary_points])
right_edge = max([e[2] for e in boundary_points])
dim_total = right_edge - left_edge
dim_available = dim_total
for node in data:
bounds = node.bounds
dim_available -= bounds[2] - bounds[0]
distributed_distance = dim_available / (len(data) - 1)
data.sort(key=lambda n: n.bounds[0]) # sort by left edge
dim_pos = left_edge
for node in data:
subbox = node.bounds
delta = subbox[0] - dim_pos
matrix = "translate(%f, 0)" % -delta
if delta != 0:
for q in node.flat(types=elem_nodes):
try:
q.matrix *= matrix
q.modified()
except AttributeError:
continue
dim_pos += subbox[2] - subbox[0] + distributed_distance
return "align", data
@self.console_command(
"spacev",
help=_("align elements down vertical space"),
input_type="align",
output_type="align",
)
def subtype_align(command, channel, _, data=None, **kwargs):
boundary_points = []
for node in data:
boundary_points.append(node.bounds)
if not len(boundary_points):
return
if len(data) <= 2: # Cannot distribute 2 or fewer items.
return "align", data
top_edge = min([e[1] for e in boundary_points])
bottom_edge = max([e[3] for e in boundary_points])
dim_total = bottom_edge - top_edge
dim_available = dim_total
for node in data:
bounds = node.bounds
dim_available -= bounds[3] - bounds[1]
distributed_distance = dim_available / (len(data) - 1)
data.sort(key=lambda n: n.bounds[1]) # sort by top edge
dim_pos = top_edge
for node in data:
subbox = node.bounds
delta = subbox[1] - dim_pos
matrix = "translate(0, %f)" % -delta
if delta != 0:
for q in node.flat(types=elem_nodes):
try:
q.matrix *= matrix
q.modified()
except AttributeError:
continue
dim_pos += subbox[3] - subbox[1] + distributed_distance
return "align", data
@self.console_command(
"bedcenter",
help=_("align elements to bedcenter"),
input_type="align",
output_type="align",
)
def subtype_align(command, channel, _, data=None, **kwargs):
boundary_points = []
for node in data:
boundary_points.append(node.bounds)
if not len(boundary_points):
return
left_edge = min([e[0] for e in boundary_points])
top_edge = min([e[1] for e in boundary_points])
right_edge = max([e[2] for e in boundary_points])
bottom_edge = max([e[3] for e in boundary_points])
for node in data:
device_width = self.length_x("100%")
device_height = self.length_y("100%")
dx = (device_width - left_edge - right_edge) / 2.0
dy = (device_height - top_edge - bottom_edge) / 2.0
matrix = "translate(%f, %f)" % (dx, dy)
for q in node.flat(types=elem_nodes):
try:
q.matrix *= matrix
q.modified()
except AttributeError:
continue
self.signal("tree_changed")
return "align", data
@self.console_argument(
"preserve_aspect_ratio",
type=str,
default="none",
help="preserve aspect ratio value",
)
@self.console_command(
"view",
help=_("align elements within viewbox"),
input_type="align",
output_type="align",
)
def subtype_align(
command, channel, _, data=None, preserve_aspect_ratio="none", **kwargs
):
"""
Align the elements within the bed according to SVG Viewbox rules. The following aspect ratios
are valid. These should define all the valid methods of centering data within the laser bed.
"xminymin",
"xmidymin",
"xmaxymin",
"xminymid",
"xmidymid",
"xmaxymid",
"xminymax",
"xmidymax",
"xmaxymax",
"xminymin meet",
"xmidymin meet",
"xmaxymin meet",
"xminymid meet",
"xmidymid meet",
"xmaxymid meet",
"xminymax meet",
"xmidymax meet",
"xmaxymax meet",
"xminymin slice",
"xmidymin slice",
"xmaxymin slice",
"xminymid slice",
"xmidymid slice",
"xmaxymid slice",
"xminymax slice",
"xmidymax slice",
"xmaxymax slice",
"none"
"""
boundary_points = []
for node in data:
boundary_points.append(node.bounds)
if not len(boundary_points):
return
left_edge = min([e[0] for e in boundary_points])
top_edge = min([e[1] for e in boundary_points])
right_edge = max([e[2] for e in boundary_points])
bottom_edge = max([e[3] for e in boundary_points])
if preserve_aspect_ratio in (
"xminymin",
"xmidymin",
"xmaxymin",
"xminymid",
"xmidymid",
"xmaxymid",
"xminymax",
"xmidymax",
"xmaxymax",
"xminymin meet",
"xmidymin meet",
"xmaxymin meet",
"xminymid meet",
"xmidymid meet",
"xmaxymid meet",
"xminymax meet",
"xmidymax meet",
"xmaxymax meet",
"xminymin slice",
"xmidymin slice",
"xmaxymin slice",
"xminymid slice",
"xmidymid slice",
"xmaxymid slice",
"xminymax slice",
"xmidymax slice",
"xmaxymax slice",
"none",
):
for node in data:
device_width = self.length_x("100%")
device_height = self.length_y("100%")
matrix = Viewbox.viewbox_transform(
0,
0,
device_width,
device_height,
left_edge,
top_edge,
right_edge - left_edge,
bottom_edge - top_edge,
preserve_aspect_ratio,
)
for q in node.flat(types=elem_nodes):
try:
q.matrix *= matrix
q.modified()
except AttributeError:
continue
for q in node.flat(types=("file", "group")):
q.modified()
return "align", data
@self.console_argument("c", type=int, help=_("Number of columns"))
@self.console_argument("r", type=int, help=_("Number of rows"))
@self.console_argument("x", type=str, help=_("x distance"))
@self.console_argument("y", type=str, help=_("y distance"))
@self.console_option(
"origin",
"o",
type=int,
nargs=2,
help=_("Position of original in matrix (e.g '2,2' or '4,3')"),
)
@self.console_command(
"grid",
help=_("grid <columns> <rows> <x_distance> <y_distance> <origin>"),
input_type=(None, "elements"),
output_type="elements",
)
def element_grid(
command,
channel,
_,
c: int,
r: int,
x: str,
y: str,
origin=None,
data=None,
**kwargs,
):
if r is None:
raise CommandSyntaxError
if data is None:
data = list(self.elems(emphasized=True))
if len(data) == 0:
channel(_("No item selected."))
return
try:
bounds = Node.union_bounds(data)
width = bounds[2] - bounds[0]
height = bounds[3] - bounds[1]
except TypeError:
raise CommandSyntaxError
if x is None:
x = "100%"
if y is None:
y = "100%"
x = float(Length(x, relative_length=Length(amount=width).length_mm))
y = float(Length(y, relative_length=Length(amount=height).length_mm))
if origin is None:
origin = (1, 1)
cx, cy = origin
data_out = list(data)
if cx is None:
cx = 1
if cy is None:
cy = 1
start_x = -1 * x * (cx - 1)
start_y = -1 * y * (cy - 1)
y_pos = start_y
for j in range(r):
x_pos = start_x
for k in range(c):
if j != (cy - 1) or k != (cx - 1):
add_elem = list(map(copy, data))
for e in add_elem:
e.matrix *= Matrix.translate(x_pos, y_pos)
self.elem_branch.add_node(e)
data_out.extend(add_elem)
x_pos += x
y_pos += y
self.signal("refresh_scene", "Scene")
return "elements", data_out
@self.console_argument("repeats", type=int, help=_("Number of repeats"))
@self.console_argument("radius", type=self.length, help=_("Radius"))
@self.console_argument("startangle", type=Angle.parse, help=_("Start-Angle"))
@self.console_argument("endangle", type=Angle.parse, help=_("End-Angle"))
@self.console_option(
"rotate",
"r",
type=bool,
action="store_true",
help=_("Rotate copies towards center?"),
)
@self.console_option(
"deltaangle",
"d",
type=Angle.parse,
help=_("Delta-Angle (if omitted will take (end-start)/repeats )"),
)
@self.console_command(
"radial",
help=_("radial <repeats> <radius> <startangle> <endangle> <rotate>"),
input_type=(None, "elements"),
output_type="elements",
)
def element_radial(
command,
channel,
_,
repeats: int,
radius=None,
startangle=None,
endangle=None,
rotate=None,
deltaangle=None,
data=None,
**kwargs,
):
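            # Hypothetical example: "radial 6 2cm" should place five additional copies of
            # the selection around a circle of radius 2cm (the original occupies the first
            # position); -r rotates the copies about the circle's center instead of
            # translating them.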
if data is None:
data = list(self.elems(emphasized=True))
if len(data) == 0 and self._emphasized_bounds is None:
channel(_("No item selected."))
return
if repeats is None:
raise CommandSyntaxError
if repeats <= 1:
raise CommandSyntaxError(_("repeats should be greater or equal to 2"))
if radius is None:
radius = 0
if startangle is None:
startangle = Angle.parse("0deg")
if endangle is None:
endangle = Angle.parse("360deg")
if rotate is None:
rotate = False
# print ("Segment to cover: %f - %f" % (startangle.as_degrees, endangle.as_degrees))
bounds = Node.union_bounds(data)
if bounds is None:
return
width = bounds[2] - bounds[0]
data_out = list(data)
if deltaangle is None:
segment_len = (endangle.as_radians - startangle.as_radians) / repeats
else:
segment_len = deltaangle.as_radians
# Notabene: we are following the cartesian system here, but as the Y-Axis is top screen to bottom screen,
# the perceived angle travel is CCW (which is counter-intuitive)
currentangle = startangle.as_radians
# bounds = self._emphasized_bounds
center_x = (bounds[2] + bounds[0]) / 2.0 - radius
center_y = (bounds[3] + bounds[1]) / 2.0
# print ("repeats: %d, Radius: %.1f" % (repeats, radius))
# print ("Center: %.1f, %.1f" % (center_x, center_y))
# print ("Startangle, Endangle, segment_len: %.1f, %.1f, %.1f" % (180 * startangle.as_radians / pi, 180 * endangle.as_radians / pi, 180 * segment_len / pi))
currentangle = segment_len
for cc in range(1, repeats):
# print ("Angle: %f rad = %f deg" % (currentangle, currentangle/pi * 180))
add_elem = list(map(copy, data))
for e in add_elem:
if rotate:
x_pos = -1 * radius
y_pos = 0
# e *= "translate(%f, %f)" % (x_pos, y_pos)
e *= "rotate(%frad, %f, %f)" % (
currentangle,
center_x,
center_y,
)
else:
x_pos = -1 * radius + radius * cos(currentangle)
y_pos = radius * sin(currentangle)
e *= "translate(%f, %f)" % (x_pos, y_pos)
self.add_elems(add_elem)
data_out.extend(add_elem)
currentangle += segment_len
self.signal("refresh_scene", "Scene")
return "elements", data_out
@self.console_argument("copies", type=int, help=_("Number of copies"))
@self.console_argument("radius", type=self.length, help=_("Radius"))
@self.console_argument("startangle", type=Angle.parse, help=_("Start-Angle"))
@self.console_argument("endangle", type=Angle.parse, help=_("End-Angle"))
@self.console_option(
"rotate",
"r",
type=bool,
action="store_true",
help=_("Rotate copies towards center?"),
)
@self.console_option(
"deltaangle",
"d",
type=Angle.parse,
help=_("Delta-Angle (if omitted will take (end-start)/copies )"),
)
@self.console_command(
"circ_copy",
help=_("circ_copy <copies> <radius> <startangle> <endangle> <rotate>"),
input_type=(None, "elements"),
output_type="elements",
)
def element_circularcopies(
command,
channel,
_,
copies: int,
radius=None,
startangle=None,
endangle=None,
rotate=None,
deltaangle=None,
data=None,
**kwargs,
):
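            # Hypothetical example: "circ_copy 8 1cm" should distribute eight copies of the
            # selection over the full 0-360deg range, each offset from the original's
            # position by 1cm at evenly spaced angles.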
if data is None:
data = list(self.elems(emphasized=True))
if len(data) == 0 and self._emphasized_bounds is None:
channel(_("No item selected."))
return
if copies is None:
raise CommandSyntaxError
if copies <= 0:
copies = 1
if radius is None:
radius = 0
if startangle is None:
startangle = Angle.parse("0deg")
if endangle is None:
endangle = Angle.parse("360deg")
if rotate is None:
rotate = False
# print ("Segment to cover: %f - %f" % (startangle.as_degrees, endangle.as_degrees))
bounds = Node.union_bounds(data)
if bounds is None:
return
width = bounds[2] - bounds[0]
data_out = list(data)
if deltaangle is None:
segment_len = (endangle.as_radians - startangle.as_radians) / copies
else:
segment_len = deltaangle.as_radians
# Notabene: we are following the cartesian system here, but as the Y-Axis is top screen to bottom screen,
# the perceived angle travel is CCW (which is counter-intuitive)
currentangle = startangle.as_radians
# bounds = self._emphasized_bounds
center_x = (bounds[2] + bounds[0]) / 2.0
center_y = (bounds[3] + bounds[1]) / 2.0
for cc in range(copies):
# print ("Angle: %f rad = %f deg" % (currentangle, currentangle/pi * 180))
add_elem = list(map(copy, data))
for e in add_elem:
if rotate:
x_pos = radius
y_pos = 0
e *= "translate(%f, %f)" % (x_pos, y_pos)
e *= "rotate(%frad, %f, %f)" % (
currentangle,
center_x,
center_y,
)
else:
x_pos = radius * cos(currentangle)
y_pos = radius * sin(currentangle)
e *= "translate(%f, %f)" % (x_pos, y_pos)
self.add_elems(add_elem)
data_out.extend(add_elem)
currentangle += segment_len
self.signal("refresh_scene", "Scene")
return "elements", data_out
@self.console_argument(
"corners", type=int, help=_("Number of corners/vertices")
)
@self.console_argument(
"cx", type=self.length_x, help=_("X-Value of polygon's center")
)
@self.console_argument(
"cy", type=self.length_y, help=_("Y-Value of polygon's center")
)
@self.console_argument(
"radius",
type=self.length_x,
help=_("Radius (length of side if --side_length is used)"),
)
@self.console_option("startangle", "s", type=Angle.parse, help=_("Start-Angle"))
@self.console_option(
"inscribed",
"i",
type=bool,
action="store_true",
help=_("Shall the polygon touch the inscribing circle?"),
)
@self.console_option(
"side_length",
"l",
type=bool,
action="store_true",
help=_(
"Do you want to treat the length value for radius as the length of one edge instead?"
),
)
@self.console_option(
"radius_inner",
"r",
type=str,
help=_("Alternating radius for every other vertex"),
)
@self.console_option(
"alternate_seq",
"a",
type=int,
help=_(
"Length of alternating sequence (1 for starlike figures, >=2 for more gear-like patterns)"
),
)
@self.console_option(
"density", "d", type=int, help=_("Amount of vertices to skip")
)
@self.console_command(
"shape",
help=_(
"shape <corners> <x> <y> <r> <startangle> <inscribed> or shape <corners> <r>"
),
input_type=("elements", None),
output_type="elements",
)
def element_shape(
command,
channel,
_,
corners,
cx,
cy,
radius,
startangle=None,
inscribed=None,
side_length=None,
radius_inner=None,
alternate_seq=None,
density=None,
data=None,
**kwargs,
):
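            # Hypothetical examples: "shape 5 2cm" should draw a regular pentagon with a
            # 2cm radius around the origin, while "shape 5 2cm -d 2" should connect every
            # second vertex and so produce a pentagram.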
if corners is None:
raise CommandSyntaxError
if cx is None:
if corners <= 2:
raise CommandSyntaxError(
_(
"Please provide at least one additional value (which will act as radius then)"
)
)
cx = 0
if cy is None:
cy = 0
if radius is None:
radius = 0
if corners <= 2:
# No need to look at side_length parameter as we are considering the radius value as an edge anyway...
if startangle is None:
startangle = Angle.parse("0deg")
star_points = [(cx, cy)]
if corners == 2:
star_points += [
(
cx + cos(startangle.as_radians) * radius,
cy + sin(startangle.as_radians) * radius,
)
]
else:
                # do we have something like 'shape 3 4cm'? If yes, reassign the parameters
if radius is None:
radius = cx
cx = 0
cy = 0
if startangle is None:
startangle = Angle.parse("0deg")
if alternate_seq is None:
if radius_inner is None:
alternate_seq = 0
else:
alternate_seq = 1
if density is None:
density = 1
if density < 1 or density > corners:
density = 1
                # Should the radius value be interpreted as the length of one side?
                if side_length is not None:
                    # Recalculate the circumradius from the side length s:
                    # d_oc = s * csc(pi / n)  =>  radius = 0.5 * s / sin(pi / n)
                    radius = 0.5 * radius / sin(pi / corners)
if radius_inner is None:
radius_inner = radius
else:
try:
radius_inner = float(
Length(radius_inner, relative_length=radius)
)
except ValueError:
raise CommandSyntaxError
if inscribed:
if side_length is None:
radius = radius / cos(pi / corners)
else:
channel(
_(
"You have as well provided the --side_length parameter, this takes precedence, so --inscribed is ignored"
)
)
if alternate_seq < 1:
radius_inner = radius
# print(
# "Your parameters are:\n cx=%.1f, cy=%.1f\n radius=%.1f, inner=%.1f\n corners=%d, density=%d\n seq=%d, angle=%.1f"
# % (cx, cy, radius, radius_inner, corners, density, alternate_seq, startangle)
# )
pts = []
i_angle = startangle.as_radians
delta_angle = tau / corners
ct = 0
for j in range(corners):
if ct < alternate_seq:
r = radius
# dbg = "outer"
else:
r = radius_inner
# dbg = "inner"
thisx = cx + r * cos(i_angle)
thisy = cy + r * sin(i_angle)
# print(
# "pt %d, Angle=%.1f: %s radius=%.1f: (%.1f, %.1f)"
# % (j, i_angle / pi * 180, dbg, r, thisx, thisy)
# )
ct += 1
if ct >= 2 * alternate_seq:
ct = 0
if j == 0:
firstx = thisx
firsty = thisy
i_angle += delta_angle
pts += [(thisx, thisy)]
# Close the path
pts += [(firstx, firsty)]
star_points = [(pts[0][0], pts[0][1])]
idx = density
while idx != 0:
star_points += [(pts[idx][0], pts[idx][1])]
idx += density
if idx >= corners:
idx -= corners
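                # A density that shares a common factor with corners cycles back to the
                # first vertex before visiting them all; gcd(j, corners) == 1 below selects
                # the densities that do hit every vertex.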
if len(star_points) < corners:
ct = 0
possible_combinations = ""
for i in range(corners - 1):
j = i + 2
if gcd(j, corners) == 1:
if ct % 3 == 0:
possible_combinations += "\n shape %d ... -d %d" % (
corners,
j,
)
else:
possible_combinations += ", shape %d ... -d %d " % (
corners,
j,
)
ct += 1
channel(
_("Just for info: we have missed %d vertices...")
% (corners - len(star_points))
)
channel(
_("To hit all, the density parameters should be e.g. %s")
% possible_combinations
)
poly_path = Polygon(star_points)
node = self.elem_branch.add(shape=poly_path, type="elem polyline")
node.stroke = Color("black")
self.set_emphasis([node])
node.focus()
if data is None:
data = list()
data.append(poly_path)
return "elements", data
@self.console_option("step", "s", default=2.0, type=float)
@self.console_command(
"render",
help=_("Convert given elements to a raster image"),
input_type=(None, "elements"),
output_type="image",
)
def make_raster_image(command, channel, _, step=2.0, data=None, **kwargs):
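            # step is forwarded to the renderer as step_x/step_y; the matrix below scales
            # the rendered image back up by the same factor and moves it to the selection's
            # top-left corner so it covers the original bounds.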
if data is None:
data = list(self.elems(emphasized=True))
reverse = self.classify_reverse
if reverse:
data = list(reversed(data))
make_raster = self.lookup("render-op/make_raster")
if not make_raster:
channel(_("No renderer is registered to perform render."))
return
bounds = Node.union_bounds(data)
if bounds is None:
return
if step <= 0:
step = 1
xmin, ymin, xmax, ymax = bounds
if isinf(xmin):
channel(_("No bounds for selected elements."))
return
image = make_raster(
[n.node for n in data],
bounds,
step_x=step,
step_y=step,
)
matrix = Matrix()
matrix.post_scale(step, step)
matrix.post_translate(xmin, ymin)
image_node = ImageNode(image=image, matrix=matrix, step_x=step, step_y=step)
self.elem_branch.add_node(image_node)
return "image", [image_node]
# ==========
# ELEMENT/SHAPE COMMANDS
# ==========
@self.console_argument("x_pos", type=Length)
@self.console_argument("y_pos", type=Length)
@self.console_argument("r_pos", type=Length)
@self.console_command(
"circle",
help=_("circle <x> <y> <r>"),
input_type=("elements", None),
output_type="elements",
all_arguments_required=True,
)
def element_circle(channel, _, x_pos, y_pos, r_pos, data=None, **kwargs):
circ = Circle(cx=float(x_pos), cy=float(y_pos), r=float(r_pos))
if circ.is_degenerate():
channel(_("Shape is degenerate."))
return "elements", data
node = self.elem_branch.add(
shape=circ, type="elem ellipse", stroke=Color("black")
)
# node.stroke = Color("black")
self.set_emphasis([node])
node.focus()
if data is None:
data = list()
data.append(node)
return "elements", data
@self.console_argument("r_pos", type=Length)
@self.console_command(
"circle_r",
help=_("circle_r <r>"),
input_type=("elements", None),
output_type="elements",
all_arguments_required=True,
)
def element_circle_r(channel, _, r_pos, data=None, **kwargs):
circ = Circle(r=float(r_pos))
if circ.is_degenerate():
channel(_("Shape is degenerate."))
return "elements", data
node = self.elem_branch.add(shape=circ, type="elem ellipse")
node.stroke = Color("black")
self.set_emphasis([node])
node.focus()
if data is None:
data = list()
data.append(node)
return "elements", data
@self.console_argument("x_pos", type=Length)
@self.console_argument("y_pos", type=Length)
@self.console_argument("rx_pos", type=Length)
@self.console_argument("ry_pos", type=Length)
@self.console_command(
"ellipse",
help=_("ellipse <cx> <cy> <rx> <ry>"),
input_type=("elements", None),
output_type="elements",
all_arguments_required=True,
)
def element_ellipse(
channel, _, x_pos, y_pos, rx_pos, ry_pos, data=None, **kwargs
):
ellip = Ellipse(
cx=float(x_pos), cy=float(y_pos), rx=float(rx_pos), ry=float(ry_pos)
)
if ellip.is_degenerate():
channel(_("Shape is degenerate."))
return "elements", data
node = self.elem_branch.add(shape=ellip, type="elem ellipse")
node.stroke = Color("black")
self.set_emphasis([node])
node.focus()
if data is None:
data = list()
data.append(node)
return "elements", data
@self.console_argument(
"x_pos",
type=self.length_x,
help=_("x position for top left corner of rectangle."),
)
@self.console_argument(
"y_pos",
type=self.length_y,
help=_("y position for top left corner of rectangle."),
)
@self.console_argument(
"width", type=self.length_x, help=_("width of the rectangle.")
)
@self.console_argument(
"height", type=self.length_y, help=_("height of the rectangle.")
)
@self.console_option(
"rx", "x", type=self.length_x, help=_("rounded rx corner value.")
)
@self.console_option(
"ry", "y", type=self.length_y, help=_("rounded ry corner value.")
)
@self.console_command(
"rect",
help=_("adds rectangle to scene"),
input_type=("elements", None),
output_type="elements",
all_arguments_required=True,
)
def element_rect(
channel,
_,
x_pos,
y_pos,
width,
height,
rx=None,
ry=None,
data=None,
**kwargs,
):
"""
Draws a svg rectangle with optional rounded corners.
"""
rect = Rect(x=x_pos, y=y_pos, width=width, height=height, rx=rx, ry=ry)
if rect.is_degenerate():
channel(_("Shape is degenerate."))
return "elements", data
node = self.elem_branch.add(shape=rect, type="elem rect")
node.stroke = Color("black")
self.set_emphasis([node])
node.focus()
if data is None:
data = list()
data.append(node)
return "elements", data
@self.console_argument("x0", type=self.length_x, help=_("start x position"))
@self.console_argument("y0", type=self.length_y, help=_("start y position"))
@self.console_argument("x1", type=self.length_x, help=_("end x position"))
@self.console_argument("y1", type=self.length_y, help=_("end y position"))
@self.console_command(
"line",
help=_("adds line to scene"),
input_type=("elements", None),
output_type="elements",
all_arguments_required=True,
)
def element_line(command, x0, y0, x1, y1, data=None, **kwargs):
"""
Draws a svg line in the scene.
"""
simple_line = SimpleLine(x0, y0, x1, y1)
node = self.elem_branch.add(shape=simple_line, type="elem line")
node.stroke = Color("black")
self.set_emphasis([node])
node.focus()
if data is None:
data = list()
data.append(node)
return "elements", data
@self.console_option("size", "s", type=float, help=_("font size to for object"))
@self.console_argument("text", type=str, help=_("quoted string of text"))
@self.console_command(
"text",
help=_("text <text>"),
input_type=(None, "elements"),
output_type="elements",
)
def element_text(
command, channel, _, data=None, text=None, size=None, **kwargs
):
if text is None:
channel(_("No text specified"))
return
svg_text = SVGText(text)
if size is not None:
svg_text.font_size = size
svg_text *= "scale({scale})".format(scale=UNITS_PER_PIXEL)
node = self.elem_branch.add(
text=svg_text, matrix=svg_text.transform, type="elem text"
)
node.stroke = Color("black")
self.set_emphasis([node])
node.focus()
if data is None:
data = list()
data.append(node)
return "elements", data
@self.console_argument(
"mlist", type=Length, help=_("list of positions"), nargs="*"
)
@self.console_command(
("polygon", "polyline"),
help=_("poly(gon|line) (Length Length)*"),
input_type=("elements", None),
output_type="elements",
all_arguments_required=True,
)
def element_poly(command, channel, _, mlist, data=None, **kwargs):
try:
pts = [float(Length(p)) for p in mlist]
if command == "polygon":
shape = Polygon(pts)
else:
shape = Polyline(pts)
except ValueError:
raise CommandSyntaxError(
_("Must be a list of spaced delimited length pairs.")
)
if shape.is_degenerate():
channel(_("Shape is degenerate."))
return "elements", data
node = self.elem_branch.add(shape=shape, type="elem polyline")
node.stroke = Color("black")
self.set_emphasis([node])
node.focus()
if data is None:
data = list()
data.append(node)
return "elements", data
@self.console_command(
"path",
help=_("Convert any shapes to paths"),
input_type="shapes",
output_type="shapes",
)
def element_path_convert(data, **kwargs):
paths = []
for e in data:
paths.append(abs(Path(e)))
return "shapes", paths
@self.console_command(
"path",
help=_("Convert any element nodes to paths"),
input_type="elements",
output_type="shapes",
)
def element_path_convert(data, **kwargs):
paths = []
for node in data:
try:
e = node.as_path()
except AttributeError:
continue
paths.append(e)
return "shapes", paths
@self.console_argument(
"path_d", type=str, help=_("svg path syntax command (quoted).")
)
@self.console_command(
"path",
help=_("path <svg path>"),
output_type="elements",
)
def element_path(path_d, data, **kwargs):
if path_d is None:
raise CommandSyntaxError(_("Not a valid path_d string"))
try:
path = Path(path_d)
path *= "Scale({scale})".format(scale=UNITS_PER_PIXEL)
except ValueError:
raise CommandSyntaxError(_("Not a valid path_d string (try quotes)"))
node = self.elem_branch.add(path=path, type="elem path")
node.stroke = Color("black")
self.set_emphasis([node])
node.focus()
if data is None:
data = list()
data.append(node)
return "elements", data
@self.console_argument(
"stroke_width",
type=self.length,
help=_("Stroke-width for the given stroke"),
)
@self.console_command(
"stroke-width",
help=_("stroke-width <length>"),
input_type=(
None,
"elements",
),
output_type="elements",
)
def element_stroke_width(
command, channel, _, stroke_width, data=None, **kwargs
):
if data is None:
data = list(self.elems(emphasized=True))
if stroke_width is None:
channel("----------")
channel(_("Stroke-Width Values:"))
i = 0
for e in self.elems():
name = str(e)
if len(name) > 50:
name = name[:50] + "…"
                    if e.stroke is None or e.stroke == "none":
                        channel(_("%d: stroke-width = none - %s") % (i, name))
                    else:
                        channel(_("%d: stroke-width = %s - %s") % (i, e.stroke_width, name))
i += 1
channel("----------")
return
if len(data) == 0:
channel(_("No selected elements."))
return
for e in data:
e.stroke_width = stroke_width
e.altered()
return "elements", data
@self.console_option("filter", "f", type=str, help="Filter indexes")
@self.console_argument(
"cap", type=str, help=_("Linecap to apply to the path (one of butt, round, square)")
)
@self.console_command(
"linecap",
help=_("linecap <cap>"),
input_type=(
None,
"elements",
),
output_type="elements",
)
def element_cap(
command, channel, _, cap=None, data=None, filter=None, **kwargs
):
if data is None:
data = list(self.elems(emphasized=True))
apply = data
if filter is not None:
apply = list()
for value in filter.split(","):
try:
value = int(value)
except ValueError:
continue
try:
apply.append(data[value])
except IndexError:
channel(_("index %d out of range") % value)
if cap is None:
channel("----------")
channel(_("Linecaps:"))
i = 0
for e in self.elems():
name = str(e)
if len(name) > 50:
name = name[:50] + "…"
if hasattr(e, "linecap"):
if e.linecap == Linecap.CAP_SQUARE:
capname = "square"
elif e.linecap == Linecap.CAP_BUTT:
capname = "butt"
else:
capname = "round"
channel(_("%d: linecap = %s - %s") % (i, capname, name))
i += 1
channel("----------")
return
else:
capvalue = None
if cap.lower() == "butt":
capvalue = Linecap.CAP_BUTT
elif cap.lower() == "round":
capvalue = Linecap.CAP_ROUND
elif cap.lower() == "square":
capvalue = Linecap.CAP_SQUARE
                if capvalue is not None:
for e in apply:
if hasattr(e, "linecap"):
e.linecap = capvalue
e.altered()
return "elements", data
@self.console_option("filter", "f", type=str, help="Filter indexes")
@self.console_argument(
"join", type=str, help=_("jointype to apply to the path (one of arcs, bevel, miter, miter-clip, round)")
)
@self.console_command(
"linejoin",
help=_("linejoin <join>"),
input_type=(
None,
"elements",
),
output_type="elements",
)
def element_join(
command, channel, _, join=None, data=None, filter=None, **kwargs
):
if data is None:
data = list(self.elems(emphasized=True))
apply = data
if filter is not None:
apply = list()
for value in filter.split(","):
try:
value = int(value)
except ValueError:
continue
try:
apply.append(data[value])
except IndexError:
channel(_("index %d out of range") % value)
if join is None:
channel("----------")
channel(_("Linejoins:"))
i = 0
for e in self.elems():
name = str(e)
if len(name) > 50:
name = name[:50] + "…"
if hasattr(e, "linejoin"):
if e.linejoin == Linejoin.JOIN_ARCS:
joinname = "arcs"
elif e.linejoin == Linejoin.JOIN_BEVEL:
joinname = "bevel"
elif e.linejoin == Linejoin.JOIN_MITER_CLIP:
joinname = "miter-clip"
elif e.linejoin == Linejoin.JOIN_MITER:
joinname = "miter"
elif e.linejoin == Linejoin.JOIN_ROUND:
joinname = "round"
channel(_("%d: linejoin = %s - %s") % (i, joinname, name))
i += 1
channel("----------")
return
else:
joinvalue = None
if join.lower() == "arcs":
joinvalue = Linejoin.JOIN_ARCS
elif join.lower() == "bevel":
joinvalue = Linejoin.JOIN_BEVEL
elif join.lower() == "miter":
joinvalue = Linejoin.JOIN_MITER
elif join.lower() == "miter-clip":
joinvalue = Linejoin.JOIN_MITER_CLIP
elif join.lower() == "round":
joinvalue = Linejoin.JOIN_ROUND
                if joinvalue is not None:
for e in apply:
if hasattr(e, "linejoin"):
e.linejoin = joinvalue
e.altered()
return "elements", data
@self.console_option("filter", "f", type=str, help="Filter indexes")
@self.console_argument(
"rule", type=str, help=_("rule to apply to fill the path (one of %s, %s)") % (SVG_RULE_NONZERO, SVG_RULE_EVENODD)
)
@self.console_command(
"fillrule",
help=_("fillrule <rule>"),
input_type=(
None,
"elements",
),
output_type="elements",
)
def element_rule(
command, channel, _, rule=None, data=None, filter=None, **kwargs
):
if data is None:
data = list(self.elems(emphasized=True))
apply = data
if filter is not None:
apply = list()
for value in filter.split(","):
try:
value = int(value)
except ValueError:
continue
try:
apply.append(data[value])
except IndexError:
channel(_("index %d out of range") % value)
if rule is None:
channel("----------")
channel(_("fillrules:"))
i = 0
for e in self.elems():
name = str(e)
if len(name) > 50:
name = name[:50] + "…"
if hasattr(e, "fillrule"):
if e.fillrule == Fillrule.FILLRULE_EVENODD:
rulename = SVG_RULE_EVENODD
elif e.fillrule == Fillrule.FILLRULE_NONZERO:
rulename = SVG_RULE_NONZERO
channel(_("%d: fillrule = %s - %s") % (i, rulename, name))
i += 1
channel("----------")
return
else:
rulevalue = None
if rule.lower() == SVG_RULE_EVENODD:
rulevalue = Fillrule.FILLRULE_EVENODD
elif rule.lower() == SVG_RULE_NONZERO:
rulevalue = Fillrule.FILLRULE_NONZERO
                if rulevalue is not None:
for e in apply:
if hasattr(e, "fillrule"):
e.fillrule = rulevalue
e.altered()
return "elements", data
@self.console_option("filter", "f", type=str, help="Filter indexes")
@self.console_argument(
"color", type=Color, help=_("Color to color the given stroke")
)
@self.console_command(
"stroke",
help=_("stroke <svg color>"),
input_type=(
None,
"elements",
),
output_type="elements",
)
def element_stroke(
command, channel, _, color, data=None, filter=None, **kwargs
):
if data is None:
data = list(self.elems(emphasized=True))
apply = data
if filter is not None:
apply = list()
for value in filter.split(","):
try:
value = int(value)
except ValueError:
continue
try:
apply.append(data[value])
except IndexError:
channel(_("index %d out of range") % value)
if color is None:
channel("----------")
channel(_("Stroke Values:"))
i = 0
for e in self.elems():
name = str(e)
if len(name) > 50:
name = name[:50] + "…"
if e.stroke is None or e.stroke == "none":
channel(_("%d: stroke = none - %s") % (i, name))
else:
channel(_("%d: stroke = %s - %s") % (i, e.stroke.hex, name))
i += 1
channel("----------")
return
elif color == "none":
for e in apply:
e.stroke = None
e.altered()
else:
for e in apply:
e.stroke = Color(color)
e.altered()
return "elements", data
@self.console_option("filter", "f", type=str, help="Filter indexes")
@self.console_argument("color", type=Color, help=_("Color to set the fill to"))
@self.console_command(
"fill",
help=_("fill <svg color>"),
input_type=(
None,
"elements",
),
output_type="elements",
)
def element_fill(command, channel, _, color, data=None, filter=None, **kwargs):
if data is None:
data = list(self.elems(emphasized=True))
apply = data
if filter is not None:
apply = list()
for value in filter.split(","):
try:
value = int(value)
except ValueError:
continue
try:
apply.append(data[value])
except IndexError:
channel(_("index %d out of range") % value)
if color is None:
channel("----------")
channel(_("Fill Values:"))
i = 0
for e in self.elems():
name = str(e)
if len(name) > 50:
name = name[:50] + "…"
if e.fill is None or e.fill == "none":
channel(_("%d: fill = none - %s") % (i, name))
else:
channel(_("%d: fill = %s - %s") % (i, e.fill.hex, name))
i += 1
channel("----------")
return "elements", data
elif color == "none":
for e in apply:
e.fill = None
e.altered()
else:
for e in apply:
e.fill = Color(color)
e.altered()
return "elements", data
@self.console_argument(
"x_offset", type=self.length_x, help=_("x offset."), default="0"
)
@self.console_argument(
"y_offset", type=self.length_y, help=_("y offset"), default="0"
)
@self.console_command(
"outline",
help=_("outline the current selected elements"),
input_type=(
None,
"elements",
),
output_type="elements",
)
def element_outline(
command,
channel,
_,
x_offset=None,
y_offset=None,
data=None,
**kwargs,
):
"""
Draws an outline of the current shape.
"""
bounds = self.selected_area()
if bounds is None:
channel(_("Nothing Selected"))
return
x_pos = bounds[0]
y_pos = bounds[1]
width = bounds[2] - bounds[0]
height = bounds[3] - bounds[1]
x_pos -= x_offset
y_pos -= y_offset
width += x_offset * 2
height += y_offset * 2
            element = Rect(x=x_pos, y=y_pos, width=width, height=height)
            node = self.elem_branch.add(shape=element, type="elem rect")
node.stroke = Color("red")
self.set_emphasis([node])
node.focus()
self.classify([node])
if data is None:
data = list()
            data.append(node)
return "elements", data
@self.console_argument("angle", type=Angle.parse, help=_("angle to rotate by"))
@self.console_option("cx", "x", type=self.length_x, help=_("center x"))
@self.console_option("cy", "y", type=self.length_y, help=_("center y"))
@self.console_option(
"absolute",
"a",
type=bool,
action="store_true",
help=_("angle_to absolute angle"),
)
@self.console_command(
"rotate",
help=_("rotate <angle>"),
input_type=(
None,
"elements",
),
output_type="elements",
)
def element_rotate(
command,
channel,
_,
angle,
cx=None,
cy=None,
absolute=False,
data=None,
**kwargs,
):
if angle is None:
channel("----------")
channel(_("Rotate Values:"))
i = 0
for node in self.elems():
name = str(node)
if len(name) > 50:
name = name[:50] + "…"
channel(
_("%d: rotate(%fturn) - %s")
% (i, node.matrix.rotation.as_turns, name)
)
i += 1
channel("----------")
return
if data is None:
data = list(self.elems(emphasized=True))
if len(data) == 0:
channel(_("No selected elements."))
return
self.validate_selected_area()
bounds = self.selected_area()
if bounds is None:
channel(_("No selected elements."))
return
rot = angle.as_degrees
if cx is None:
cx = (bounds[2] + bounds[0]) / 2.0
if cy is None:
cy = (bounds[3] + bounds[1]) / 2.0
matrix = Matrix("rotate(%fdeg,%f,%f)" % (rot, cx, cy))
try:
if not absolute:
for node in data:
try:
if node.lock:
continue
except AttributeError:
pass
node.matrix *= matrix
node.modified()
else:
for node in data:
start_angle = node.matrix.rotation
amount = rot - start_angle
matrix = Matrix(
"rotate(%f,%f,%f)" % (Angle(amount).as_degrees, cx, cy)
)
node.matrix *= matrix
node.modified()
except ValueError:
raise CommandSyntaxError
return "elements", data
@self.console_argument("scale_x", type=float, help=_("scale_x value"))
@self.console_argument("scale_y", type=float, help=_("scale_y value"))
@self.console_option(
"px", "x", type=self.length_x, help=_("scale x origin point")
)
@self.console_option(
"py", "y", type=self.length_y, help=_("scale y origin point")
)
@self.console_option(
"absolute",
"a",
type=bool,
action="store_true",
help=_("scale to absolute size"),
)
@self.console_command(
"scale",
help=_("scale <scale> [<scale-y>]?"),
input_type=(None, "elements"),
output_type="elements",
)
def element_scale(
command,
channel,
_,
scale_x=None,
scale_y=None,
px=None,
py=None,
absolute=False,
data=None,
**kwargs,
):
if scale_x is None:
channel("----------")
channel(_("Scale Values:"))
i = 0
for node in self.elems():
name = str(node)
if len(name) > 50:
name = name[:50] + "…"
channel(
"%d: scale(%f, %f) - %s"
% (
i,
node.matrix.value_scale_x(),
                            node.matrix.value_scale_y(),
name,
)
)
i += 1
channel("----------")
return
if data is None:
data = list(self.elems(emphasized=True))
if len(data) == 0:
channel(_("No selected elements."))
return
bounds = Node.union_bounds(data)
if scale_y is None:
scale_y = scale_x
if px is None:
px = (bounds[2] + bounds[0]) / 2.0
if py is None:
py = (bounds[3] + bounds[1]) / 2.0
if scale_x == 0 or scale_y == 0:
channel(_("Scaling by Zero Error"))
return
matrix = Matrix("scale(%f,%f,%f,%f)" % (scale_x, scale_y, px, py))
try:
if not absolute:
for node in data:
try:
if node.lock:
continue
except AttributeError:
pass
node.matrix *= matrix
node.modified()
else:
for node in data:
try:
if node.lock:
continue
except AttributeError:
pass
osx = node.matrix.value_scale_x()
osy = node.matrix.value_scale_y()
nsx = scale_x / osx
nsy = scale_y / osy
matrix = Matrix("scale(%f,%f,%f,%f)" % (nsx, nsy, px, px))
node.matrix *= matrix
node.modified()
except ValueError:
raise CommandSyntaxError
return "elements", data
@self.console_option(
"new_area", "n", type=self.area, help=_("provide a new area to cover")
)
@self.console_command(
"area",
help=_("provides information about/changes the area of a selected element"),
input_type=(None, "elements"),
output_type=("elements"),
)
def element_area(
command,
channel,
_,
new_area=None,
data=None,
**kwargs,
):
if new_area is None:
display_only = True
else:
if new_area == 0:
channel(_("You shouldn't collapse a shape to a zero-sized thing"))
return
display_only = False
if data is None:
data = list(self.elems(emphasized=True))
if len(data) == 0:
channel(_("No selected elements."))
return
total_area = 0
if display_only:
channel("----------")
channel(_("Area values:"))
units = ("mm", "cm", "in")
square_unit = [0] * len(units)
for idx, u in enumerate(units):
value = float(Length("1{unit}".format(unit=u)))
square_unit[idx] = value * value
i = 0
for elem in data:
this_area = 0
try:
path = elem.as_path()
except AttributeError:
path = None
subject_polygons = []
                if path is not None:
for subpath in path.as_subpaths():
                        subj = Path(subpath).npoint(linspace(0, 1, 1000))
                        # npoint already returns an (N, 2) array of points; no reshape is needed
s = list(map(Point, subj))
subject_polygons.append(s)
else:
                    try:
                        bb = elem.bounds
                    except Exception:
                        # Even bounds failed, skip this element
                        continue
s = [
Point(bb[0], bb[1]),
Point(bb[2], bb[1]),
Point(bb[2], bb[3]),
                        Point(bb[0], bb[3]),
]
subject_polygons.append(s)
if len(subject_polygons) > 0:
idx = len(subject_polygons[0]) - 1
if (
subject_polygons[0][0].x != subject_polygons[0][idx].x
or subject_polygons[0][0].y != subject_polygons[0][idx].y
):
                        # not identical, so close the loop by repeating the first point
                        subject_polygons[0].append(
                            Point(subject_polygons[0][0].x, subject_polygons[0][0].y)
                        )
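                # Shoelace formula over the first collected polygon:
                # area = 0.5 * |sum(x_i * y_(i+1)) - sum(y_i * x_(i+1))|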
if len(subject_polygons) > 0:
idx = -1
area_x_y = 0
area_y_x = 0
for pt in subject_polygons[0]:
idx += 1
if idx > 0:
area_x_y += last_x * pt.y
area_y_x += last_y * pt.x
last_x = pt.x
last_y = pt.y
this_area = 0.5 * abs(area_x_y - area_y_x)
if display_only:
name = str(elem)
if len(name) > 50:
name = name[:50] + "…"
channel("%d: %s" % (i, name))
for idx, u in enumerate(units):
this_area_local = this_area / square_unit[idx]
channel(
_(" Area= {area:.3f} {unit}²").format(
area=this_area_local, unit=u
)
)
i += 1
total_area += this_area
if display_only:
channel("----------")
else:
if total_area == 0:
channel(_("You can't reshape a zero-sized shape"))
return
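                # Area grows with the square of a uniform scale factor, so reaching
                # new_area means scaling lengths by sqrt(new_area / total_area).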
ratio = sqrt(new_area / total_area)
self("scale %f\n" % ratio)
return "elements", data
# Do we have a new value to set? If yes scale by sqrt(of the fraction)
@self.console_argument("tx", type=self.length_x, help=_("translate x value"))
@self.console_argument("ty", type=self.length_y, help=_("translate y value"))
@self.console_option(
"absolute",
"a",
type=bool,
action="store_true",
help=_("translate to absolute position"),
)
@self.console_command(
"translate",
help=_("translate <tx> <ty>"),
input_type=(None, "elements"),
output_type="elements",
)
def element_translate(
command, channel, _, tx, ty, absolute=False, data=None, **kwargs
):
if tx is None:
channel("----------")
channel(_("Translate Values:"))
i = 0
for node in self.elems():
name = str(node)
if len(name) > 50:
name = name[:50] + "…"
channel(
_("%d: translate(%f, %f) - %s")
% (
i,
node.matrix.value_trans_x(),
node.matrix.value_trans_y(),
name,
)
)
i += 1
channel("----------")
return
if data is None:
data = list(self.elems(emphasized=True))
if len(data) == 0:
channel(_("No selected elements."))
return
if tx is None:
tx = 0
if ty is None:
ty = 0
matrix = Matrix.translate(tx, ty)
try:
if not absolute:
for node in data:
node.matrix *= matrix
node.modified()
else:
for node in data:
otx = node.matrix.value_trans_x()
oty = node.matrix.value_trans_y()
ntx = tx - otx
nty = ty - oty
matrix = Matrix.translate(ntx, nty)
node.matrix *= matrix
node.modified()
except ValueError:
raise CommandSyntaxError
return "elements", data
@self.console_command(
"move_to_laser",
help=_("translates the selected element to the laser head"),
input_type=(None, "elements"),
output_type="elements",
)
def element_move_to_laser(command, channel, _, data=None, **kwargs):
if data is None:
data = list(self.elems(emphasized=True))
if len(data) == 0:
channel(_("No selected elements."))
return
tx, ty = self.device.current
try:
bounds = Node.union_bounds(data)
otx = bounds[0]
oty = bounds[1]
ntx = tx - otx
nty = ty - oty
for node in data:
node.matrix.post_translate(ntx, nty)
node.modified()
except ValueError:
raise CommandSyntaxError
return "elements", data
@self.console_argument(
"x_pos", type=self.length_x, help=_("x position for top left corner")
)
@self.console_argument(
"y_pos", type=self.length_y, help=_("y position for top left corner")
)
@self.console_argument(
"width", type=self.length_x, help=_("new width of selected")
)
@self.console_argument(
"height", type=self.length_y, help=_("new height of selected")
)
@self.console_command(
"resize",
help=_("resize <x-pos> <y-pos> <width> <height>"),
input_type=(None, "elements"),
output_type="elements",
)
def element_resize(
command, channel, _, x_pos, y_pos, width, height, data=None, **kwargs
):
if height is None:
raise CommandSyntaxError
try:
area = self.selected_area()
if area is None:
channel(_("resize: nothing selected"))
return
x, y, x1, y1 = area
w, h = x1 - x, y1 - y
if w == 0 or h == 0: # dot
channel(_("resize: cannot resize a dot"))
return
sx = width / w
sy = height / h
# Don't do anything if scale is 1
if sx == 1.0 and sy == 1.0:
channel(_("resize: nothing to do - scale factors 1"))
return
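                # Read right to left: move the selection's top-left corner to the origin,
                # scale by sx/sy, then move it to the requested x_pos/y_pos.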
matrix = Matrix(
"translate(%f,%f) scale(%f,%f) translate(%f,%f)"
% (x_pos, y_pos, sx, sy, -x, -y)
)
if data is None:
data = list(self.elems(emphasized=True))
for node in data:
try:
if node.lock:
channel(_("resize: cannot resize a locked image"))
return
except AttributeError:
pass
for node in data:
node.matrix *= matrix
node.modified()
return "elements", data
except (ValueError, ZeroDivisionError, TypeError):
raise CommandSyntaxError
@self.console_argument("sx", type=float, help=_("scale_x value"))
@self.console_argument("kx", type=float, help=_("skew_x value"))
@self.console_argument("ky", type=float, help=_("skew_y value"))
@self.console_argument("sy", type=float, help=_("scale_y value"))
@self.console_argument("tx", type=self.length_x, help=_("translate_x value"))
@self.console_argument("ty", type=self.length_y, help=_("translate_y value"))
@self.console_command(
"matrix",
help=_("matrix <sx> <kx> <ky> <sy> <tx> <ty>"),
input_type=(None, "elements"),
output_type="elements",
)
def element_matrix(
command, channel, _, sx, kx, ky, sy, tx, ty, data=None, **kwargs
):
if ty is None:
channel("----------")
channel(_("Matrix Values:"))
i = 0
for node in self.elems():
name = str(node)
if len(name) > 50:
name = name[:50] + "…"
channel("%d: %s - %s" % (i, str(node.matrix), name))
i += 1
channel("----------")
return
if data is None:
data = list(self.elems(emphasized=True))
if len(data) == 0:
channel(_("No selected elements."))
return
try:
# SVG 7.15.3 defines the matrix form as:
# [a c e]
# [b d f]
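                # The six arguments are passed positionally as matrix(a, b, c, d, e, f)
                # in SVG order, i.e. columns [a c e] / [b d f]; e and f carry the translation.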
m = Matrix(
sx,
kx,
ky,
sy,
tx,
ty,
)
for node in data:
try:
if node.lock:
continue
except AttributeError:
pass
node.matrix = Matrix(m)
node.modified()
except ValueError:
raise CommandSyntaxError
return
@self.console_command(
"reset",
help=_("reset affine transformations"),
input_type=(None, "elements"),
output_type="elements",
)
def reset(command, channel, _, data=None, **kwargs):
if data is None:
data = list(self.elems(emphasized=True))
for e in data:
try:
if e.lock:
continue
except AttributeError:
pass
name = str(e)
if len(name) > 50:
name = name[:50] + "…"
channel(_("reset - %s") % name)
e.matrix.reset()
e.modified()
return "elements", data
# @self.console_command(
# "reify",
# help=_("reify affine transformations"),
# input_type=(None, "elements"),
# output_type="elements",
# )
# def element_reify(command, channel, _, data=None, **kwargs):
# if data is None:
# data = list(self.elems(emphasized=True))
# for e in data:
# try:
# if e.lock:
# continue
# except AttributeError:
# pass
#
# name = str(e)
# if len(name) > 50:
# name = name[:50] + "…"
# channel(_("reified - %s") % name)
# e.reify()
# e.altered()
# return "elements", data
@self.console_command(
"classify",
help=_("classify elements into operations"),
input_type=(None, "elements"),
output_type="elements",
)
def element_classify(command, channel, _, data=None, **kwargs):
if data is None:
data = list(self.elems(emphasized=True))
if len(data) == 0:
channel(_("No selected elements."))
return
self.classify(data)
return "elements", data
@self.console_command(
"declassify",
help=_("declassify selected elements"),
input_type=(None, "elements"),
output_type="elements",
)
def declassify(command, channel, _, data=None, **kwargs):
if data is None:
data = list(self.elems(emphasized=True))
if len(data) == 0:
channel(_("No selected elements."))
return
self.remove_elements_from_operations(data)
return "elements", data
# ==========
# TREE BASE
# ==========
@self.console_command(
"tree", help=_("access and alter tree elements"), output_type="tree"
)
def tree(**kwargs):
return "tree", [self._tree]
@self.console_command(
"bounds", help=_("view tree bounds"), input_type="tree", output_type="tree"
)
def tree_bounds(command, channel, _, data=None, **kwargs):
if data is None:
data = [self._tree]
def b_list(path, node):
for i, n in enumerate(node.children):
p = list(path)
p.append(str(i))
channel(
"%s: %s - %s %s - %s"
% (
".".join(p).ljust(10),
str(n._bounds),
str(n._bounds_dirty),
str(n.type),
str(n.label[:16]),
)
)
b_list(p, n)
for d in data:
channel("----------")
if d.type == "root":
channel(_("Tree:"))
else:
channel("%s:" % d.label)
b_list([], d)
channel("----------")
return "tree", data
@self.console_command(
"list", help=_("view tree"), input_type="tree", output_type="tree"
)
def tree_list(command, channel, _, data=None, **kwargs):
if data is None:
data = [self._tree]
def t_list(path, node):
for i, n in enumerate(node.children):
p = list(path)
p.append(str(i))
if n.targeted:
j = "+"
elif n.emphasized:
j = "~"
elif n.highlighted:
j = "-"
else:
j = ":"
channel(
"%s%s %s - %s"
% (".".join(p).ljust(10), j, str(n.type), str(n.label))
)
t_list(p, n)
for d in data:
channel("----------")
if d.type == "root":
channel(_("Tree:"))
else:
channel("%s:" % d.label)
t_list([], d)
channel("----------")
return "tree", data
@self.console_argument("drag", help="Drag node address")
@self.console_argument("drop", help="Drop node address")
@self.console_command(
"dnd", help=_("Drag and Drop Node"), input_type="tree", output_type="tree"
)
def tree_dnd(command, channel, _, data=None, drag=None, drop=None, **kwargs):
"""
Drag and Drop command performs a console based drag and drop operation
E.g. "tree dnd 0.1 0.2" will drag node 0.1 into node 0.2
"""
if data is None:
data = [self._tree]
if drop is None:
raise CommandSyntaxError
try:
drag_node = self._tree
for n in drag.split("."):
drag_node = drag_node.children[int(n)]
drop_node = self._tree
for n in drop.split("."):
drop_node = drop_node.children[int(n)]
drop_node.drop(drag_node)
except (IndexError, AttributeError, ValueError):
raise CommandSyntaxError
self.signal("tree_changed")
return "tree", data
@self.console_argument("node", help="Node address for menu")
@self.console_argument("execute", help="Command to execute")
@self.console_command(
"menu",
help=_("Load menu for given node"),
input_type="tree",
output_type="tree",
)
def tree_menu(
command, channel, _, data=None, node=None, execute=None, **kwargs
):
"""
Create menu for a particular node.
Processes submenus, references, radio_state as needed.
"""
try:
menu_node = self._tree
for n in node.split("."):
menu_node = menu_node.children[int(n)]
except (IndexError, AttributeError, ValueError):
raise CommandSyntaxError
menu = []
submenus = {}
def menu_functions(f, cmd_node):
func_dict = dict(f.func_dict)
def specific(event=None):
f(cmd_node, **func_dict)
return specific
for func in self.tree_operations_for_node(menu_node):
submenu_name = func.submenu
submenu = None
if submenu_name in submenus:
submenu = submenus[submenu_name]
elif submenu_name is not None:
submenu = list()
menu.append((submenu_name, submenu))
submenus[submenu_name] = submenu
menu_context = submenu if submenu is not None else menu
if func.reference is not None:
pass
if func.radio_state is not None:
if func.separate_before:
menu_context.append(("------", None))
n = func.real_name
if func.radio_state:
n = "✓" + n
menu_context.append((n, menu_functions(func, menu_node)))
else:
if func.separate_before:
menu_context.append(("------", None))
menu_context.append(
(func.real_name, menu_functions(func, menu_node))
)
if func.separate_after:
menu_context.append(("------", None))
if execute is not None:
try:
execute_command = ("menu", menu)
for n in execute.split("."):
name, cmd = execute_command
execute_command = cmd[int(n)]
name, cmd = execute_command
channel("Executing %s: %s" % (name, str(cmd)))
cmd()
except (IndexError, AttributeError, ValueError, TypeError):
raise CommandSyntaxError
else:
def m_list(path, menu):
for i, n in enumerate(menu):
p = list(path)
p.append(str(i))
name, submenu = n
channel("%s: %s" % (".".join(p).ljust(10), str(name)))
if isinstance(submenu, list):
m_list(p, submenu)
m_list([], menu)
return "tree", data
@self.console_command(
"selected",
help=_("delegate commands to focused value"),
input_type="tree",
output_type="tree",
)
def selected(channel, _, **kwargs):
"""
Set tree list to selected node
"""
return "tree", list(self.flat(selected=True))
@self.console_command(
"emphasized",
help=_("delegate commands to focused value"),
input_type="tree",
output_type="tree",
)
def emphasized(channel, _, **kwargs):
"""
Set tree list to emphasized node
"""
return "tree", list(self.flat(emphasized=True))
@self.console_command(
"highlighted",
help=_("delegate commands to sub-focused value"),
input_type="tree",
output_type="tree",
)
def highlighted(channel, _, **kwargs):
"""
Set tree list to highlighted nodes
"""
return "tree", list(self.flat(highlighted=True))
@self.console_command(
"targeted",
help=_("delegate commands to sub-focused value"),
input_type="tree",
output_type="tree",
)
def targeted(channel, _, **kwargs):
"""
Set tree list to highlighted nodes
"""
return "tree", list(self.flat(targeted=True))
@self.console_command(
"delete",
help=_("delete the given nodes"),
input_type="tree",
output_type="tree",
)
def delete(channel, _, data=None, **kwargs):
"""
Delete nodes.
Structural nodes such as root, elements branch, and operations branch are not able to be deleted
"""
self.remove_nodes(data)
self.signal("tree_changed")
self.signal("refresh_scene", 0)
return "tree", [self._tree]
@self.console_command(
"delegate",
help=_("delegate commands to focused value"),
input_type="tree",
output_type=("op", "elements"),
)
def delegate(channel, _, **kwargs):
"""
Delegate to either ops or elements depending on the current node emphasis
"""
for item in self.flat(emphasized=True):
if item.type.startswith("op"):
return "ops", list(self.ops(emphasized=True))
if item.type in elem_nodes or item.type in ("group", "file"):
return "elements", list(self.elems(emphasized=True))
# ==========
# CLIPBOARD COMMANDS
# ==========
@self.console_option("name", "n", type=str)
@self.console_command(
"clipboard",
help=_("clipboard"),
input_type=(None, "elements"),
output_type="clipboard",
)
def clipboard_base(data=None, name=None, **kwargs):
"""
Clipboard commands. Applies to current selected elements to
make a copy of those elements. Paste a copy of those elements
or cut those elements. Clear clears the clipboard.
The list command will list them but this is only for debug.
"""
if name is not None:
self._clipboard_default = name
if data is None:
return "clipboard", list(self.elems(emphasized=True))
else:
return "clipboard", data
@self.console_command(
"copy",
help=_("clipboard copy"),
input_type="clipboard",
output_type="elements",
)
def clipboard_copy(data=None, **kwargs):
destination = self._clipboard_default
self._clipboard[destination] = [copy(e) for e in data]
return "elements", self._clipboard[destination]
@self.console_option(
"dx", "x", help=_("paste offset x"), type=Length, default=0
)
@self.console_option(
"dy", "y", help=_("paste offset y"), type=Length, default=0
)
@self.console_command(
"paste",
help=_("clipboard paste"),
input_type="clipboard",
output_type="elements",
)
def clipboard_paste(command, channel, _, data=None, dx=None, dy=None, **kwargs):
destination = self._clipboard_default
try:
pasted = [copy(e) for e in self._clipboard[destination]]
except KeyError:
channel(_("Error: Clipboard Empty"))
return
if dx != 0 or dy != 0:
matrix = Matrix(
"translate({dx}, {dy})".format(dx=float(dx), dy=float(dy))
)
for node in pasted:
node.matrix *= matrix
group = self.elem_branch.add(type="group", label="Group")
for p in pasted:
group.add_node(copy(p))
self.set_emphasis([group])
return "elements", pasted
@self.console_command(
"cut",
help=_("clipboard cut"),
input_type="clipboard",
output_type="elements",
)
def clipboard_cut(data=None, **kwargs):
destination = self._clipboard_default
self._clipboard[destination] = [copy(e) for e in data]
self.remove_elements(data)
return "elements", self._clipboard[destination]
@self.console_command(
"clear",
help=_("clipboard clear"),
input_type="clipboard",
output_type="elements",
)
def clipboard_clear(data=None, **kwargs):
destination = self._clipboard_default
old = self._clipboard[destination]
self._clipboard[destination] = None
return "elements", old
@self.console_command(
"contents",
help=_("clipboard contents"),
input_type="clipboard",
output_type="elements",
)
def clipboard_contents(**kwargs):
destination = self._clipboard_default
return "elements", self._clipboard[destination]
@self.console_command(
"list",
help=_("clipboard list"),
input_type="clipboard",
)
def clipboard_list(command, channel, _, **kwargs):
for v in self._clipboard:
k = self._clipboard[v]
channel("%s: %s" % (str(v).ljust(5), str(k)))
# ==========
# NOTES COMMANDS
# ==========
@self.console_option(
"append", "a", type=bool, action="store_true", default=False
)
@self.console_command("note", help=_("note <note>"))
def note(command, channel, _, append=False, remainder=None, **kwargs):
note = remainder
if note is None:
if self.note is None:
channel(_("No Note."))
else:
channel(str(self.note))
else:
if append:
self.note += "\n" + note
else:
self.note = note
channel(_("Note Set."))
channel(str(self.note))
# ==========
# TRACE OPERATIONS
# ==========
# Function to return the euclidean distance
# between two points
def dist(a, b):
return sqrt(pow(a[0] - b[0], 2) + pow(a[1] - b[1], 2))
# Function to check whether a point lies inside
# or on the boundaries of the circle
def is_inside(center, radius, p):
return dist(center, p) <= radius
# The following two functions are used
# To find the equation of the circle when
# three points are given.
# Helper method to get a circle defined by 3 points
def get_circle_center(bx, by, cx, cy):
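            # Circumcenter of the triangle (0, 0), (bx, by), (cx, cy), obtained by
            # intersecting the perpendicular bisectors; D is twice its signed area.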
B = bx * bx + by * by
C = cx * cx + cy * cy
D = bx * cy - by * cx
return [(cy * B - by * C) / (2 * D), (bx * C - cx * B) / (2 * D)]
# Function to return the smallest circle
# that intersects 2 points
def circle_from1(A, B):
# Set the center to be the midpoint of A and B
C = [(A[0] + B[0]) / 2.0, (A[1] + B[1]) / 2.0]
# Set the radius to be half the distance AB
return C, dist(A, B) / 2.0
# Function to return a unique circle that
# intersects three points
def circle_from2(A, B, C):
if A == B:
I, radius = circle_from1(A, C)
return I, radius
elif A == C:
I, radius = circle_from1(A, B)
return I, radius
elif B == C:
I, radius = circle_from1(A, B)
return I, radius
else:
I = get_circle_center(
B[0] - A[0], B[1] - A[1], C[0] - A[0], C[1] - A[1]
)
I[0] += A[0]
I[1] += A[1]
radius = dist(I, A)
return I, radius
# Function to check whether a circle
# encloses the given points
def is_valid_circle(center, radius, P):
# Iterating through all the points
# to check whether the points
# lie inside the circle or not
for p in P:
if not is_inside(center, radius, p):
return False
return True
# Function to return the minimum enclosing
# circle for N <= 3
def min_circle_trivial(P):
assert len(P) <= 3
if not P:
return [0, 0], 0
elif len(P) == 1:
return P[0], 0
elif len(P) == 2:
center, radius = circle_from1(P[0], P[1])
return center, radius
# To check if MEC can be determined
# by 2 points only
for i in range(3):
for j in range(i + 1, 3):
center, radius = circle_from1(P[i], P[j])
if is_valid_circle(center, radius, P):
return center, radius
center, radius = circle_from2(P[0], P[1], P[2])
return center, radius
# Returns the MEC using Welzl's algorithm
# Takes a set of input points P and a set R
# points on the circle boundary.
# n represents the number of points in P
# that are not yet processed.
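        # With the points shuffled beforehand (see welzl() below) the expected running
        # time is linear in the number of points; R never holds more than three points.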
def welzl_helper(P, R, n):
# Base case when all points processed or |R| = 3
if n == 0 or len(R) == 3:
center, radius = min_circle_trivial(R)
return center, radius
            # Pick a point at random
idx = randint(0, n - 1)
p = P[idx]
# Put the picked point at the end of P
# since it's more efficient than
# deleting from the middle of the vector
P[idx], P[n - 1] = P[n - 1], P[idx]
            # Get the MEC circle d from the
            # set of points P - {p}
dcenter, dradius = welzl_helper(P, R.copy(), n - 1)
# If d contains p, return d
if is_inside(dcenter, dradius, p):
return dcenter, dradius
# Otherwise, must be on the boundary of the MEC
R.append(p)
            # Return the MEC for P - {p} and R ∪ {p}
dcenter, dradius = welzl_helper(P, R.copy(), n - 1)
return dcenter, dradius
def welzl(P):
P_copy = P.copy()
shuffle(P_copy)
center, radius = welzl_helper(P_copy, [], len(P_copy))
return center, radius
def generate_hull_shape(method, data, resolution=None):
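            # Collect candidate points per element, then reduce them according to method:
            #   quick   - corners of the overall bounding box
            #   segment - every collected point, in order
            #   circle  - minimum enclosing circle (Welzl), approximated by 100 points
            #   hull / complex - convex hull of the points (complex first samples each
            #                    subpath at DETAIL points)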
if resolution is None:
DETAIL = 500 # How coarse / fine shall a subpath be split
else:
DETAIL = int(resolution)
pts = []
min_val = [float("inf"), float("inf")]
max_val = [-float("inf"), -float("inf")]
for node in data:
if method in ("hull", "segment", "circle"):
try:
path = node.as_path()
except AttributeError:
path = None
                    if path is not None:
p = path.first_point
pts += [(p.x, p.y)]
for segment in path:
p = segment.end
pts += [(p.x, p.y)]
else:
bounds = node.bounds
pts += [
(bounds[0], bounds[1]),
(bounds[0], bounds[3]),
(bounds[2], bounds[1]),
(bounds[2], bounds[3]),
]
elif method in ("complex"):
try:
path = node.as_path()
except AttributeError:
path = None
                    if path is not None:
for subpath in path.as_subpaths():
psp = Path(subpath)
p = psp.first_point
pts += [(p.x, p.y)]
positions = linspace(0, 1, num=DETAIL, endpoint=True)
subj = psp.npoint(positions)
                            # npoint already returns an (N, 2) array, so no reshape is needed
                            # subj.reshape((2, DETAIL))
s = list(map(Point, subj))
for p in s:
pts += [(p.x, p.y)]
else:
bounds = node.bounds
pts += [
(bounds[0], bounds[1]),
(bounds[0], bounds[3]),
(bounds[2], bounds[1]),
(bounds[2], bounds[3]),
]
elif method == "quick":
bounds = node.bounds
min_val[0] = min(min_val[0], bounds[0])
min_val[1] = min(min_val[1], bounds[1])
max_val[0] = max(max_val[0], bounds[2])
max_val[1] = max(max_val[1], bounds[3])
if method == "quick":
if (
not isinf(min_val[0])
and not isinf(min_val[1])
and not isinf(max_val[0])
                    and not isinf(max_val[1])
):
pts += [
(min_val[0], min_val[1]),
(min_val[0], max_val[1]),
(max_val[0], min_val[1]),
(max_val[0], max_val[1]),
]
if method == "segment":
hull = [p for p in pts]
elif method == "circle":
mec_center, mec_radius = welzl(pts)
# So now we have a circle with (mec[0], mec[1]), and mec_radius
hull = []
RES = 100
for i in range(RES):
hull += [
(
mec_center[0] + mec_radius * cos(i / RES * tau),
mec_center[1] + mec_radius * sin(i / RES * tau),
)
]
else:
hull = [p for p in Point.convex_hull(pts)]
if len(hull) != 0:
hull.append(hull[0]) # loop
return hull
@self.console_argument(
"method", help=_("Method to use (one of segment, quick, hull, complex)")
)
@self.console_argument("resolution")
@self.console_command(
"trace",
help=_("trace the given elements"),
input_type=("elements", "shapes", None),
)
def trace_trace_spooler(
command, channel, _, method=None, resolution=None, data=None, **kwargs
):
if method is None:
method = "quick"
method = method.lower()
if not method in ("segment", "quick", "hull", "complex"):
channel(
_(
"Invalid method, please use one of segment, quick, hull, complex."
)
)
return
spooler = self.device.spooler
if data is None:
data = list(self.elems(emphasized=True))
if len(data) == 0:
channel(_("No elements bounds to trace"))
return
hull = generate_hull_shape(method, data, resolution)
if len(hull) == 0:
channel(_("No elements bounds to trace."))
return
def run_shape(spooler, hull):
def trace_hull():
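                    # Generator consumed by the spooler: wait for the running job, switch to
                    # rapid mode, then move along the hull points (passed as mm lengths).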
yield "wait_finish"
yield "rapid_mode"
idx = 0
for p in hull:
idx += 1
yield (
"move_abs",
Length(amount=p[0]).length_mm,
Length(amount=p[1]).length_mm,
)
spooler.job(trace_hull)
run_shape(spooler, hull)
@self.console_argument(
"method",
help=_("Method to use (one of quick, hull, complex, segment, circle)"),
)
@self.console_argument(
"resolution", help=_("Resolution for complex slicing, default=500")
)
@self.console_command(
"tracegen",
help=_("create the trace around the given elements"),
input_type=("elements", "shapes", None),
output_type="elements",
)
def trace_trace_generator(
command, channel, _, method=None, resolution=None, data=None, **kwargs
):
if method is None:
method = "quick"
method = method.lower()
if not method in ("segment", "quick", "hull", "complex", "circle"):
channel(
_(
"Invalid method, please use one of quick, hull, complex, segment, circle."
)
)
return
spooler = self.device.spooler
if data is None:
data = list(self.elems(emphasized=True))
hull = generate_hull_shape(method, data, resolution=resolution)
if len(hull) == 0:
channel(_("No elements bounds to trace."))
return
shape = Polyline(hull)
if shape.is_degenerate():
channel(_("Shape is degenerate."))
return "elements", data
node = self.elem_branch.add(shape=shape, type="elem polyline")
node.stroke = Color("black")
self.set_emphasis([node])
node.focus()
data.append(node)
return "elements", data
# --------------------------- END COMMANDS ------------------------------
def _init_tree(self, kernel):
_ = kernel.translation
# --------------------------- TREE OPERATIONS ---------------------------
def is_regmark(node):
result = False
try:
if node._parent.type == "branch reg":
result = True
except AttributeError:
pass
return result
def has_changes(node):
result = False
try:
if not node.matrix.is_identity():
result = True
except AttributeError:
                # There was an error during the check for matrix.is_identity
pass
return result
@self.tree_separator_after()
@self.tree_conditional(lambda node: len(list(self.ops(emphasized=True))) == 1)
@self.tree_operation(
_("Operation properties"), node_type=operate_nodes, help=""
)
def operation_property(node, **kwargs):
activate = self.kernel.lookup("function/open_property_window_for_node")
if activate is not None:
activate(node)
@self.tree_separator_after()
@self.tree_operation(_("Edit"), node_type="op console", help="")
def edit_console_command(node, **kwargs):
self.open("window/ConsoleProperty", self.gui, node=node)
@self.tree_separator_after()
@self.tree_operation(
_("Element properties"),
node_type=(
"elem ellipse",
"elem path",
"elem point",
"elem polyline",
"elem rect",
"elem line",
),
help="",
)
def path_property(node, **kwargs):
activate = self.kernel.lookup("function/open_property_window_for_node")
if activate is not None:
activate(node)
@self.tree_separator_after()
@self.tree_operation(_("Group properties"), node_type="group", help="")
def group_property(node, **kwargs):
activate = self.kernel.lookup("function/open_property_window_for_node")
if activate is not None:
activate(node)
@self.tree_separator_after()
@self.tree_operation(_("Text properties"), node_type="elem text", help="")
def text_property(node, **kwargs):
activate = self.kernel.lookup("function/open_property_window_for_node")
if activate is not None:
activate(node)
@self.tree_separator_after()
@self.tree_operation(_("Image properties"), node_type="elem image", help="")
def image_property(node, **kwargs):
activate = self.kernel.lookup("function/open_property_window_for_node")
if activate is not None:
activate(node)
@self.tree_conditional(lambda node: not is_regmark(node))
@self.tree_operation(
_("Ungroup elements"), node_type=("group", "file"), help=""
)
def ungroup_elements(node, **kwargs):
for n in list(node.children):
node.insert_sibling(n)
node.remove_node() # Removing group/file node.
@self.tree_conditional(lambda node: not is_regmark(node))
@self.tree_operation(_("Group elements"), node_type=elem_nodes, help="")
def group_elements(node, **kwargs):
# group_node = node.parent.add_sibling(node, type="group", name="Group")
group_node = node.parent.add(type="group", label="Group")
for e in list(self.elems(emphasized=True)):
group_node.append_child(e)
@self.tree_operation(_("Enable/Disable ops"), node_type=op_nodes, help="")
def toggle_n_operations(node, **kwargs):
for n in self.ops(emphasized=True):
n.output = not n.output
n.notify_update()
# TODO: Restore convert node type ability
#
# @self.tree_submenu(_("Convert operation"))
# @self.tree_operation(_("Convert to Image"), node_type=operate_nodes, help="")
# def convert_operation_image(node, **kwargs):
# for n in self.ops(emphasized=True):
# n.operation = "Image"
#
# @self.tree_submenu(_("Convert operation"))
# @self.tree_operation(_("Convert to Raster"), node_type=operate_nodes, help="")
# def convert_operation_raster(node, **kwargs):
# for n in self.ops(emphasized=True):
# n.operation = "Raster"
#
# @self.tree_submenu(_("Convert operation"))
# @self.tree_operation(_("Convert to Engrave"), node_type=operate_nodes, help="")
# def convert_operation_engrave(node, **kwargs):
# for n in self.ops(emphasized=True):
# n.operation = "Engrave"
#
# @self.tree_submenu(_("Convert operation"))
# @self.tree_operation(_("Convert to Cut"), node_type=operate_nodes, help="")
# def convert_operation_cut(node, **kwargs):
# for n in self.ops(emphasized=True):
# n.operation = "Cut"
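# Radio check for the "Speed" submenu: mark the entry that matches the node's current speed.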
def radio_match(node, speed=0, **kwargs):
return node.speed == float(speed)
@self.tree_submenu(_("Speed"))
@self.tree_radio(radio_match)
@self.tree_values("speed", (50, 75, 100, 150, 200, 250, 300, 350))
@self.tree_operation(
_("%smm/s") % "{speed}", node_type=("op raster", "op image"), help=""
)
def set_speed_raster(node, speed=150, **kwargs):
node.speed = float(speed)
self.signal("element_property_reload", node)
@self.tree_submenu(_("Speed"))
@self.tree_radio(radio_match)
@self.tree_values("speed", (5, 10, 15, 20, 25, 30, 35, 40))
@self.tree_operation(
_("%smm/s") % "{speed}",
node_type=("op cut", "op engrave", "op hatch"),
help="",
)
def set_speed_vector(node, speed=35, **kwargs):
node.speed = float(speed)
self.signal("element_property_reload", node)
def radio_match(node, power=0, **kwargs):
return node.power == float(power)
@self.tree_submenu(_("Power"))
@self.tree_radio(radio_match)
@self.tree_values("power", (100, 250, 333, 500, 666, 750, 1000))
@self.tree_operation(
_("%sppi") % "{power}",
node_type=("op cut", "op raster", "op image", "op engrave", "op hatch"),
help="",
)
def set_power(node, power=1000, **kwargs):
node.power = float(power)
self.signal("element_property_reload", node)
def radio_match(node, dpi=100, **kwargs):
return node.dpi == dpi
@self.tree_submenu(_("DPI"))
@self.tree_radio(radio_match)
@self.tree_values("dpi", (100, 250, 333, 500, 666, 750, 1000))
@self.tree_operation(
_("DPI %s") % "{dpi}",
node_type=("op raster", "elem image"),
help=_("Change dpi values"),
)
def set_step_n(node, dpi=1, **kwargs):
node.dpi = dpi
self.signal("element_property_reload", node)
def radio_match(node, passvalue=1, **kwargs):
return (node.passes_custom and passvalue == node.passes) or (
not node.passes_custom and passvalue == 1
)
@self.tree_submenu(_("Set operation passes"))
@self.tree_radio(radio_match)
@self.tree_iterate("passvalue", 1, 10)
@self.tree_operation(
_("Passes %s") % "{passvalue}", node_type=operate_nodes, help=""
)
def set_n_passes(node, passvalue=1, **kwargs):
node.passes = passvalue
node.passes_custom = passvalue != 1
self.signal("element_property_reload", node)
@self.tree_separator_after()
@self.tree_operation(
_("Execute operation(s)"),
node_type=operate_nodes,
help=_("Execute Job for the selected operation(s)."),
)
def execute_job(node, **kwargs):
node.emphasized = True
self("plan0 clear copy-selected\n")
self("window open ExecuteJob 0\n")
@self.tree_separator_after()
@self.tree_operation(
_("Simulate operation(s)"),
node_type=operate_nodes,
help=_("Run simulation for the selected operation(s)"),
)
def compile_and_simulate(node, **kwargs):
node.emphasized = True
self("plan0 copy-selected preprocess validate blob preopt optimize\n")
self("window open Simulation 0\n")
@self.tree_operation(_("Clear all"), node_type="branch ops", help="")
def clear_all(node, **kwargs):
self("operation* delete\n")
@self.tree_operation(_("Clear all"), node_type="branch elems", help="")
def clear_all_ops(node, **kwargs):
self("element* delete\n")
self.elem_branch.remove_all_children()
@self.tree_operation(_("Clear all"), node_type="branch reg", help="")
def clear_all_regmarks(node, **kwargs):
self.reg_branch.remove_all_children()
# ==========
# REMOVE MULTI (Tree Selected)
# ==========
@self.tree_conditional(
lambda cond: len(
list(
self.flat(selected=True, cascade=False, types=non_structural_nodes)
)
)
> 1
)
@self.tree_calc(
"ecount",
lambda i: len(
list(
self.flat(selected=True, cascade=False, types=non_structural_nodes)
)
),
)
@self.tree_operation(
_("Remove %s selected items") % "{ecount}",
node_type=non_structural_nodes,
help="",
)
def remove_multi_nodes(node, **kwargs):
nodes = list(
self.flat(selected=True, cascade=False, types=non_structural_nodes)
)
for node in nodes:
if node.parent is not None: # May have already removed.
node.remove_node()
self.set_emphasis(None)
# ==========
# REMOVE SINGLE (Tree Selected)
# ==========
@self.tree_conditional(
lambda cond: len(
list(
self.flat(selected=True, cascade=False, types=non_structural_nodes)
)
)
== 1
)
@self.tree_operation(
_("Remove '%s'") % "{name}",
node_type=non_structural_nodes,
help="",
)
def remove_type_op(node, **kwargs):
node.remove_node()
self.set_emphasis(None)
# ==========
# Remove Operations (If No Tree Selected)
# Note: This code would rarely match anything, since a tree selection will almost always be present when
# this conditional matches. The tree-selected delete functions are superior.
# ==========
@self.tree_conditional(
lambda cond: len(
list(
self.flat(selected=True, cascade=False, types=non_structural_nodes)
)
)
== 0
)
@self.tree_conditional(lambda node: len(list(self.ops(emphasized=True))) > 1)
@self.tree_calc("ecount", lambda i: len(list(self.ops(emphasized=True))))
@self.tree_operation(
_("Remove %s operations") % "{ecount}",
node_type=(
"op cut",
"op raster",
"op image",
"op engrave",
"op dots",
"op hatch",
"op console",
"lasercode",
"cutcode",
"blob",
),
help="",
)
def remove_n_ops(node, **kwargs):
self("operation delete\n")
# ==========
# REMOVE ELEMENTS
# ==========
@self.tree_conditional(lambda node: len(list(self.elems(emphasized=True))) > 0)
@self.tree_calc("ecount", lambda i: len(list(self.elems(emphasized=True))))
@self.tree_operation(
_("Remove %s elements") % "{ecount}",
node_type=elem_group_nodes,
help="",
)
def remove_n_elements(node, **kwargs):
self("element delete\n")
# ==========
# CONVERT TREE OPERATIONS
# ==========
@self.tree_operation(
_("Convert to Cutcode"),
node_type="lasercode",
help="",
)
def lasercode2cut(node, **kwargs):
node.replace_node(CutCode.from_lasercode(node.commands), type="cutcode")
@self.tree_conditional_try(lambda node: hasattr(node, "as_cutobjects"))
@self.tree_operation(
_("Convert to Cutcode"),
node_type="blob",
help="",
)
def blob2cut(node, **kwargs):
node.replace_node(node.as_cutobjects(), type="cutcode")
@self.tree_operation(
_("Convert to Path"),
node_type="cutcode",
help="",
)
def cutcode2pathcut(node, **kwargs):
cutcode = node.cutcode
elements = list(cutcode.as_elements())
n = None
for element in elements:
n = self.elem_branch.add(element, type="elem path")
node.remove_node()
if n is not None:
n.focus()
@self.tree_submenu(_("Clone reference"))
@self.tree_operation(_("Make 1 copy"), node_type="reference", help="")
def clone_single_element_op(node, **kwargs):
clone_element_op(node, copies=1, **kwargs)
@self.tree_submenu(_("Clone reference"))
@self.tree_iterate("copies", 2, 10)
@self.tree_operation(
_("Make %s copies") % "{copies}", node_type="reference", help=""
)
def clone_element_op(node, copies=1, **kwargs):
index = node.parent.children.index(node)
for i in range(copies):
node.parent.add_reference(node.node, type="reference", pos=index)
node.modified()
self.signal("rebuild_tree")
@self.tree_conditional(lambda node: node.count_children() > 1)
@self.tree_operation(
_("Reverse subitems order"),
node_type=(
"op cut",
"op raster",
"op image",
"op engrave",
"op dots",
"op hatch",
"group",
"branch elems",
"file",
"branch ops",
),
help=_("Reverse the items within this subitem"),
)
def reverse_layer_order(node, **kwargs):
node.reverse()
self.signal("rebuild_tree")
@self.tree_separator_after()
@self.tree_operation(
_("Refresh classification"), node_type="branch ops", help=""
)
def refresh_clasifications(node, **kwargs):
self.remove_elements_from_operations(list(self.elems()))
self.classify(list(self.elems()))
self.signal("rebuild_tree")
materials = [
_("Wood"),
_("Acrylic"),
_("Foam"),
_("Leather"),
_("Cardboard"),
_("Cork"),
_("Textiles"),
_("Paper"),
_("Save-1"),
_("Save-2"),
_("Save-3"),
]
def union_materials_saved():
union = [
d
for d in self.op_data.section_set()
if d not in materials and d != "previous"
]
union.extend(materials)
return union
def difference_materials_saved():
secs = self.op_data.section_set()
difference = [m for m in materials if m not in secs]
return difference
@self.tree_submenu(_("Load"))
@self.tree_values("opname", values=self.op_data.section_set)
@self.tree_operation(_("%s") % "{opname}", node_type="branch ops", help="")
def load_ops(node, opname, **kwargs):
self("material load %s\n" % opname)
@self.tree_separator_before()
@self.tree_submenu(_("Load"))
@self.tree_operation(_("Other/Blue/Red"), node_type="branch ops", help="")
def default_classifications(node, **kwargs):
self.load_default()
@self.tree_submenu(_("Load"))
@self.tree_separator_after()
@self.tree_operation(_("Basic"), node_type="branch ops", help="")
def basic_classifications(node, **kwargs):
self.load_default2()
@self.tree_submenu(_("Save"))
@self.tree_values("opname", values=self.op_data.section_set)
@self.tree_operation("{opname}", node_type="branch ops", help="")
def save_materials(node, opname="saved", **kwargs):
self("material save %s\n" % opname)
@self.tree_separator_before()
@self.tree_submenu(_("Save"))
@self.tree_prompt("opname", _("Name to store current operations under?"))
@self.tree_operation("New", node_type="branch ops", help="")
def save_material_custom(node, opname, **kwargs):
if opname is not None:
self("material save %s\n" % opname.replace(" ", "_"))
@self.tree_submenu(_("Delete"))
@self.tree_values("opname", values=self.op_data.section_set)
@self.tree_operation("{opname}", node_type="branch ops", help="")
def remove_ops(node, opname="saved", **kwargs):
self("material delete %s\n" % opname)
@self.tree_separator_before()
@self.tree_submenu(_("Append operation"))
@self.tree_operation(_("Append Image"), node_type="branch ops", help="")
def append_operation_image(node, pos=None, **kwargs):
self.add_op(ImageOpNode(), pos=pos)
@self.tree_submenu(_("Append operation"))
@self.tree_operation(_("Append Raster"), node_type="branch ops", help="")
def append_operation_raster(node, pos=None, **kwargs):
self.add_op(RasterOpNode(), pos=pos)
@self.tree_submenu(_("Append operation"))
@self.tree_operation(_("Append Engrave"), node_type="branch ops", help="")
def append_operation_engrave(node, pos=None, **kwargs):
self.add_op(EngraveOpNode(), pos=pos)
@self.tree_submenu(_("Append operation"))
@self.tree_operation(_("Append Cut"), node_type="branch ops", help="")
def append_operation_cut(node, pos=None, **kwargs):
self.add_op(CutOpNode(), pos=pos)
@self.tree_submenu(_("Append operation"))
@self.tree_operation(_("Append Hatch"), node_type="branch ops", help="")
def append_operation_hatch(node, pos=None, **kwargs):
self.add_op(HatchOpNode(), pos=pos)
@self.tree_submenu(_("Append special operation(s)"))
@self.tree_operation(_("Append Home"), node_type="branch ops", help="")
def append_operation_home(node, pos=None, **kwargs):
self.op_branch.add(
ConsoleOperation("home -f"),
type="op console",
pos=pos,
)
@self.tree_submenu(_("Append special operation(s)"))
@self.tree_operation(
_("Append Return to Origin"), node_type="branch ops", help=""
)
def append_operation_origin(node, pos=None, **kwargs):
self.op_branch.add(
ConsoleOperation("move_abs 0 0"),
type="op console",
pos=pos,
)
@self.tree_submenu(_("Append special operation(s)"))
@self.tree_operation(_("Append Beep"), node_type="branch ops", help="")
def append_operation_beep(node, pos=None, **kwargs):
self.op_branch.add(
ConsoleOperation("beep"),
type="op console",
pos=pos,
)
@self.tree_submenu(_("Append special operation(s)"))
@self.tree_operation(_("Append Interrupt"), node_type="branch ops", help="")
def append_operation_interrupt(node, pos=None, **kwargs):
self.op_branch.add(
ConsoleOperation('interrupt "Spooling was interrupted"'),
type="op console",
pos=pos,
)
@self.tree_submenu(_("Append special operation(s)"))
@self.tree_operation(
_("Append Home/Beep/Interrupt"), node_type="branch ops", help=""
)
def append_operation_home_beep_interrupt(node, **kwargs):
append_operation_home(node, **kwargs)
append_operation_beep(node, **kwargs)
append_operation_interrupt(node, **kwargs)
@self.tree_submenu(_("Append special operation(s)"))
@self.tree_operation(_("Append Shutdown"), node_type="branch ops", help="")
def append_operation_shutdown(node, pos=None, **kwargs):
self.op_branch.add(
ConsoleOperation("quit"),
type="op console",
pos=pos,
)
@self.tree_operation(
_("Reclassify operations"), node_type="branch elems", help=""
)
def reclassify_operations(node, **kwargs):
elems = list(self.elems())
self.remove_elements_from_operations(elems)
self.classify(list(self.elems()))
self.signal("rebuild_tree")
@self.tree_operation(
_("Duplicate operation(s)"),
node_type=operate_nodes,
help=_("duplicate operation nodes"),
)
def duplicate_operation(node, **kwargs):
operations = self._tree.get(type="branch ops").children
for op in self.ops(emphasized=True):
try:
pos = operations.index(op) + 1
except ValueError:
pos = None
copy_op = copy(op)
self.add_op(copy_op, pos=pos)
for child in op.children:
try:
copy_op.add_reference(child.node)
except AttributeError:
pass
@self.tree_conditional(lambda node: node.count_children() > 1)
@self.tree_submenu(_("Passes"))
@self.tree_operation(
_("Add 1 pass"),
node_type=("op image", "op engrave", "op cut", "op hatch"),
help="",
)
def add_1_pass(node, **kwargs):
add_n_passes(node, copies=1, **kwargs)
@self.tree_conditional(lambda node: node.count_children() > 1)
@self.tree_submenu(_("Passes"))
@self.tree_iterate("copies", 2, 10)
@self.tree_operation(
_("Add %s passes") % "{copies}",
node_type=("op image", "op engrave", "op cut", "op hatch"),
help="",
)
def add_n_passes(node, copies=1, **kwargs):
add_nodes = list(node.children)
removed = False
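# Filter out duplicate child references first so each child is only multiplied once.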
for i in range(0, len(add_nodes)):
for q in range(0, i):
if add_nodes[q] is add_nodes[i]:
add_nodes[i] = None
removed = True
if removed:
add_nodes = [c for c in add_nodes if c is not None]
add_nodes *= copies
for n in add_nodes:
node.add_reference(n.node)
self.signal("rebuild_tree")
@self.tree_conditional(lambda node: node.count_children() > 1)
@self.tree_submenu(_("Duplicate element(s)"))
@self.tree_operation(
_("Duplicate elements 1 time"),
node_type=("op image", "op engrave", "op cut"),
help="",
)
def dup_1_copy(node, **kwargs):
dup_n_copies(node, copies=1, **kwargs)
@self.tree_conditional(lambda node: node.count_children() > 1)
@self.tree_submenu(_("Duplicate element(s)"))
@self.tree_iterate("copies", 2, 10)
@self.tree_operation(
_("Duplicate elements %s times") % "{copies}",
node_type=("op image", "op engrave", "op cut"),
help="",
)
def dup_n_copies(node, copies=1, **kwargs):
add_nodes = list(node.children)
add_nodes *= copies
for n in add_nodes:
node.add_reference(n.node)
self.signal("rebuild_tree")
@self.tree_operation(
_("Make raster image"),
node_type=("op image", "op raster"),
help=_("Convert a vector element into a raster element."),
)
def make_raster_image(node, **kwargs):
bounds = node.bounds
if bounds is None:
return
xmin, ymin, xmax, ymax = bounds
xmin, ymin = self.device.scene_to_device_position(xmin, ymin)
xmax, ymax = self.device.scene_to_device_position(xmax, ymax)
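# Work in device coordinates and derive the raster step size (device units per pixel)
# from the operation's dpi before rendering.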
dpi = node.dpi
oneinch_x = self.device.physical_to_device_length("1in", 0)[0]
oneinch_y = self.device.physical_to_device_length(0, "1in")[1]
step_x = float(oneinch_x / dpi)
step_y = float(oneinch_y / dpi)
make_raster = self.lookup("render-op/make_raster")
image = make_raster(
list(node.flat(types=elem_ref_nodes)),
(xmin, ymin, xmax, ymax),
step_x=step_x,
step_y=step_y,
)
matrix = Matrix(self.device.device_to_scene_matrix())
matrix.post_scale(step_x, step_y)
matrix.post_translate(xmin, ymin)
image_node = ImageNode(
image=image, matrix=matrix, step_x=step_x, step_y=step_y
)
self.elem_branch.add_node(image_node)
node.add_reference(image_node)
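# Helper: return the insertion index immediately after the given operation
# (or after the last emphasized operation if none is given).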
def add_after_index(self, node=None):
try:
if node is None:
node = list(self.ops(emphasized=True))[-1]
operations = self._tree.get(type="branch ops").children
return operations.index(node) + 1
except (ValueError, IndexError):
return None
@self.tree_separator_before()
@self.tree_submenu(_("Add operation"))
@self.tree_operation(_("Add Image"), node_type=operate_nodes, help="")
def add_operation_image(node, **kwargs):
append_operation_image(node, pos=add_after_index(self, node), **kwargs)
@self.tree_submenu(_("Add operation"))
@self.tree_operation(_("Add Raster"), node_type=operate_nodes, help="")
def add_operation_raster(node, **kwargs):
append_operation_raster(node, pos=add_after_index(self, node), **kwargs)
@self.tree_submenu(_("Add operation"))
@self.tree_operation(_("Add Engrave"), node_type=operate_nodes, help="")
def add_operation_engrave(node, **kwargs):
append_operation_engrave(node, pos=add_after_index(self, node), **kwargs)
@self.tree_submenu(_("Add operation"))
@self.tree_operation(_("Add Cut"), node_type=operate_nodes, help="")
def add_operation_cut(node, **kwargs):
append_operation_cut(node, pos=add_after_index(self, node), **kwargs)
@self.tree_submenu(_("Add special operation(s)"))
@self.tree_operation(_("Add Home"), node_type=op_nodes, help="")
def add_operation_home(node, **kwargs):
append_operation_home(node, pos=add_after_index(self, node), **kwargs)
@self.tree_submenu(_("Add special operation(s)"))
@self.tree_operation(_("Add Return to Origin"), node_type=op_nodes, help="")
def add_operation_origin(node, **kwargs):
append_operation_origin(node, pos=add_after_index(self, node), **kwargs)
@self.tree_submenu(_("Add special operation(s)"))
@self.tree_operation(_("Add Beep"), node_type=op_nodes, help="")
def add_operation_beep(node, **kwargs):
append_operation_beep(node, pos=add_after_index(self, node), **kwargs)
@self.tree_submenu(_("Add special operation(s)"))
@self.tree_operation(_("Add Interrupt"), node_type=op_nodes, help="")
def add_operation_interrupt(node, **kwargs):
append_operation_interrupt(node, pos=add_after_index(self, node), **kwargs)
@self.tree_submenu(_("Add special operation(s)"))
@self.tree_operation(_("Add Home/Beep/Interrupt"), node_type=op_nodes, help="")
def add_operation_home_beep_interrupt(node, **kwargs):
pos = add_after_index(self, node)
append_operation_home(node, pos=pos, **kwargs)
if pos:
pos += 1
append_operation_beep(node, pos=pos, **kwargs)
if pos:
pos += 1
append_operation_interrupt(node, pos=pos, **kwargs)
@self.tree_operation(_("Reload '%s'") % "{name}", node_type="file", help="")
def reload_file(node, **kwargs):
filepath = node.filepath
node.remove_node()
self.load(filepath)
@self.tree_operation(
_("Open in System: '{name}'"),
node_type="file",
help=_(
"Open this file in the system application associated with this type of file"
),
)
def open_system_file(node, **kwargs):
filepath = node.filepath
normalized = os.path.realpath(filepath)
import platform
system = platform.system()
if system == "Darwin":
from os import system as open_in_shell
open_in_shell("open '{file}'".format(file=normalized))
elif system == "Windows":
from os import startfile as open_in_shell
open_in_shell('"{file}"'.format(file=normalized))
else:
from os import system as open_in_shell
open_in_shell("xdg-open '{file}'".format(file=normalized))
@self.tree_conditional(lambda node: not is_regmark(node))
@self.tree_submenu(_("Duplicate element(s)"))
@self.tree_operation(_("Make 1 copy"), node_type=elem_nodes, help="")
def duplicate_element_1(node, **kwargs):
duplicate_element_n(node, copies=1, **kwargs)
@self.tree_conditional(lambda node: not is_regmark(node))
@self.tree_submenu(_("Duplicate element(s)"))
@self.tree_iterate("copies", 2, 10)
@self.tree_operation(
_("Make %s copies") % "{copies}", node_type=elem_nodes, help=""
)
def duplicate_element_n(node, copies, **kwargs):
copy_nodes = list()
for e in list(self.elems(emphasized=True)):
for n in range(copies):
copy_node = copy(e)
if hasattr(e, "wxfont"):
copy_node.wxfont = e.wxfont
node.parent.add_node(copy_node)
copy_nodes.append(copy_node)
self.classify(copy_nodes)
self.set_emphasis(None)
@self.tree_conditional(lambda node: not is_regmark(node))
@self.tree_operation(
_("Convert to path"),
node_type=(
"elem ellipse",
"elem path",
"elem polyline",
"elem rect",
"elem line",
),
help="",
)
def convert_to_path(node, **kwargs):
path = node.as_path()
node.replace_node(path=path, type="elem path")
@self.tree_submenu(_("Flip"))
@self.tree_separator_before()
@self.tree_conditional(lambda node: not is_regmark(node))
@self.tree_conditional_try(lambda node: not node.lock)
@self.tree_operation(
_("Horizontally"),
node_type=elem_group_nodes,
help=_("Mirror Horizontally"),
)
def mirror_elem(node, **kwargs):
bounds = node.bounds
if bounds is None:
return
center_x = (bounds[2] + bounds[0]) / 2.0
center_y = (bounds[3] + bounds[1]) / 2.0
self("scale -1 1 %f %f\n" % (center_x, center_y))
@self.tree_conditional(lambda node: not is_regmark(node))
@self.tree_submenu(_("Flip"))
@self.tree_conditional_try(lambda node: not node.lock)
@self.tree_operation(
_("Vertically"),
node_type=elem_group_nodes,
help=_("Flip Vertically"),
)
def flip_elem(node, **kwargs):
bounds = node.bounds
if bounds is None:
return
center_x = (bounds[2] + bounds[0]) / 2.0
center_y = (bounds[3] + bounds[1]) / 2.0
self("scale 1 -1 %f %f\n" % (center_x, center_y))
@self.tree_conditional(lambda node: not is_regmark(node))
@self.tree_conditional_try(lambda node: not node.lock)
@self.tree_submenu(_("Scale"))
@self.tree_iterate("scale", 25, 1, -1)
@self.tree_calc("scale_percent", lambda i: "%0.f" % (600.0 / float(i)))
@self.tree_operation(
_("Scale %s%%") % "{scale_percent}",
node_type=elem_group_nodes,
help=_("Scale Element"),
)
def scale_elem_amount(node, scale, **kwargs):
scale = 6.0 / float(scale)
bounds = node.bounds
if bounds is None:
return
center_x = (bounds[2] + bounds[0]) / 2.0
center_y = (bounds[3] + bounds[1]) / 2.0
self("scale %f %f %f %f\n" % (scale, scale, center_x, center_y))
# @self.tree_conditional(lambda node: isinstance(node.object, SVGElement))
@self.tree_conditional(lambda node: not is_regmark(node))
@self.tree_conditional_try(lambda node: not node.lock)
@self.tree_submenu(_("Rotate"))
@self.tree_values(
"angle",
(
180,
135,
90,
60,
45,
30,
20,
15,
10,
5,
4,
3,
2,
1,
-1,
-2,
-3,
-4,
-5,
-10,
-15,
-20,
-30,
-45,
-60,
-90,
),
)
@self.tree_operation(
_("Rotate %s°") % ("{angle}"), node_type=elem_group_nodes, help=""
)
def rotate_elem_amount(node, angle, **kwargs):
turns = float(angle) / 360.0
bounds = node.bounds
if bounds is None:
return
center_x = (bounds[2] + bounds[0]) / 2.0
center_y = (bounds[3] + bounds[1]) / 2.0
self("rotate %fturn %f %f\n" % (turns, center_x, center_y))
self.signal("ext-modified")
@self.tree_conditional(lambda node: not is_regmark(node))
@self.tree_conditional(lambda node: has_changes(node))
@self.tree_conditional_try(lambda node: not node.lock)
@self.tree_operation(
_("Reify User Changes"), node_type=elem_group_nodes, help=""
)
def reify_elem_changes(node, **kwargs):
self("reify\n")
self.signal("ext-modified")
@self.tree_conditional(lambda node: not is_regmark(node))
@self.tree_conditional_try(lambda node: not node.lock)
@self.tree_operation(_("Break Subpaths"), node_type="elem path", help="")
def break_subpath_elem(node, **kwargs):
self("element subpath\n")
@self.tree_conditional(lambda node: not is_regmark(node))
@self.tree_conditional(lambda node: has_changes(node))
@self.tree_conditional_try(lambda node: not node.lock)
@self.tree_operation(
_("Reset user changes"), node_type=elem_group_nodes, help=""
)
def reset_user_changes(node, copies=1, **kwargs):
self("reset\n")
self.signal("ext-modified")
@self.tree_operation(
_("Merge items"),
node_type="group",
help=_("Merge this node's children into 1 path."),
)
def merge_elements(node, **kwargs):
self("element merge\n")
@self.tree_conditional(lambda node: is_regmark(node))
@self.tree_separator_before()
@self.tree_operation(
_("Move back to elements"), node_type=elem_group_nodes, help=""
)
def move_back(node, copies=1, **kwargs):
# Drag and Drop
drop_node = self.elem_branch
drop_node.drop(node)
self.signal("tree_changed")
@self.tree_conditional(lambda node: not is_regmark(node))
@self.tree_separator_before()
@self.tree_operation(_("Move to regmarks"), node_type=elem_group_nodes, help="")
def move_to_regmark(node, copies=1, **kwargs):
# Drag and Drop
drop_node = self.reg_branch
drop_node.drop(node)
self.signal("tree_changed")
def radio_match(node, i=0, **kwargs):
if "raster_step_x" in node.settings:
step_x = float(node.settings["raster_step_x"])
else:
step_y = 1.0
if "raster_step_y" in node.settings:
step_x = float(node.settings["raster_step_y"])
else:
step_y = 1.0
if i == step_x and i == step_y:
m = node.matrix
if m.a == step_x or m.b == 0.0 or m.c == 0.0 or m.d == step_y:
return True
return False
@self.tree_separator_before()
@self.tree_submenu(_("Step"))
@self.tree_radio(radio_match)
@self.tree_iterate("i", 1, 10)
@self.tree_operation(_("Step %s") % "{i}", node_type="elem image", help="")
def set_step_n_elem(node, i=1, **kwargs):
step_value = i
node.step_x = step_value
node.step_y = step_value
m = node.matrix
tx = m.e
ty = m.f
node.matrix = Matrix.scale(float(step_value), float(step_value))
node.matrix.post_translate(tx, ty)
node.modified()
self.signal("element_property_reload", node)
@self.tree_conditional_try(lambda node: not node.lock)
@self.tree_operation(_("Actualize pixels"), node_type="elem image", help="")
def image_actualize_pixels(node, **kwargs):
self("image resample\n")
@self.tree_submenu(_("Z-depth divide"))
@self.tree_iterate("divide", 2, 10)
@self.tree_operation(
_("Divide into %s images") % "{divide}", node_type="elem image", help=""
)
def image_zdepth(node, divide=1, **kwargs):
if node.image.mode != "RGBA":
node.image = node.image.convert("RGBA")
band = 255 / divide
for i in range(0, divide):
threshold_min = i * band
threshold_max = threshold_min + band
self("image threshold %f %f\n" % (threshold_min, threshold_max))
@self.tree_conditional(lambda node: not node.lock)
@self.tree_submenu(_("Image"))
@self.tree_operation(_("Unlock manipulations"), node_type="elem image", help="")
def image_unlock_manipulations(node, **kwargs):
self("image unlock\n")
@self.tree_submenu(_("Image"))
@self.tree_operation(_("Dither to 1 bit"), node_type="elem image", help="")
def image_dither(node, **kwargs):
self("image dither\n")
@self.tree_submenu(_("Image"))
@self.tree_operation(_("Invert image"), node_type="elem image", help="")
def image_invert(node, **kwargs):
self("image invert\n")
@self.tree_submenu(_("Image"))
@self.tree_operation(_("Mirror horizontal"), node_type="elem image", help="")
def image_mirror(node, **kwargs):
self("image mirror\n")
@self.tree_submenu(_("Image"))
@self.tree_operation(_("Flip vertical"), node_type="elem image", help="")
def image_flip(node, **kwargs):
self("image flip\n")
@self.tree_submenu(_("Image"))
@self.tree_operation(_("Rotate 90° CW"), node_type="elem image", help="")
def image_cw(node, **kwargs):
self("image cw\n")
@self.tree_submenu(_("Image"))
@self.tree_operation(_("Rotate 90° CCW"), node_type="elem image", help="")
def image_ccw(node, **kwargs):
self("image ccw\n")
@self.tree_submenu(_("Image"))
@self.tree_operation(_("Save output.png"), node_type="elem image", help="")
def image_save(node, **kwargs):
self("image save output.png\n")
@self.tree_submenu(_("RasterWizard"))
@self.tree_values(
"script", values=list(self.match("raster_script", suffix=True))
)
@self.tree_operation(
_("RasterWizard: %s") % "{script}", node_type="elem image", help=""
)
def image_rasterwizard_open(node, script=None, **kwargs):
self("window open RasterWizard %s\n" % script)
@self.tree_submenu(_("Apply raster script"))
@self.tree_values(
"script", values=list(self.match("raster_script", suffix=True))
)
@self.tree_operation(
_("Apply: %s") % "{script}", node_type="elem image", help=""
)
def image_rasterwizard_apply(node, script=None, **kwargs):
self("image wizard %s\n" % script)
@self.tree_conditional_try(lambda node: hasattr(node, "as_elements"))
@self.tree_operation(_("Convert to SVG"), node_type=op_nodes, help="")
def cutcode_convert_svg(node, **kwargs):
# Todo: unsure if still works
self.add_elems(list(node.as_elements()))
@self.tree_conditional_try(lambda node: hasattr(node, "generate"))
@self.tree_operation(_("Process as Operation"), node_type=op_nodes, help="")
def cutcode_operation(node, **kwargs):
# Todo: unsure if still works
self.add_op(node)
@self.tree_conditional(lambda node: len(node.children) > 0)
@self.tree_separator_before()
@self.tree_operation(
_("Expand all children"),
node_type=(
"op cut",
"op raster",
"op image",
"op engrave",
"op dots",
"op hatch",
"branch elems",
"branch ops",
"branch reg",
"group",
"file",
"root",
),
help="Expand all children of this given node.",
)
def expand_all_children(node, **kwargs):
node.notify_expand()
@self.tree_conditional(lambda node: len(node.children) > 0)
@self.tree_operation(
_("Collapse all children"),
node_type=(
"op cut",
"op raster",
"op image",
"op engrave",
"op dots",
"op hatch",
"branch elems",
"branch ops",
"branch reg",
"group",
"file",
"root",
),
help="Collapse all children of this given node.",
)
def collapse_all_children(node, **kwargs):
node.notify_collapse()
def service_detach(self, *args, **kwargs):
self.unlisten_tree(self)
def service_attach(self, *args, **kwargs):
self.listen_tree(self)
def shutdown(self, *args, **kwargs):
self.save_persistent_operations("previous")
self.save_persistent_penbox()
self.pen_data.write_configuration()
self.op_data.write_configuration()
for e in self.flat():
e.unregister()
def save_persistent_operations(self, name):
settings = self.op_data
settings.clear_persistent(name)
for i, op in enumerate(self.ops()):
section = "%s %06i" % (name, i)
settings.write_persistent(section, "type", op.type)
op.save(settings, section)
settings.write_configuration()
def clear_persistent_operations(self, name):
settings = self.op_data
subitems = list(settings.derivable(name))
for section in subitems:
settings.clear_persistent(section)
settings.write_configuration()
def load_persistent_operations(self, name):
self.clear_operations()
settings = self.op_data
operation_branch = self._tree.get(type="branch ops")
for section in list(settings.derivable(name)):
op_type = settings.read_persistent(str, section, "type")
op = operation_branch.add(type=op_type)
op.load(settings, section)
self.classify(list(self.elems()))
def emphasized(self, *args):
self._emphasized_bounds_dirty = True
self._emphasized_bounds = None
def altered(self, *args):
self._emphasized_bounds_dirty = True
self._emphasized_bounds = None
def modified(self, *args):
self._emphasized_bounds_dirty = True
self._emphasized_bounds = None
def listen_tree(self, listener):
self._tree.listen(listener)
def unlisten_tree(self, listener):
self._tree.unlisten(listener)
def load_default(self):
self.clear_operations()
self.add_op(
ImageOpNode(
color="black",
speed=140.0,
power=1000.0,
raster_step=3,
)
)
self.add_op(RasterOpNode())
self.add_op(EngraveOpNode())
self.add_op(CutOpNode())
self.classify(list(self.elems()))
def load_default2(self):
self.clear_operations()
self.add_op(
ImageOpNode(
color="black",
speed=140.0,
power=1000.0,
raster_step=3,
)
)
self.add_op(RasterOpNode())
self.add_op(EngraveOpNode(color="blue"))
self.add_op(EngraveOpNode(color="green"))
self.add_op(EngraveOpNode(color="magenta"))
self.add_op(EngraveOpNode(color="cyan"))
self.add_op(EngraveOpNode(color="yellow"))
self.add_op(CutOpNode())
self.classify(list(self.elems()))
def tree_operations_for_node(self, node):
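# Yield every registered tree operation applicable to this node, skipping those whose
# conditionals fail, expanding value iterators, and computing the formatted menu name,
# radio state and calculated values for each entry.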
for func, m, sname in self.find("tree", node.type, ".*"):
reject = False
for cond in func.conditionals:
if not cond(node):
reject = True
break
if reject:
continue
for cond in func.try_conditionals:
try:
if not cond(node):
reject = True
break
except Exception:
continue
if reject:
continue
func_dict = {
"name": str(node.label),
"label": str(node.label),
}
iterator = func.values
if iterator is None:
iterator = [0]
else:
try:
iterator = list(iterator())
except TypeError:
pass
for i, value in enumerate(iterator):
func_dict["iterator"] = i
func_dict["value"] = value
try:
func_dict[func.value_name] = value
except AttributeError:
pass
for calc in func.calcs:
key, c = calc
value = c(value)
func_dict[key] = value
if func.radio is not None:
try:
func.radio_state = func.radio(node, **func_dict)
except:
func.radio_state = False
else:
func.radio_state = None
name = func.name.format_map(func_dict)
func.func_dict = func_dict
func.real_name = name
yield func
def flat(self, **kwargs):
yield from self._tree.flat(**kwargs)
@staticmethod
def tree_calc(value_name, calc_func):
def decor(func):
func.calcs.append((value_name, calc_func))
return func
return decor
@staticmethod
def tree_values(value_name, values):
def decor(func):
func.value_name = value_name
func.values = values
return func
return decor
@staticmethod
def tree_iterate(value_name, start, stop, step=1):
def decor(func):
func.value_name = value_name
func.values = range(start, stop, step)
return func
return decor
@staticmethod
def tree_radio(radio_function):
def decor(func):
func.radio = radio_function
return func
return decor
@staticmethod
def tree_submenu(submenu):
def decor(func):
func.submenu = submenu
return func
return decor
@staticmethod
def tree_prompt(attr, prompt, data_type=str):
def decor(func):
func.user_prompt.append(
{
"attr": attr,
"prompt": prompt,
"type": data_type,
}
)
return func
return decor
@staticmethod
def tree_conditional(conditional):
def decor(func):
func.conditionals.append(conditional)
return func
return decor
@staticmethod
def tree_conditional_try(conditional):
def decor(func):
func.try_conditionals.append(conditional)
return func
return decor
@staticmethod
def tree_reference(node):
def decor(func):
func.reference = node
return func
return decor
@staticmethod
def tree_separator_after():
def decor(func):
func.separate_after = True
return func
return decor
@staticmethod
def tree_separator_before():
def decor(func):
func.separate_before = True
return func
return decor
def tree_operation(self, name, node_type=None, help=None, **kwargs):
def decorator(func):
@functools.wraps(func)
def inner(node, **ik):
returned = func(node, **ik, **kwargs)
return returned
if isinstance(node_type, tuple):
ins = node_type
else:
ins = (node_type,)
# inner.long_help = func.__doc__
inner.help = help
inner.node_type = ins
inner.name = name
inner.radio = None
inner.submenu = None
inner.reference = None
inner.separate_after = False
inner.separate_before = False
inner.conditionals = list()
inner.try_conditionals = list()
inner.user_prompt = list()
inner.calcs = list()
inner.values = [0]
registered_name = inner.__name__
for _in in ins:
p = "tree/%s/%s" % (_in, registered_name)
if p in self._registered:
raise NameError(
"A function of this name was already registered: %s" % p
)
self.register(p, inner)
return inner
return decorator
def validate_ids(self):
idx = 1
uid = {}
missing = list()
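# Collect nodes that lack an id and note existing ids so that the generated
# "meerk40t:<n>" ids cannot collide with them.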
for node in self.flat():
if node.id is None:
missing.append(node)
else:
uid[node.id] = node
for m in missing:
while f"meerk40t:{idx}" in uid:
idx += 1
m.id = f"meerk40t:{idx}"
uid[m.id] = m
@property
def reg_branch(self):
return self._tree.get(type="branch reg")
@property
def op_branch(self):
return self._tree.get(type="branch ops")
@property
def elem_branch(self):
return self._tree.get(type="branch elems")
def ops(self, **kwargs):
operations = self._tree.get(type="branch ops")
for item in operations.flat(depth=1, **kwargs):
if item.type.startswith("branch") or item.type.startswith("ref"):
continue
yield item
def elems(self, **kwargs):
elements = self._tree.get(type="branch elems")
for item in elements.flat(types=elem_nodes, **kwargs):
yield item
def elems_nodes(self, depth=None, **kwargs):
elements = self._tree.get(type="branch elems")
for item in elements.flat(types=elem_group_nodes, depth=depth, **kwargs):
yield item
def regmarks(self, **kwargs):
elements = self._tree.get(type="branch reg")
for item in elements.flat(types=elem_nodes, **kwargs):
yield item
def regmarks_nodes(self, depth=None, **kwargs):
elements = self._tree.get(type="branch reg")
for item in elements.flat(types=elem_group_nodes, depth=depth, **kwargs):
yield item
def top_element(self, **kwargs):
"""
Returns the first matching node via a depth first search.
"""
for e in self.elem_branch.flat(**kwargs):
return e
return None
def first_element(self, **kwargs):
"""
Returns the first matching element node via a depth first search. Elements must be type elem.
"""
for e in self.elems(**kwargs):
return e
return None
def has_emphasis(self):
"""
Returns whether any element is emphasized
"""
for e in self.elems_nodes(emphasized=True):
return True
return False
def count_elems(self, **kwargs):
return len(list(self.elems(**kwargs)))
def count_op(self, **kwargs):
return len(list(self.ops(**kwargs)))
def get(self, obj=None, type=None):
return self._tree.get(obj=obj, type=type)
def get_op(self, index, **kwargs):
for i, op in enumerate(self.ops(**kwargs)):
if i == index:
return op
raise IndexError
def get_elem(self, index, **kwargs):
for i, elem in enumerate(self.elems(**kwargs)):
if i == index:
return elem
raise IndexError
def get_elem_node(self, index, **kwargs):
for i, elem in enumerate(self.elems_nodes(**kwargs)):
if i == index:
return elem
raise IndexError
def add_op(self, op, pos=None):
"""
Add an operation. Wraps it within a node, and appends it to the tree.
@return:
"""
operation_branch = self._tree.get(type="branch ops")
operation_branch.add_node(op, pos=pos)
def add_ops(self, adding_ops):
operation_branch = self._tree.get(type="branch ops")
items = []
for op in adding_ops:
operation_branch.add_node(op)
items.append(op)
return items
def add_elems(self, adding_elements, classify=False, branch_type="branch elems"):
"""
Add multiple svg elements to the tree.
@param adding_elements:
@param classify:
@param branch_type:
@return:
"""
branch = self._tree.get(type=branch_type)
items = []
for element in adding_elements:
node_type = get_type_from_element(element)
if node_type:
items.append(branch.add(element, type=node_type))
if branch_type == "branch elems":
self.signal("element_added", adding_elements)
elif branch_type == "branch reg":
self.signal("regmark_added", adding_elements)
if classify:
self.classify(adding_elements)
return items
def clear_operations(self):
operations = self._tree.get(type="branch ops")
operations.remove_all_children()
def clear_elements(self):
elements = self._tree.get(type="branch elems")
elements.remove_all_children()
def clear_regmarks(self):
elements = self._tree.get(type="branch reg")
elements.remove_all_children()
def clear_files(self):
pass
def clear_elements_and_operations(self):
self.clear_elements()
self.clear_operations()
def clear_all(self):
self.clear_elements()
self.clear_operations()
self.clear_files()
self.clear_note()
self.validate_selected_area()
def clear_note(self):
self.note = None
def remove_nodes(self, node_list):
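# Mark the nodes, their descendants and any references for deletion first, then
# remove them bottom-up while leaving the structural branch nodes intact.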
for node in node_list:
for n in node.flat():
n._mark_delete = True
for ref in list(n._references):
ref._mark_delete = True
for n in reversed(list(self.flat())):
if not hasattr(n, "_mark_delete"):
continue
if n.type in ("root", "branch elems", "branch reg", "branch ops"):
continue
n.remove_node(children=False, references=False)
def remove_elements(self, element_node_list):
for elem in element_node_list:
elem.remove_node(references=True)
self.validate_selected_area()
def remove_operations(self, operations_list):
for op in operations_list:
for i, o in enumerate(list(self.ops())):
if o is op:
o.remove_node()
self.signal("operation_removed", op)
def remove_elements_from_operations(self, elements_list):
for node in elements_list:
for ref in list(node._references):
ref.remove_node()
def selected_area(self):
if self._emphasized_bounds_dirty:
self.validate_selected_area()
return self._emphasized_bounds
def validate_selected_area(self):
boundary_points = []
for e in self.elem_branch.flat(
types=elem_nodes,
emphasized=True,
):
if e.bounds is None:
continue
box = e.bounds
top_left = [box[0], box[1]]
top_right = [box[2], box[1]]
bottom_left = [box[0], box[3]]
bottom_right = [box[2], box[3]]
boundary_points.append(top_left)
boundary_points.append(top_right)
boundary_points.append(bottom_left)
boundary_points.append(bottom_right)
if len(boundary_points) == 0:
new_bounds = None
else:
xmin = min([e[0] for e in boundary_points])
ymin = min([e[1] for e in boundary_points])
xmax = max([e[0] for e in boundary_points])
ymax = max([e[1] for e in boundary_points])
new_bounds = [xmin, ymin, xmax, ymax]
self._emphasized_bounds_dirty = False
if self._emphasized_bounds != new_bounds:
self._emphasized_bounds = new_bounds
self.signal("selected_bounds", self._emphasized_bounds)
def highlight_children(self, node_context):
"""
Recursively highlight the children.
@param node_context:
@return:
"""
for child in node_context.children:
child.highlighted = True
self.highlight_children(child)
# def target_clones(self, node_context, node_exclude, object_search):
# """
# Recursively highlight the children.
#
# @param node_context: context node to search from
# @param node_exclude: excluded nodes
# @param object_search: Specific searched for object.
# @return:
# """
# for child in node_context.children:
# self.target_clones(child, node_exclude, object_search)
# if child is node_exclude:
# continue
# if child.object is None:
# continue
# if object_search is child.object:
# child.targeted = True
def set_selected(self, selected):
"""
Selected is the sublist of specifically selected nodes.
"""
for s in self._tree.flat():
in_list = selected is not None and s in selected
if s.selected:
if not in_list:
s.selected = False
else:
if in_list:
s.selected = True
if selected is not None:
for e in selected:
e.selected = True
def set_emphasis(self, emphasize):
"""
If any operation is selected, all sub-operations are highlighted.
If any element is emphasized, all references are highlighted.
If any element is emphasized, all operations referencing that element are targeted.
"""
for s in self._tree.flat():
if s.highlighted:
s.highlighted = False
if s.targeted:
s.targeted = False
in_list = emphasize is not None and s in emphasize
if s.emphasized:
if not in_list:
s.emphasized = False
else:
if in_list:
s.emphasized = True
if emphasize is not None:
for e in emphasize:
if e.type == "reference":
e.node.emphasized = True
e.highlighted = True
else:
e.emphasized = True
# if hasattr(e, "object"):
# self.target_clones(self._tree, e, e.object)
self.highlight_children(e)
def center(self):
bounds = self._emphasized_bounds
return (bounds[2] + bounds[0]) / 2.0, (bounds[3] + bounds[1]) / 2.0
def ensure_positive_bounds(self):
b = self._emphasized_bounds
if b is None:
return
self._emphasized_bounds = [
min(b[0], b[2]),
min(b[1], b[3]),
max(b[0], b[2]),
max(b[1], b[3]),
]
self.signal("selected_bounds", self._emphasized_bounds)
def update_bounds(self, b):
self._emphasized_bounds = [b[0], b[1], b[2], b[3]]
self.signal("selected_bounds", self._emphasized_bounds)
def move_emphasized(self, dx, dy):
for node in self.elems(emphasized=True):
node.matrix.post_translate(dx, dy)
node.modified()
def set_emphasized_by_position(self, position, keep_old_selection=False, use_smallest=False):
def contains(box, x, y=None):
if y is None:
y = x[1]
x = x[0]
return box[0] <= x <= box[2] and box[1] <= y <= box[3]
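# If the position lies within the current selection bounds, keep the selection as-is.
# Otherwise pick the largest (or smallest, if requested) element whose bounds contain
# the position, optionally appending it to the previous selection.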
if self.has_emphasis():
if self._emphasized_bounds is not None and contains(
self._emphasized_bounds, position
) and not keep_old_selection:
return # Select by position aborted since selection position within current select bounds.
# Remember previous selection, in case we need to append...
e_list = []
f_list = [] # found elements...
if keep_old_selection:
for node in self.elems(emphasized=True):
e_list.append(node)
for node in self.elems_nodes(emphasized=False):
try:
bounds = node.bounds
except AttributeError:
continue # No bounds.
if bounds is None:
continue
if contains(bounds, position):
f_list.append(node)
if len(f_list) > 0:
# We checked that before, f_list contains only elements with valid bounds...
e = None
if use_smallest:
e_area = float("inf")
else:
e_area = -float("inf")
for node in f_list:
cc = node.bounds
f_area = (cc[2] - cc[0]) * (cc[3] - cc[1])
if use_smallest:
if f_area < e_area:
e_area = f_area
e = node
else:
if f_area > e_area:
e_area = f_area
e = node
if e is not None:
bounds = e.bounds
e_list.append(e)
if self._emphasized_bounds is not None:
cc = self._emphasized_bounds
bounds = (
min(bounds[0], cc[0]),
min(bounds[1], cc[1]),
max(bounds[2], cc[2]),
max(bounds[3], cc[3]),
)
if len(e_list) > 0:
self._emphasized_bounds = bounds
self.set_emphasis(e_list)
else:
self._emphasized_bounds = None
self.set_emphasis(None)
def classify(self, elements, operations=None, add_op_function=None):
"""
Classify does the placement of elements within operations.
"Image" is the default for images.
Typically, elements with red strokes are classified as cut operations,
and elements with any other stroke color are classified as engrave.
However, this differs based on the ops in question.
@param elements: list of elements to classify.
@param operations: operations list to classify into.
@param add_op_function: function to add a new operation, because of a lack of classification options.
@return:
"""
if elements is None:
return
if not len(list(self.ops())) and not self.operation_default_empty:
self.load_default()
reverse = self.classify_reverse
if reverse:
elements = reversed(elements)
if operations is None:
operations = list(self.ops())
if add_op_function is None:
add_op_function = self.add_op
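# Attach each element to every matching operation (image and dots operations claim an
# element exclusively); when nothing matches, create a sensible default operation.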
for node in elements:
# The following lines handle 0.7-era special ops that were added to the ops list
if hasattr(node, "operation"):
add_op_function(node)
continue
was_classified = False
# image_added code removed because it could never be used
for op in operations:
if op.type == "op raster":
if (
hasattr(node, "stroke")
and node.stroke is not None
and (op.color == node.stroke or op.default)
):
op.add_reference(node)
was_classified = True
elif node.type == "elem image":
op.add_reference(node)
was_classified = True
elif node.type == "elem text":
op.add_reference(node)
was_classified = True
elif (
hasattr(node, "fill")
and node.fill is not None
and node.fill.argb is not None
):
op.add_reference(node)
was_classified = True
elif op.type in ("op engrave", "op cut", "op hatch"):
if (
hasattr(node, "stroke")
and node.stroke is not None
and op.color == node.stroke
) or op.default:
op.add_reference(node)
was_classified = True
elif op.type == "op image" and node.type == "elem image":
op.add_reference(node)
was_classified = True
break # May only classify in one image operation.
elif op.type == "op dots" and node.type == "elem point":
op.add_reference(node)
was_classified = True
break # May only classify in Dots.
if not was_classified:
op = None
if node.type == "elem image":
op = ImageOpNode(output=False)
elif node.type == "elem point":
op = DotsOpNode(output=False)
elif (
hasattr(node, "stroke")
and node.stroke is not None
and node.stroke.value is not None
):
op = EngraveOpNode(color=node.stroke, speed=35.0)
if op is not None:
add_op_function(op)
op.add_reference(node)
operations.append(op)
if (
hasattr(node, "fill")
and node.fill is not None
and node.fill.argb is not None
):
op = RasterOpNode(color=0, output=False)
add_op_function(op)
op.add_reference(node)
operations.append(op)
def add_classify_op(self, op):
"""
Ops are added as part of classify as elements are iterated that need a new op.
Rather than adding them at the end (which would create a random sequence of Engrave and Cut
operations, perhaps with an Image, Raster or Dots operation mixed in), we try to group
operations together, adding the new operation:
1. After the last operation of the same type if one exists; or if not
2. After the last operation of the highest priority existing operation, where `Dots` is the lowest priority and
Cut is the highest.
"""
operations = self._tree.get(type="branch ops").children
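# Scan existing operations from the end so the new op is inserted directly after the
# last operation of the same type, falling back to priority order below.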
for pos, old_op in reversed_enumerate(operations):
if op.type == old_op.type:
return self.add_op(op, pos=pos + 1)
# No operation of same type found. So we will look for last operation of a lower priority and add after it.
try:
priority = OP_PRIORITIES.index(op.type)
except ValueError:
return self.add_op(op)
for pos, old_op in reversed_enumerate(operations):
try:
if OP_PRIORITIES.index(old_op.type) < priority:
return self.add_op(op, pos=pos + 1)
except ValueError:
pass
return self.add_op(op, pos=0)
# def classify_advanced(self, elements, operations=None, add_op_function=None):
# """
# Classify does the placement of elements within operations.
# In the future, we expect to be able to save and reload the mapping of
# elements to operations, but at present classification is the only means
# of assigning elements to operations.
#
# This classification routine ensures that every element is assigned
# to at least one operation - the user does NOT have to check whether
# some elements have not been assigned (which was an issue with 0.6.x).
#
# Because of how overlaying raster elements can have white areas masking
# underlying non-white areas, the classification of raster elements is complex,
# and indeed deciding whether elements should be classified as vector or raster
# has edge case complexities.
#
# SVGImage is classified as Image.
# Dots are a special type of Path
# All other SVGElement types are Shapes / Text
#
# Paths consisting of a move followed by a single straight line segment
# are never Raster (since they have no width) - testing for more complex straight-line
# path-segments, and whether multiple such segments are also straight lines, is complex.
#
# Shapes/Text with grey (R=G=B) strokes are raster by default regardless of fill
#
# Shapes/Text with non-transparent Fill are raster by default - except for one
# edge case: Elements with white fill, non-grey stroke and no raster elements behind
# them are considered vector elements.
#
# Shapes/Text with no fill and non-grey strokes are vector by default - except
# for one edge case: Elements with strokes that have other raster elements
# overlaying the stroke should in some cases be considered raster elements,
# but there are several use cases, and counter-examples are likely easy to create.
# The algorithm below tries to be conservative in deciding whether to switch a default
# vector to a raster due to believing it is part of raster combined with elements on top.
# In essence, if there are raster elements on top (later in the list of elements) that
# have the given vector element's stroke colour as either a stroke or fill colour, then the
# probability is that this vector element should be considered a raster instead.
#
# RASTER ELEMENTS
# Because rastering of overlapping elements depends on the sequence of the elements
# (think of the difference between a white fill above or below a black fill)
# it is essential that raster elements are added to operations in the same order
# that they exist in the file/elements branch.
#
# Raster elements are handled differently depending on whether existing
# Raster operations are simple or complex:
# 1. Simple - all existing raster ops have the same color
# (default being a different colour to any other); or
# 2. Complex - there are existing raster ops of two different colors
# (default being a different colour to any other)
#
# Simple - Raster elements are matched immediately to all Raster operations.
# Complex - Raster elements are processed in a more complex second pass (see below)
#
# VECTOR ELEMENTS
# Vector Shapes/Text are attempted to match to Cut/Engrave/Raster operations of
# exact same color (regardless of default raster or vector)
#
# If not matched to exact colour, vector elements are classified based on colour:
# 1. Redish strokes are considered cuts
# 2. Other colours are considered engraves
# If a default Cut/Engrave operation exists then the element is classified to it.
# Otherwise, a new operation of matching color and type is created.
# New White Engrave operations are created disabled by default.
#
# SIMPLE RASTER CLASSIFICATION
# All existing raster ops are of the same color (or there are no existing raster ops)
#
# In this case all raster operations will be assigned either to:
# A. all existing raster ops (if there are any); or
# B. to a new Default Raster operation we create in a similar way as vector elements
#
# Because raster elements are all added to the same operations in pass 1 and without being
# grouped, the sequence of elements is retained by default, and no special handling is needed.
#
# COMPLEX RASTER CLASSIFICATION
# There are existing raster ops of at least 2 different colours.
#
# In this case we are going to try to match raster elements to raster operations by colour.
# But this is complicated as we need to keep overlapping raster elements together in the
# same operations because raster images are generated within each operation.
#
# So in this case we classify vector and special elements in a first pass,
# and then analyse and classify raster operations in a special second pass.
#
# Because we have to analyse all raster elements together, when you load a new file
# Classify has to be called once with all elements in the file
# rather than on an element-by-element basis.
#
# In the second pass, we do the following:
#
# 1. Group rasters by whether they have overlapping bounding boxes.
# After this, if rasters are in separate groups then they are in entirely separate
# areas of the burn which do not overlap. Consequently, they can be allocated
# to different operations without causing incorrect results.
#
# Note 1: It is difficult to ensure that elements are retained in sequence when doing
# grouping. Before adding to the raster operations, we sort back into the
# original element sequence.
#
# Note 2: The current algorithm uses bounding-boxes. One edge case is to have two
# separate raster patterns of different colours that do NOT overlap but whose
# bounding-boxes DO overlap. In these cases they will both be allocated to the same
# raster Operations whereas they potentially could be allocated to different Operations.
#
# 2. For each group of raster objects, determine whether there are existing Raster operations
# of the same colour as at least one element in the group.
# If any element in a group matches the color of an operation, then
# all the raster elements of the group will be added to that operation.
#
# 3. If there are any raster elements that are not classified in this way, then:
# A) If there are Default Raster Operation(s), then the remaining raster elements are
# allocated to those.
# B) Otherwise, if there are any non-default raster operations that are empty and those
# raster operations are all the same colour, then the remaining raster operations
# will be allocated to those Raster operations.
# C) Otherwise, a new Default Raster operation will be created and remaining
# Raster elements will be added to that.
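#
# A minimal, commented sketch of the grouping step described in 1. above (hedged).
# It mirrors the reverse-index merge used by the commented-out implementation further
# below and the bbox_overlap() helper defined later in this class; the function name
# is illustrative only.
#
#     def merge_overlapping_groups(groups, overlap):
#         # groups:  list of lists of (element, bbox) tuples
#         # overlap: predicate such as bbox_overlap(b1, b2) defined below
#         for i in range(len(groups) - 2, -1, -1):
#             for j in range(len(groups) - 1, i, -1):
#                 if any(overlap(e1[1], e2[1]) for e1 in groups[i] for e2 in groups[j]):
#                     # Any overlap between the two groups merges them into one.
#                     groups[i].extend(groups[j])
#                     del groups[j]
#         return groups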
#
# LIMITATIONS: The current code does NOT do the following:
#
# a. Handle rasters in second or later files which overlap elements from earlier files which
# have already been classified into operations. It is assumed that if they happen to
# overlap that is coincidence. After all, the files could have been added in a different
# order and would then have produced a different result.
# b. Handle the reclassification of single elements which have e.g. had their colour
# changed. (The potential use cases are many and varied, and difficult or
# impossible to predict comprehensively.)
#
# It may be that we will need to:
#
# 1. Use the total list of Shape / Text elements loaded in the `Elements Branch` sequence
# to keep elements in the correct sequence in an operation.
# 2. Handle cases where the user resequences elements by ensuring that a drag and drop
# of elements in the Elements branch of the tree is reflected in the sequence in Operations
# and vice versa. This could, however, get messy.
#
#
# @param elements: list of elements to classify.
# @param operations: operations list to classify into.
# @param add_op_function: function to add a new operation, because of a lack of classification options.
# @return:
# """
# debug = self.kernel.channel("classify", timestamp=True)
#
# if self.legacy_classification:
# debug("classify: legacy")
# self.classify_legacy(elements, operations, add_op_function)
# return
#
# if elements is None:
# return
#
# if operations is None:
# operations = list(self.ops())
# if add_op_function is None:
# add_op_function = self.add_classify_op
#
# reverse = self.classify_reverse
# # If reverse then we insert all elements into operations at the beginning rather than appending at the end
# # EXCEPT for Rasters which have to be in the correct sequence.
# element_pos = 0 if reverse else None
#
# vector_ops = []
# raster_ops = []
# special_ops = []
# new_ops = []
# default_cut_ops = []
# default_engrave_ops = []
# default_raster_ops = []
# rasters_one_pass = None
#
# for op in operations:
# if not op.type.startswith("op"):
# continue
# if op.type == "op console":
# continue
# if op.default:
# if op.type == "op cut":
# default_cut_ops.append(op)
# if op.type == "op engrave":
# default_engrave_ops.append(op)
# if op.type == "op raster":
# default_raster_ops.append(op)
# if op.type in ("op cut", "op engrave"):
# vector_ops.append(op)
# elif op.type == "op raster":
# raster_ops.append(op)
# op_color = op.color.rgb if not op.default else "default"
# if rasters_one_pass is not False:
# if rasters_one_pass is not None:
# if str(rasters_one_pass) != str(op_color):
# rasters_one_pass = False
# else:
# rasters_one_pass = op_color
# else:
# special_ops.append(op)
# if rasters_one_pass is not False:
# rasters_one_pass = True
# if debug:
# debug(
# "classify: ops: {passes}, {v} vectors, {r} rasters, {s} specials".format(
# passes="one pass" if rasters_one_pass else "two passes",
# v=len(vector_ops),
# r=len(raster_ops),
# s=len(special_ops),
# )
# )
#
# elements_to_classify = []
# for element in elements:
# if element is None:
# debug("classify: not classifying - element is None")
# continue
# if hasattr(element, "operation"):
# add_op_function(element)
# if debug:
# debug(
# "classify: added element as op: {op}".format(
# op=str(op),
# )
# )
# continue
#
# dot = is_dot(element)
# straight_line = is_straight_line(element)
# # print(element.stroke, element.fill, element.fill.alpha, is_straight_line, is_dot)
#
# # Check for default vector operations
# element_vector = False
# if isinstance(element, (Shape, SVGText)) and not dot:
# # Vector if not filled
# if (
# element.fill is None
# or element.fill.rgb is None
# or (element.fill.alpha is not None and element.fill.alpha == 0)
# or straight_line
# ):
# element_vector = True
#
# # Not vector if grey stroke
# if (
# element_vector
# and element.stroke is not None
# and element.stroke.rgb is not None
# and element.stroke.red == element.stroke.green
# and element.stroke.red == element.stroke.blue
# ):
# element_vector = False
#
# elements_to_classify.append(
# (
# element,
# element_vector,
# dot,
# straight_line,
# )
# )
# if debug:
# debug(
# "classify: elements: {e} elements to classify".format(
# e=len(elements_to_classify),
# )
# )
#
# # Handle edge cases
# # Convert raster elements with white fill and no raster elements behind to vector
# # Because the white fill is not hiding anything.
# for i, (
# element,
# element_vector,
# dot,
# straight_line,
# ) in enumerate(elements_to_classify):
# if (
# # Raster?
# not element_vector
# and isinstance(element, (Shape, SVGText))
# and not dot
# # White non-transparent fill?
# and element.fill is not None
# and element.fill.rgb is not None
# and element.fill.rgb == 0xFFFFFF
# and element.fill.alpha is not None
# and element.fill.alpha != 0
# # But not grey stroke?
# and (
# element.stroke is None
# or element.stroke.rgb is None
# or element.stroke.red != element.stroke.green
# or element.stroke.red != element.stroke.blue
# )
# ):
# bbox = element.bbox()
# # Now check for raster elements behind
# for e2 in elements_to_classify[:i]:
# # Ignore vectors
# if e2[1]:
# continue
# # If underneath then stick with raster?
# if self.bbox_overlap(bbox, e2[0].bbox()):
# break
# else:
# # No rasters underneath - convert to vector
# if debug:
# debug(
# "classify: edge-case: treating raster as vector: {label}".format(
# label=self.element_label_id(element),
# )
# )
#
# element_vector = True
# elements_to_classify[i] = (
# element,
# element_vector,
# dot,
# straight_line,
# )
#
# # Convert vector elements with element in front crossing the stroke to raster
# for i, (
# element,
# element_vector,
# dot,
# straight_line,
# ) in reversed_enumerate(elements_to_classify):
# if (
# element_vector
# and element.stroke is not None
# and element.stroke.rgb is not None
# and element.stroke.rgb != 0xFFFFFF
# ):
# bbox = element.bbox()
# color = element.stroke.rgb
# # Now check for raster elements in front whose path crosses over this path
# for e in elements_to_classify[i + 1 :]:
# # Raster?
# if e[1]:
# continue
# # Stroke or fill same colour?
# if (
# e[0].stroke is None
# or e[0].stroke.rgb is None
# or e[0].stroke.rgb != color
# ) and (
# e[0].fill is None
# or e[0].fill.alpha is None
# or e[0].fill.alpha == 0
# or e[0].fill.rgb is None
# or e[0].fill.rgb != color
# ):
# continue
# # We have an element with a matching color
# if self.bbox_overlap(bbox, e[0].bbox()):
# # Rasters on top - convert to raster
# if debug:
# debug(
# "classify: edge-case: treating vector as raster: {label}".format(
# label=self.element_label_id(element),
# )
# )
#
# element_vector = False
# elements_to_classify[i] = (
# element,
# element_vector,
# dot,
# straight_line,
# )
# break
#
# raster_elements = []
# for (
# element,
# element_vector,
# dot,
# straight_line,
# ) in elements_to_classify:
#
# element_color = self.element_classify_color(element)
# if isinstance(element, (Shape, SVGText)) and (
# element_color is None or element_color.rgb is None
# ):
# if debug:
# debug(
# "classify: not classifying - no stroke or fill color: {e}".format(
# e=self.element_label_id(element, short=False),
# )
# )
# continue
#
# element_added = False
# if dot or isinstance(element, SVGImage):
# for op in special_ops:
# if (dot and op.type == "op dots") or (
# isinstance(element, SVGImage) and op.type == "op image"
# ):
# op.add_reference(element.node, pos=element_pos)
# element_added = True
# break # May only classify in one Dots or Image operation and indeed in one operation
# elif element_vector:
# # Vector op (i.e. no fill) with exact colour match to Raster Op will be rastered
# for op in raster_ops:
# if (
# op.color is not None
# and op.color.rgb == element_color.rgb
# and op not in default_raster_ops
# ):
# if not rasters_one_pass:
# op.add_reference(element.node, pos=element_pos)
# elif not element_added:
# raster_elements.append((element, element.bbox()))
# element_added = True
#
# for op in vector_ops:
# if (
# op.color is not None
# and op.color.rgb == element_color.rgb
# and op not in default_cut_ops
# and op not in default_engrave_ops
# ):
# op.add_reference(element.node, pos=element_pos)
# element_added = True
# if (
# element.stroke is None
# or element.stroke.rgb is None
# or element.stroke.rgb == 0xFFFFFF
# ):
# if debug:
# debug(
# "classify: not classifying - white element at back: {e}".format(
# e=self.element_label_id(element, short=False),
# )
# )
# continue
#
# elif rasters_one_pass:
# for op in raster_ops:
# if op.color is not None and op.color.rgb == element_color.rgb:
# op.add_reference(element.node, pos=element_pos)
# element_added = True
# else:
# raster_elements.append((element, element.bbox()))
# continue
#
# if element_added:
# continue
#
# if element_vector:
# is_cut = Color.distance_sq("red", element_color) <= 18825
# if is_cut:
# for op in default_cut_ops:
# op.add_reference(element.node, pos=element_pos)
# element_added = True
# else:
# for op in default_engrave_ops:
# op.add_reference(element.node, pos=element_pos)
# element_added = True
# elif (
# rasters_one_pass
# and isinstance(element, (Shape, SVGText))
# and not dot
# and raster_ops
# ):
# for op in raster_ops:
# op.add_reference(element.node, pos=element_pos)
# element_added = True
#
# if element_added:
# continue
#
# # Need to add a new operation to classify into
# op = None
# if dot:
# op = DotsOpNode(default=True)
# special_ops.append(op)
# elif isinstance(element, SVGImage):
# op = ImageOpNode(default=True)
# special_ops.append(op)
# elif isinstance(element, (Shape, SVGText)):
# if element_vector:
# if (
# is_cut
# ): # This will be initialised because criteria are same as above
# op = CutOpNode(color=abs(element_color))
# else:
# op = EngraveOpNode(
# operation="Engrave", color=abs(element_color)
# )
# if element_color == Color("white"):
# op.output = False
# vector_ops.append(op)
# elif rasters_one_pass:
# op = RasterOpNode(color="Transparent", default=True)
# default_raster_ops.append(op)
# raster_ops.append(op)
# if op is not None:
# new_ops.append(op)
# add_op_function(op)
# # element cannot be added to op before op is added to operations - otherwise refelem is not created.
# op.add_reference(element.node, pos=element_pos)
# if debug:
# debug(
# "classify: added op: {op}".format(
# op=str(op),
# )
# )
#
# # End loop "for element in elements"
#
# if rasters_one_pass:
# return
#
# # Now deal with two-pass raster elements
# # It is ESSENTIAL that elements are added to operations in the same order as original.
# # The easiest way to ensure this is to create groups using a copy of raster_elements and
# # then ensure that groups have elements in the same order as in raster_elements.
# if debug:
# debug(
# "classify: raster pass two: {n} elements".format(
# n=len(raster_elements),
# )
# )
#
# # Debugging print statements have been left in as comments as this code can
# # be complex to debug and even print statements can be difficult to craft
#
# # This is a list of groups, where each group is a list of tuples, each an element and its bbox.
# # Initial list has a separate group for each element.
# raster_groups = [[e] for e in raster_elements]
# raster_elements = [e[0] for e in raster_elements]
# # print("initial", list(map(lambda g: list(map(lambda e: e[0].id,g)), raster_groups)))
#
# # We are using old-fashioned iterators because Python cannot cope with consolidating a list whilst iterating over it.
# for i in range(len(raster_groups) - 2, -1, -1):
# g1 = raster_groups[i]
# for j in range(len(raster_groups) - 1, i, -1):
# g2 = raster_groups[j]
# if self.group_elements_overlap(g1, g2):
# # print("g1", list(map(lambda e: e[0].id,g1)))
# # print("g2", list(map(lambda e: e[0].id,g2)))
#
# # if elements in the group overlap
# # add the element tuples from group 2 to group 1
# g1.extend(g2)
# # and remove group 2
# del raster_groups[j]
#
# # print("g1+g2", list(map(lambda e: e[0].id,g1)))
# # print("reduced", list(map(lambda g: list(map(lambda e: e[0].id,g)), raster_groups)))
# if debug:
# debug(
# "classify: condensed to {n} raster groups".format(
# n=len(raster_groups),
# )
# )
#
# # Remove bbox and add element colour from groups
# # Change `list` to `groups` which are a list of tuples, each tuple being element and its classification color
# raster_groups = list(
# map(
# lambda g: tuple(((e[0], self.element_classify_color(e[0])) for e in g)),
# raster_groups,
# )
# )
#
# # print("grouped", list(map(lambda g: list(map(lambda e: e[0].id,g)), raster_groups)))
#
# # Add groups to operations of matching colour (and remove from list)
# # groups added to at least one existing raster op will not be added to default raster ops.
# groups_added = []
# for op in raster_ops:
# if (
# op not in default_raster_ops
# and op.color is not None
# and op.color.rgb is not None
# ):
# # Make a list of elements to add (same tuples)
# elements_to_add = []
# groups_count = 0
# for group in raster_groups:
# for e in group:
# if e[1].rgb == op.color.rgb:
# # An element in this group matches op color
# # So add elements to list
# elements_to_add.extend(group)
# if group not in groups_added:
# groups_added.append(group)
# groups_count += 1
# break # to next group
# if elements_to_add:
# if debug:
# debug(
# "classify: adding {e} elements in {g} groups to {label}".format(
# e=len(elements_to_add),
# g=groups_count,
# label=str(op),
# )
# )
# # Create simple list of elements sorted by original element order
# elements_to_add = sorted(
# [e[0] for e in elements_to_add], key=raster_elements.index
# )
# for element in elements_to_add:
# op.add_reference(element.node, pos=element_pos)
#
# # Now remove groups added to at least one op
# for group in groups_added:
# raster_groups.remove(group)
#
# if not raster_groups: # added all groups
# return
#
# # Because groups don't matter further simplify back to a simple element_list
# elements_to_add = []
# for g in raster_groups:
# elements_to_add.extend(g)
# elements_to_add = sorted(
# [e[0] for e in elements_to_add], key=raster_elements.index
# )
# if debug:
# debug(
# "classify: {e} elements in {g} raster groups to add to default raster op(s)".format(
# e=len(elements_to_add),
# g=len(raster_groups),
# )
# )
#
# # Remaining elements are added to one of the following groups of operations:
# # 1. to default raster ops if they exist; otherwise
# # 2. to empty raster ops if they exist and are all the same color; otherwise to
# # 3. a new default Raster operation.
# if not default_raster_ops:
# # Because this is a check for an empty operation, this functionality relies on all elements being classified at the same time.
# # If you add elements individually, after the first raster operation the empty ops will no longer be empty and a default Raster op will be created instead.
# default_raster_ops = [op for op in raster_ops if len(op.children) == 0]
# color = False
# for op in default_raster_ops:
# if op.color is None or op.color.rgb is None:
# op_color = "None"
# else:
# op_color = op.color.rgb
# if color is False:
# color = op_color
# elif color != op_color:
# default_raster_ops = []
# break
# if not default_raster_ops:
# op = RasterOpNode(color="Transparent", default=True)
# default_raster_ops.append(op)
# add_op_function(op)
# if debug:
# debug(
# "classify: default raster op added: {op}".format(
# op=str(op),
# )
# )
# else:
# if debug:
# for op in default_raster_ops:
# debug(
# "classify: default raster op selected: {op}".format(op=str(op))
# )
#
# for element in elements_to_add:
# for op in default_raster_ops:
# op.add_reference(element.node, pos=element_pos)
@staticmethod
def element_label_id(element, short=True):
if element.node is None:
if short:
return element.id
return "{id}: {path}".format(id=element.id, path=str(element))
elif ":" in element.node.label and short:
return element.node.label.split(":", 1)[0]
else:
return element.node.label
@staticmethod
def bbox_overlap(b1, b2):
if b1[0] <= b2[2] and b1[2] >= b2[0] and b1[1] <= b2[3] and b1[3] >= b2[1]:
return True
return False
def group_elements_overlap(self, g1, g2):
for e1 in g1:
for e2 in g2:
if self.bbox_overlap(e1[1], e2[1]):
return True
return False
@staticmethod
def element_classify_color(element: SVGElement):
element_color = element.stroke
if element_color is None or element_color.rgb is None:
element_color = element.fill
return element_color
def load(self, pathname, **kwargs):
kernel = self.kernel
_ = kernel.translation
for loader, loader_name, sname in kernel.find("load"):
for description, extensions, mimetype in loader.load_types():
if str(pathname).lower().endswith(extensions):
try:
results = loader.load(self, self, pathname, **kwargs)
except FileNotFoundError:
return False
except BadFileError as e:
kernel._console_channel(_("File is Malformed") + ": " + str(e))
except OSError:
return False
else:
if results:
self.signal("tree_changed")
return True
return False
def load_types(self, all=True):
kernel = self.kernel
_ = kernel.translation
filetypes = []
if all:
filetypes.append(_("All valid types"))
exts = []
for loader, loader_name, sname in kernel.find("load"):
for description, extensions, mimetype in loader.load_types():
for ext in extensions:
exts.append("*.%s" % ext)
filetypes.append(";".join(exts))
for loader, loader_name, sname in kernel.find("load"):
for description, extensions, mimetype in loader.load_types():
exts = []
for ext in extensions:
exts.append("*.%s" % ext)
filetypes.append("%s (%s)" % (description, extensions[0]))
filetypes.append(";".join(exts))
return "|".join(filetypes)
def save(self, pathname):
kernel = self.kernel
for saver, save_name, sname in kernel.find("save"):
for description, extension, mimetype in saver.save_types():
if pathname.lower().endswith(extension):
saver.save(self, pathname, "default")
return True
return False
def save_types(self):
kernel = self.kernel
filetypes = []
for saver, save_name, sname in kernel.find("save"):
for description, extension, mimetype in saver.save_types():
filetypes.append("%s (%s)" % (description, extension))
filetypes.append("*.%s" % extension)
return "|".join(filetypes)
```
#### File: core/node/cutnode.py
```python
from meerk40t.core.node.node import Node
class CutNode(Node):
"""
Node type "cutcode"
Cutcode nodes store cutcode within the tree. When processing in a plan this should be converted to a normal cutcode
object.
"""
def __init__(self, cutcode=None, **kwargs):
super().__init__(type="cutcode", **kwargs)
self.output = True
self.cutcode = cutcode
def __repr__(self):
return "CutNode('%s', %s, %s)" % (
self.type,
str(self.cutcode),
str(self._parent),
)
def __copy__(self):
return CutNode(self.cutcode)
def __len__(self):
return 1
def default_map(self, default_map=None):
default_map = super(CutNode, self).default_map(default_map=default_map)
default_map["element_type"] = "Cutcode"
return default_map
def drop(self, drag_node):
return False
def as_cutobjects(self, closed_distance=15):
yield from self.cutcode
```
#### File: meerk40t/dxf/plugin.py
```python
def plugin(kernel, lifecycle=None):
if lifecycle == "invalidate":
try:
import ezdxf
except ImportError:
print("DXF plugin could not load because ezdxf is not installed.")
return True
elif lifecycle == "register":
from meerk40t.dxf.dxf_io import DxfLoader
kernel.register("load/DxfLoader", DxfLoader)
_ = kernel.translation
choices = [
{
"attr": "dxf_center",
"object": kernel.elements,
"default": True,
"type": bool,
"label": _("DXF Centering"),
"tip": _(
"Fit (scale down if necessary) and center a DXF file within the bed"
),
},
]
kernel.register_choices("preferences", choices)
```
#### File: meerk40t/extra/embroider.py
```python
from meerk40t.core.units import Length
from meerk40t.svgelements import Angle, Matrix, Path, Polyline
from meerk40t.tools.pathtools import EulerianFill
def plugin(kernel, lifecycle):
if lifecycle == "register":
_ = kernel.translation
context = kernel.root
@context.console_option(
"angle", "a", type=Angle.parse, default="0deg", help=_("Angle of the fill")
)
@context.console_option(
"distance",
"d",
type=Length,
default="0.5mm",
help=_("Length between rungs"),
)
@context.console_command("embroider", help=_("embroider <angle> <distance>"))
def embroider(command, channel, _, angle=None, distance=None, **kwargs):
elements = context.elements
channel(_("Embroidery Filling"))
efill = EulerianFill(float(distance))
for node in elements.elems(emphasized=True):
try:
path = Path(node.shape)
except AttributeError:
try:
path = Path(node.path)
except AttributeError:
continue
if angle is not None:
path *= Matrix.rotate(angle)
pts = [abs(path).point(i / 100.0, error=1e-4) for i in range(101)]
efill += pts
points = efill.get_fill()
for s in split(points):
result = Path(Polyline(s, stroke="black"))
if angle is not None:
result *= Matrix.rotate(-angle)
node = elements.elem_branch.add(path=result, type="elem path")
elements.classify([node])
def split(points):
pos = 0
for i, pts in enumerate(points):
if pts is None:
yield points[pos : i - 1]
pos = i + 1
if pos != len(points):
yield points[pos : len(points)]
```
#### File: grbl/gui/gui.py
```python
def plugin(service, lifecycle):
if lifecycle == "invalidate":
try:
import serial
except ImportError:
return True
return not service.has_feature("wx")
if lifecycle == "service":
return "provider/device/grbl"
if lifecycle == "added":
from meerk40t.grbl.gui.grblconfiguration import GRBLConfiguration
from meerk40t.grbl.gui.grblserialcontroller import SerialController
from meerk40t.gui.icons import (
icons8_computer_support_50,
icons8_connected_50,
icons8_emergency_stop_button_50,
icons8_pause_50,
)
service.register("window/Serial-Controller", SerialController)
service.register("window/Configuration", GRBLConfiguration)
_ = service._
service.register(
"button/control/Controller",
{
"label": _("Serial Controller"),
"icon": icons8_connected_50,
"tip": _("Opens GRBL Serial Sender"),
"action": lambda e: service("window toggle Serial-Controller\n"),
},
)
service.register(
"button/config/Configuration",
{
"label": _("Config"),
"icon": icons8_computer_support_50,
"tip": _("Opens device-specific configuration window"),
"action": lambda v: service("window toggle Configuration\n"),
},
)
service.register(
"button/control/Pause",
{
"label": _("Pause"),
"icon": icons8_pause_50,
"tip": _("Pause the laser"),
"action": lambda v: service("pause\n"),
},
)
service.register(
"button/control/Stop",
{
"label": _("Stop"),
"icon": icons8_emergency_stop_button_50,
"tip": _("Emergency stop the laser"),
"action": lambda v: service("estop\n"),
},
)
service.add_service_delegate(GRBLGui(service))
class GRBLGui:
def __init__(self, context):
self.context = context
# This is a stub.
```
#### File: meerk40t/gui/notes.py
```python
import wx
from wx import aui
from .icons import icons8_comments_50
from .mwindow import MWindow
_ = wx.GetTranslation
def register_panel(window, context):
panel = NotePanel(window, wx.ID_ANY, context=context)
pane = (
aui.AuiPaneInfo()
.Float()
.MinSize(100, 100)
.FloatingSize(170, 230)
.MaxSize(500, 500)
.Caption(_("Notes"))
.CaptionVisible(not context.pane_lock)
.Name("notes")
.Hide()
)
pane.dock_proportion = 100
pane.control = panel
pane.submenu = _("Tools")
window.on_pane_add(pane)
context.register("pane/notes", pane)
class NotePanel(wx.Panel):
def __init__(self, *args, context=None, pane=False, **kwds):
kwds["style"] = kwds.get("style", 0) | wx.TAB_TRAVERSAL
wx.Panel.__init__(self, *args, **kwds)
self.context = context
self.check_auto_open_notes = wx.CheckBox(
self, wx.ID_ANY, _("Automatically Open Notes")
)
self.text_notes = wx.TextCtrl(
self,
wx.ID_ANY,
"",
style=wx.TE_BESTWRAP | wx.TE_MULTILINE | wx.TE_WORDWRAP | wx.TE_RICH,
)
self.__set_properties()
self.__do_layout()
self.Bind(
wx.EVT_CHECKBOX, self.on_check_auto_note_open, self.check_auto_open_notes
)
self.Bind(wx.EVT_TEXT, self.on_text_notes, self.text_notes)
self.Bind(wx.EVT_TEXT_ENTER, self.on_text_notes, self.text_notes)
# end wxGlade
def __set_properties(self):
# begin wxGlade: NotePanel.__set_properties
self.check_auto_open_notes.SetToolTip(
_("Automatically open notes if they exist when file is opened.")
)
# end wxGlade
def __do_layout(self):
# begin wxGlade: NotePanel.__do_layout
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_1.Add(self.check_auto_open_notes, 0, 0, 0)
sizer_1.Add(self.text_notes, 1, wx.EXPAND, 0)
self.SetSizer(sizer_1)
sizer_1.Fit(self)
self.Layout()
# end wxGlade
def pane_show(self, *args):
self.context.setting(bool, "auto_note", True)
self.check_auto_open_notes.SetValue(self.context.elements.auto_note)
if self.context.elements.note is not None:
self.text_notes.SetValue(self.context.elements.note)
self.context.listen("note", self.on_note_listen)
def pane_hide(self):
self.context.unlisten("note", self.on_note_listen)
def on_check_auto_note_open(self, event=None): # wxGlade: Notes.<event_handler>
self.context.elements.auto_note = self.check_auto_open_notes.GetValue()
def on_text_notes(self, event=None): # wxGlade: Notes.<event_handler>
if len(self.text_notes.GetValue()) == 0:
self.context.elements.note = None
else:
self.context.elements.note = self.text_notes.GetValue()
self.context.elements.signal("note", self)
def on_note_listen(self, origin, source):
if source is self:
return
note = self.context.elements.note
if self.context.elements.note is None:
note = ""
if self.text_notes.GetValue() != note:
self.text_notes.SetValue(note)
class Notes(MWindow):
def __init__(self, *args, **kwds):
super().__init__(730, 621, *args, **kwds)
self.panel = NotePanel(self, wx.ID_ANY, context=self.context)
self.add_module_delegate(self.panel)
_icon = wx.NullIcon
_icon.CopyFromBitmap(icons8_comments_50.GetBitmap())
self.SetIcon(_icon)
self.SetTitle(_("Notes"))
self.Children[0].SetFocus()
@staticmethod
def sub_register(kernel):
kernel.register("wxpane/Notes", register_panel)
kernel.register(
"button/project/Notes",
{
"label": _("Notes"),
"icon": icons8_comments_50,
"tip": _("Open Notes Window"),
"action": lambda v: kernel.console("window toggle Notes\n"),
},
)
def window_open(self):
self.context.close(self.name)
self.panel.pane_show()
def window_close(self):
self.panel.pane_hide()
```
#### File: gui/propertypanels/groupproperties.py
```python
import wx
from ...svgelements import SVG_ATTR_ID
from ..icons import icons8_group_objects_50
from ..mwindow import MWindow
_ = wx.GetTranslation
class GroupPropertiesPanel(wx.Panel):
def __init__(self, *args, context=None, node=None, **kwds):
kwds["style"] = kwds.get("style", 0) | wx.TAB_TRAVERSAL
wx.Panel.__init__(self, *args, **kwds)
self.context = context
self.node = node
self.text_id = wx.TextCtrl(self, wx.ID_ANY, "")
self.text_label = wx.TextCtrl(self, wx.ID_ANY, "")
self.__set_properties()
self.__do_layout()
try:
if node.id is not None:
self.text_id.SetValue(str(node.id))
except AttributeError:
pass
try:
if node.label is not None:
self.text_label.SetValue(str(node.label))
except AttributeError:
pass
self.Bind(wx.EVT_TEXT, self.on_text_id_change, self.text_id)
self.Bind(wx.EVT_TEXT_ENTER, self.on_text_id_change, self.text_id)
self.Bind(wx.EVT_TEXT, self.on_text_label_change, self.text_label)
self.Bind(wx.EVT_TEXT_ENTER, self.on_text_label_change, self.text_label)
# end wxGlade
def __set_properties(self):
pass
def __do_layout(self):
# begin wxGlade: GroupProperty.__do_layout
sizer_8 = wx.BoxSizer(wx.VERTICAL)
sizer_2 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Label")), wx.VERTICAL
)
sizer_1 = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, _("Id")), wx.VERTICAL)
sizer_1.Add(self.text_id, 0, wx.EXPAND, 0)
sizer_8.Add(sizer_1, 1, wx.EXPAND, 0)
sizer_2.Add(self.text_label, 0, wx.EXPAND, 0)
sizer_8.Add(sizer_2, 1, wx.EXPAND, 0)
sizer_8.Add((0, 0), 0, 0, 0)
self.SetSizer(sizer_8)
self.Layout()
self.Centre()
# end wxGlade
def on_text_id_change(self, event=None): # wxGlade: ElementProperty.<event_handler>
try:
self.node.id = self.text_id.GetValue()
self.node.values[SVG_ATTR_ID] = self.node.id
# self.context.signal("element_property_update", self.element)
except AttributeError:
pass
def on_text_label_change(
self, event=None
): # wxGlade: ElementProperty.<event_handler>
if len(self.text_label.GetValue()):
self.node.label = self.text_label.GetValue()
else:
self.node.label = None
self.context.elements.signal("element_property_update", self.node)
class GroupProperty(MWindow):
def __init__(self, *args, node=None, **kwds):
super().__init__(372, 141, *args, **kwds)
self.panel = GroupPropertiesPanel(
self, wx.ID_ANY, context=self.context, node=node
)
self.add_module_delegate(self.panel)
_icon = wx.NullIcon
_icon.CopyFromBitmap(icons8_group_objects_50.GetBitmap())
self.SetIcon(_icon)
self.SetTitle(_("Group Properties"))
def window_preserve(self):
return False
def window_menu(self):
return False
```
#### File: gui/scene/scenespacewidget.py
```python
from meerk40t.gui.scene.sceneconst import (
HITCHAIN_DELEGATE_AND_HIT,
RESPONSE_CHAIN,
RESPONSE_CONSUME,
)
from meerk40t.gui.scene.widget import Widget
from meerk40t.svgelements import Matrix, Viewbox
class SceneSpaceWidget(Widget):
"""
SceneSpaceWidget contains two sections:
Interface: Drawn on top, uses no matrix.
Scene: Drawn at a particular scale relative to the zoom-pan scene.
"""
def __init__(self, scene):
Widget.__init__(self, scene, all=True)
self._view = None
self._frame = None
self.aspect = False
self.interface_widget = Widget(scene)
self.scene_widget = Widget(scene)
self.add_widget(-1, self.interface_widget)
self.add_widget(-1, self.scene_widget)
self.last_position = None
self._previous_zoom = None
self._placement_event = None
self._placement_event_type = None
def hit(self):
"""
If any other widget captures the events, it takes priority. But if nothing is hit, then the events
should be dealt with here. These are mostly zoom and pan events.
"""
return HITCHAIN_DELEGATE_AND_HIT
@property
def pan_factor(self):
pf = self.scene.context.pan_factor
if self.scene.context.mouse_pan_invert:
pf = -pf
return pf
@property
def zoom_factor(self):
zf = self.scene.context.zoom_factor
if self.scene.context.mouse_zoom_invert:
zf = -zf
zf += 1.0
return zf
@property
def zoom_forward(self):
return self.zoom_factor
@property
def zoom_backwards(self):
zf = self.zoom_factor
if zf == 0:
return 1.0
return 1.0 / zf
def event(self, window_pos=None, space_pos=None, event_type=None):
"""
Process the zooming and panning for events that no other widget has captured.
If nothing else was hit by the event, the scene manipulation events are processed here.
"""
if event_type == "hover":
return RESPONSE_CHAIN
if self.aspect:
return RESPONSE_CONSUME
if event_type == "wheelup" and self.scene.context.mouse_wheel_pan:
self.scene_widget.matrix.post_translate(0, -self.pan_factor)
elif event_type == "wheeldown" and self.scene.context.mouse_wheel_pan:
self.scene_widget.matrix.post_translate(0, self.pan_factor)
elif event_type == "wheelup" or event_type == "wheelup_ctrl":
self.scene_widget.matrix.post_scale(
self.zoom_forward, self.zoom_forward, space_pos[0], space_pos[1]
)
self.scene.request_refresh()
return RESPONSE_CONSUME
# elif event_type == "zoom-in":
# self.scene_widget.matrix.post_scale(self.zoom_forward, self.zoom_forward, space_pos[0], space_pos[1])
# self.scene.request_refresh()
# return RESPONSE_CONSUME
elif event_type == "rightdown+alt":
self._previous_zoom = 1.0
self._placement_event = space_pos
self._placement_event_type = "zoom"
return RESPONSE_CONSUME
elif event_type == "rightdown+control":
self._previous_zoom = 1.0
self._placement_event = space_pos
self._placement_event_type = "pan"
return RESPONSE_CONSUME
elif event_type == "rightup":
self._previous_zoom = None
self._placement_event = None
self._placement_event_type = None
elif event_type == "wheeldown" or event_type == "wheeldown_ctrl":
self.scene_widget.matrix.post_scale(
self.zoom_backwards, self.zoom_backwards, space_pos[0], space_pos[1]
)
self.scene.request_refresh()
return RESPONSE_CONSUME
# elif event_type == "zoom-out":
# self.scene_widget.matrix.post_scale(
# self.zoom_backwards, self.zoom_backwards, space_pos[0], space_pos[1]
# )
# self.scene.request_refresh()
# return RESPONSE_CONSUME
elif event_type == "wheelleft":
self.scene_widget.matrix.post_translate(self.pan_factor, 0)
self.scene.request_refresh()
return RESPONSE_CONSUME
elif event_type == "wheelright":
self.scene_widget.matrix.post_translate(-self.pan_factor, 0)
self.scene.request_refresh()
return RESPONSE_CONSUME
elif event_type == "middledown":
return RESPONSE_CONSUME
elif event_type == "middleup":
return RESPONSE_CONSUME
elif event_type == "gesture-start":
self._previous_zoom = 1.0
return RESPONSE_CONSUME
elif event_type == "gesture-end":
self._previous_zoom = None
return RESPONSE_CONSUME
elif event_type == "lost":
return RESPONSE_CONSUME
elif str(event_type).startswith("zoom "):
if self._previous_zoom is None:
return RESPONSE_CONSUME
try:
zoom = float(event_type.split(" ")[1])
except Exception:
return RESPONSE_CONSUME
zoom_change = zoom / self._previous_zoom
self.scene_widget.matrix.post_scale(
zoom_change, zoom_change, space_pos[0], space_pos[1]
)
self.scene_widget.matrix.post_translate(space_pos[4], space_pos[5])
self._previous_zoom = zoom
self.scene.request_refresh()
return RESPONSE_CONSUME
elif str(event_type).startswith("magnify "):
magnify = float(event_type.split(" ")[1])
self.scene_widget.matrix.post_scale(
magnify, magnify, space_pos[0], space_pos[1]
)
self.scene_widget.matrix.post_translate(space_pos[4], space_pos[5])
self.scene.context.signal("refresh_scene", 0)
return RESPONSE_CONSUME
# Movement
if self._placement_event_type is None:
self.scene_widget.matrix.post_translate(space_pos[4], space_pos[5])
self.scene.request_refresh()
elif self._placement_event_type == "zoom":
from math import e
p = (
space_pos[0]
- self._placement_event[0]
+ space_pos[1]
- self._placement_event[1]
)
p /= 250.0
if self._previous_zoom is not None:
zoom_factor = e**p
zoom_change = zoom_factor / self._previous_zoom
self._previous_zoom = zoom_factor
self.scene_widget.matrix.post_scale(
zoom_change,
zoom_change,
self._placement_event[0],
self._placement_event[1],
)
self.scene.request_refresh()
elif self._placement_event_type == "pan":
pan_factor_x = -(space_pos[0] - self._placement_event[0]) / 10
pan_factor_y = -(space_pos[1] - self._placement_event[1]) / 10
self.scene_widget.matrix.post_translate(pan_factor_x, pan_factor_y)
self.scene.request_refresh()
return RESPONSE_CONSUME
def set_view(self, x, y, w, h, preserve_aspect=None):
self._view = Viewbox(
"%d %d %d %d" % (x, y, w, h),
preserve_aspect,
)
self.aspect_matrix()
def set_frame(self, x, y, w, h):
self._frame = Viewbox("%d %d %d %d" % (x, y, w, h))
self.aspect_matrix()
def set_aspect(self, aspect=True):
self.aspect = aspect
self.aspect_matrix()
def aspect_matrix(self):
"""
Specifically view the scene with the given Viewbox.
"""
if self._frame and self._view and self.aspect:
self.scene_widget.matrix = Matrix(self._view.transform(self._frame))
def focus_position_scene(self, scene_point, scene_size):
"""
Focus on the specific point within the scene.
"""
window_width, window_height = self.scene.ClientSize
scale_x = self.get_scale_x()
scale_y = self.get_scale_y()
self.scene_matrix_reset()
self.scene_post_pan(-scene_point[0], -scene_point[1])
self.scene_post_scale(scale_x, scale_y)
self.scene_post_pan(window_width / 2.0, window_height / 2.0)
def focus_viewport_scene(
self, new_scene_viewport, scene_size, buffer=0.0, lock=True
):
"""
Focus on the given viewport in the scene.
@param new_scene_viewport: Viewport to have after this process within the scene.
@param scene_size: Size of the scene in which this viewport is active.
@param buffer: Amount of buffer around the edge of the new viewport.
@param lock: lock the scalex, scaley.
@return:
"""
window_width, window_height = scene_size
left = new_scene_viewport[0]
top = new_scene_viewport[1]
right = new_scene_viewport[2]
bottom = new_scene_viewport[3]
viewport_width = right - left
viewport_height = bottom - top
left -= viewport_width * buffer
right += viewport_width * buffer
top -= viewport_height * buffer
bottom += viewport_height * buffer
if right == left:
scale_x = 100
else:
scale_x = window_width / float(right - left)
if bottom == top:
scale_y = 100
else:
scale_y = window_height / float(bottom - top)
cx = (right + left) / 2
cy = (top + bottom) / 2
self.scene_widget.matrix.reset()
self.scene_widget.matrix.post_translate(-cx, -cy)
if lock:
scale = min(scale_x, scale_y)
if scale != 0:
self.scene_widget.matrix.post_scale(scale)
else:
if scale_x != 0 and scale_y != 0:
self.scene_widget.matrix.post_scale(scale_x, scale_y)
self.scene_widget.matrix.post_translate(window_width / 2.0, window_height / 2.0)
```
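The viewport-focus arithmetic in `focus_viewport_scene` above can be illustrated standalone. The sketch below is hedged: it is not the widget itself, uses no wx objects, and only reproduces the buffer expansion, the per-axis scale, and the locked-aspect choice of the smaller scale.
```python
def focus_scale_and_center(viewport, window_size, buffer=0.0, lock=True):
    """Standalone sketch of the arithmetic in SceneSpaceWidget.focus_viewport_scene.

    viewport:    (left, top, right, bottom) in scene units
    window_size: (width, height) of the window in pixels
    Returns (scale_x, scale_y, cx, cy), where (cx, cy) is the scene point that
    ends up at the window centre.
    """
    window_width, window_height = window_size
    left, top, right, bottom = viewport
    width = right - left
    height = bottom - top
    # Expand the viewport by `buffer` on every side.
    left -= width * buffer
    right += width * buffer
    top -= height * buffer
    bottom += height * buffer
    # Guard against zero-sized viewports, as the widget does.
    scale_x = 100 if right == left else window_width / float(right - left)
    scale_y = 100 if bottom == top else window_height / float(bottom - top)
    cx = (right + left) / 2
    cy = (top + bottom) / 2
    if lock:
        # Locked aspect: one uniform scale, the smaller of the two axes.
        scale = min(scale_x, scale_y)
        return scale, scale, cx, cy
    return scale_x, scale_y, cx, cy


# Example: focus an 800x600 window on a 100x50 scene region with a 10% buffer.
print(focus_scale_and_center((0, 0, 100, 50), (800, 600), buffer=0.1))
```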
#### File: gui/scenewidgets/laserpathwidget.py
```python
import wx
from meerk40t.gui.laserrender import DRAW_MODE_LASERPATH
from meerk40t.gui.scene.widget import Widget
class LaserPathWidget(Widget):
"""
Scene Widget.
Draw the laserpath.
These are blue lines that track the previous position of the laser-head.
"""
def __init__(self, scene):
Widget.__init__(self, scene, all=False)
self.laserpath = [[0, 0] for _ in range(1000)], [[0, 0] for _ in range(1000)]
self.laserpath_index = 0
def init(self, context):
context.listen("driver;position", self.on_update)
context.listen("emulator;position", self.on_update)
def final(self, context):
context.unlisten("driver;position", self.on_update)
context.unlisten("emulator;position", self.on_update)
def on_update(self, origin, pos):
laserpath = self.laserpath
index = self.laserpath_index
laserpath[0][index][0] = pos[0]
laserpath[0][index][1] = pos[1]
laserpath[1][index][0] = pos[2]
laserpath[1][index][1] = pos[3]
index += 1
index %= len(laserpath[0])
self.laserpath_index = index
def clear_laserpath(self):
self.laserpath = [[0, 0] for _ in range(1000)], [[0, 0] for _ in range(1000)]
self.laserpath_index = 0
def process_draw(self, gc):
"""
Draw the blue lines of the LaserPath
"""
context = self.scene.context
if context.draw_mode & DRAW_MODE_LASERPATH == 0:
mycol = self.scene.colors.color_laserpath
pen = wx.Pen(mycol)
gc.SetPen(pen)
starts, ends = self.laserpath
try:
gc.StrokeLineSegments(starts, ends)
except OverflowError:
pass # I don't actually know why this would happen.
```
#### File: meerk40t/gui/wxutils.py
```python
from typing import List
import wx
_ = wx.GetTranslation
def create_menu_for_choices(gui, choices: List[dict]) -> wx.Menu:
"""
Creates a menu for a given choices table
Processes submenus, references, radio_state as needed.
"""
menu = wx.Menu()
submenus = {}
choice = dict()
def get(key, default=None):
try:
return choice[key]
except KeyError:
return default
def execute(choice):
func = choice["action"]
func_kwargs = choice["kwargs"]
func_args = choice["args"]
def specific(event=None):
func(*func_args, **func_kwargs)
return specific
def set_bool(choice, value):
obj = choice["object"]
param = choice["attr"]
def check(event=None):
setattr(obj, param, value)
return check
for c in choices:
choice = c
submenu_name = get("submenu")
submenu = None
if submenu_name in submenus:
submenu = submenus[submenu_name]
else:
if get("separate_before", default=False):
menu.AppendSeparator()
if submenu_name is not None:
submenu = wx.Menu()
menu.AppendSubMenu(submenu, submenu_name)
submenus[submenu_name] = submenu
menu_context = submenu if submenu is not None else menu
t = get("type")
if t == bool:
item = menu_context.Append(
wx.ID_ANY, get("label"), get("tip"), wx.ITEM_CHECK
)
obj = get("object")
param = get("attr")
check = bool(getattr(obj, param, False))
item.Check(check)
gui.Bind(
wx.EVT_MENU,
set_bool(choice, not check),
item,
)
elif t == "action":
item = menu_context.Append(
wx.ID_ANY, get("label"), get("tip"), wx.ITEM_NORMAL
)
gui.Bind(
wx.EVT_MENU,
execute(choice),
item,
)
if not submenu and get("separate_after", default=False):
menu.AppendSeparator()
return menu
def create_choices_for_node(node, elements) -> List[dict]:
choices = []
for func in elements.tree_operations_for_node(node):
choice = {}
choices.append(choice)
choice["action"] = func
choice["type"] = "action"
choice["submenu"] = func.submenu
choice["kwargs"] = dict()
choice["args"] = tuple()
choice["separate_before"] = func.separate_before
choice["separate_after"] = func.separate_after
choice["label"] = func.name
choice["real_name"] = func.real_name
choice["tip"] = func.help
choice["radio"] = func.radio
choice["reference"] = func.reference
choice["user_prompt"] = func.user_prompt
choice["calcs"] = func.calcs
choice["values"] = func.values
return choices
def create_menu_for_node_TEST(gui, node, elements) -> wx.Menu:
"""
Test code towards unifying choices and tree nodes into choices that parse to menus.
@param gui:
@param node:
@param elements:
@return:
"""
choices = create_choices_for_node(node, elements)
return create_menu_for_choices(gui, choices)
def create_menu_for_node(gui, node, elements) -> wx.Menu:
"""
Create menu for a particular node. Does not invoke the menu.
Processes submenus, references, radio_state as needed.
"""
menu = wx.Menu()
submenus = {}
radio_check_not_needed = []
def menu_functions(f, node):
func_dict = dict(f.func_dict)
def specific(event=None):
prompts = f.user_prompt
for prompt in prompts:
func_dict[prompt["attr"]] = elements.kernel.prompt(
prompt["type"], prompt["prompt"]
)
f(node, **func_dict)
return specific
for func in elements.tree_operations_for_node(node):
submenu_name = func.submenu
submenu = None
if submenu_name in submenus:
submenu = submenus[submenu_name]
else:
if submenu_name is not None:
submenu = wx.Menu()
menu.AppendSubMenu(submenu, submenu_name, func.help)
submenus[submenu_name] = submenu
menu_context = submenu if submenu is not None else menu
if func.separate_before:
menu_context.AppendSeparator()
if func.reference is not None:
menu_context.AppendSubMenu(
create_menu_for_node(gui, func.reference(node), elements),
func.real_name,
)
continue
if func.radio_state is not None:
item = menu_context.Append(
wx.ID_ANY, func.real_name, func.help, wx.ITEM_RADIO
)
gui.Bind(
wx.EVT_MENU,
menu_functions(func, node),
item,
)
check = func.radio_state
item.Check(check)
if check and menu_context not in radio_check_not_needed:
radio_check_not_needed.append(menu_context)
else:
gui.Bind(
wx.EVT_MENU,
menu_functions(func, node),
menu_context.Append(
wx.ID_ANY, func.real_name, func.help, wx.ITEM_NORMAL
),
)
if menu_context not in radio_check_not_needed:
radio_check_not_needed.append(menu_context)
if not submenu and func.separate_after:
menu.AppendSeparator()
for submenu in submenus.values():
if submenu not in radio_check_not_needed:
item = submenu.Append(
wx.ID_ANY,
_("Other value..."),
_("Value set using properties"),
wx.ITEM_RADIO,
)
item.Check(True)
return menu
def create_menu(gui, node, elements):
"""
Create menu items. This is used for both the scene and the tree to create menu items.
@param gui: Gui used to create menu items.
@param node: The Node clicked on for the generated menu.
@return:
"""
if node is None:
return
if hasattr(node, "node"):
node = node.node
menu = create_menu_for_node(gui, node, elements)
if menu.MenuItemCount != 0:
gui.PopupMenu(menu)
menu.Destroy()
WX_METAKEYS = [
wx.WXK_START,
wx.WXK_WINDOWS_LEFT,
wx.WXK_WINDOWS_RIGHT,
]
WX_MODIFIERS = {
wx.WXK_CONTROL: "ctrl",
wx.WXK_RAW_CONTROL: "macctl",
wx.WXK_ALT: "alt",
wx.WXK_SHIFT: "shift",
wx.WXK_START: "start",
wx.WXK_WINDOWS_LEFT: "win-left",
wx.WXK_WINDOWS_RIGHT: "win-right",
}
WX_SPECIALKEYS = {
wx.WXK_F1: "f1",
wx.WXK_F2: "f2",
wx.WXK_F3: "f3",
wx.WXK_F4: "f4",
wx.WXK_F5: "f5",
wx.WXK_F6: "f6",
wx.WXK_F7: "f7",
wx.WXK_F8: "f8",
wx.WXK_F9: "f9",
wx.WXK_F10: "f10",
wx.WXK_F11: "f11",
wx.WXK_F12: "f12",
wx.WXK_F13: "f13",
wx.WXK_F14: "f14",
wx.WXK_F15: "f15",
wx.WXK_F16: "f16",
wx.WXK_F17: "f17",
wx.WXK_F18: "f18",
wx.WXK_F19: "f19",
wx.WXK_F20: "f20",
wx.WXK_F21: "f21",
wx.WXK_F22: "f22",
wx.WXK_F23: "f23",
wx.WXK_F24: "f24",
wx.WXK_ADD: "+",
wx.WXK_END: "end",
wx.WXK_NUMPAD0: "numpad0",
wx.WXK_NUMPAD1: "numpad1",
wx.WXK_NUMPAD2: "numpad2",
wx.WXK_NUMPAD3: "numpad3",
wx.WXK_NUMPAD4: "numpad4",
wx.WXK_NUMPAD5: "numpad5",
wx.WXK_NUMPAD6: "numpad6",
wx.WXK_NUMPAD7: "numpad7",
wx.WXK_NUMPAD8: "numpad8",
wx.WXK_NUMPAD9: "numpad9",
wx.WXK_NUMPAD_ADD: "numpad_add",
wx.WXK_NUMPAD_SUBTRACT: "numpad_subtract",
wx.WXK_NUMPAD_MULTIPLY: "numpad_multiply",
wx.WXK_NUMPAD_DIVIDE: "numpad_divide",
wx.WXK_NUMPAD_DECIMAL: "numpad.",
wx.WXK_NUMPAD_ENTER: "numpad_enter",
wx.WXK_NUMPAD_RIGHT: "numpad_right",
wx.WXK_NUMPAD_LEFT: "numpad_left",
wx.WXK_NUMPAD_UP: "numpad_up",
wx.WXK_NUMPAD_DOWN: "numpad_down",
wx.WXK_NUMPAD_DELETE: "numpad_delete",
wx.WXK_NUMPAD_INSERT: "numpad_insert",
wx.WXK_NUMPAD_PAGEUP: "numpad_pgup",
wx.WXK_NUMPAD_PAGEDOWN: "numpad_pgdn",
wx.WXK_NUMPAD_HOME: "numpad_home",
wx.WXK_NUMPAD_END: "numpad_end",
wx.WXK_NUMLOCK: "num_lock",
wx.WXK_SCROLL: "scroll_lock",
wx.WXK_CAPITAL: "caps_lock",
wx.WXK_HOME: "home",
wx.WXK_DOWN: "down",
wx.WXK_UP: "up",
wx.WXK_RIGHT: "right",
wx.WXK_LEFT: "left",
wx.WXK_ESCAPE: "escape",
wx.WXK_BACK: "back",
wx.WXK_PAUSE: "pause",
wx.WXK_PAGEDOWN: "pagedown",
wx.WXK_PAGEUP: "pageup",
wx.WXK_PRINT: "print",
wx.WXK_RETURN: "return",
wx.WXK_SPACE: "space",
wx.WXK_TAB: "tab",
wx.WXK_DELETE: "delete",
wx.WXK_INSERT: "insert",
wx.WXK_SPECIAL1: "special1",
wx.WXK_SPECIAL2: "special2",
wx.WXK_SPECIAL3: "special3",
wx.WXK_SPECIAL4: "special4",
wx.WXK_SPECIAL5: "special5",
wx.WXK_SPECIAL6: "special6",
wx.WXK_SPECIAL7: "special7",
wx.WXK_SPECIAL8: "special8",
wx.WXK_SPECIAL9: "special9",
wx.WXK_SPECIAL10: "special10",
wx.WXK_SPECIAL11: "special11",
wx.WXK_SPECIAL12: "special12",
wx.WXK_SPECIAL13: "special13",
wx.WXK_SPECIAL14: "special14",
wx.WXK_SPECIAL15: "special15",
wx.WXK_SPECIAL16: "special16",
wx.WXK_SPECIAL17: "special17",
wx.WXK_SPECIAL18: "special18",
wx.WXK_SPECIAL19: "special19",
wx.WXK_SPECIAL20: "special20",
wx.WXK_CLEAR: "clear",
wx.WXK_WINDOWS_MENU: "menu",
}
def get_key_name(event, return_modifier=False):
keyvalue = ""
# https://wxpython.org/Phoenix/docs/html/wx.KeyEvent.html
key = event.GetUnicodeKey()
if key == wx.WXK_NONE:
key = event.GetKeyCode()
if event.RawControlDown() and not event.ControlDown():
keyvalue += "macctl+" # Deliberately not macctrl+
elif event.ControlDown():
keyvalue += "ctrl+"
if event.AltDown() or key == wx.WXK_ALT:
keyvalue += "alt+"
if event.ShiftDown():
keyvalue += "shift+"
if event.MetaDown() or key in WX_METAKEYS:
keyvalue += "meta+"
# if return_modifier and keyvalue: print("key", key, keyvalue)
if key in WX_MODIFIERS:
return keyvalue if return_modifier else None
if key in WX_SPECIALKEYS:
keyvalue += WX_SPECIALKEYS[key]
else:
keyvalue += chr(key)
# print("key", key, keyvalue)
return keyvalue.lower()
def disable_window(window):
for m in window.Children:
if hasattr(m, "Disable"):
m.Disable()
if hasattr(m, "Children"):
disable_window(m)
```
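A hypothetical choices table may make the keys expected by `create_menu_for_choices` clearer. The sketch below is hedged: the settings class and callback are placeholders, and only the keys actually read by the function above are shown.
```python
# Placeholder settings object and callback, for illustration only.
class _Settings:
    show_grid = True


def _zoom_to_bed(*args, **kwargs):
    print("zoom to bed", args, kwargs)


example_choices = [
    {
        # bool type: rendered as a checkable item toggling _Settings.show_grid
        "type": bool,
        "label": "Show Grid",
        "tip": "Toggle the scene grid",
        "object": _Settings,
        "attr": "show_grid",
    },
    {
        # "action" type: rendered as a normal item invoking the callback
        "type": "action",
        "label": "Zoom to Bed",
        "tip": "Zoom the scene to the bed size",
        "action": _zoom_to_bed,
        "args": (),
        "kwargs": {},
        "submenu": "View",
        "separate_before": True,
    },
]

# Typical use, given a wx window `gui`:
#     menu = create_menu_for_choices(gui, example_choices)
#     gui.PopupMenu(menu)
#     menu.Destroy()
```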
#### File: meerk40t/kernel/jobs.py
```python
import time
from typing import Callable, Optional, Tuple
from .states import *
class Job:
"""
Generic job for the scheduler.
Jobs that can be scheduled in the scheduler-kernel to run at a particular time and a given number of times.
This is done by calling schedule() and unschedule() and setting the parameters for process, args, interval,
and times.
"""
def __init__(
self,
process: Optional[Callable] = None,
args: Optional[Tuple] = (),
interval: float = 1.0,
times: Optional[int] = None,
job_name: Optional[str] = None,
run_main: bool = False,
conditional: Callable = None,
):
self.job_name = job_name
self.state = STATE_INITIALIZE
self.run_main = run_main
self.conditional = conditional
self.process = process
self.args = args
self.interval = interval
self.times = times
self._last_run = None
self._next_run = time.time() + self.interval
self._remaining = self.times
def __call__(self, *args, **kwargs):
self.process(*args, **kwargs)
def __str__(self):
if self.job_name is not None:
return self.job_name
else:
try:
return self.process.__name__
except AttributeError:
return object.__str__(self)
@property
def scheduled(self) -> bool:
return (
self._next_run is not None
and time.time() >= self._next_run
and (self.conditional is None or self.conditional())
)
def reset(self) -> None:
self._last_run = None
self._next_run = time.time() + self.interval
self._remaining = self.times
def cancel(self) -> None:
self._remaining = -1
class ConsoleFunction(Job):
"""
Special type of Job that runs the Console command provided when the job is executed.
"""
def __init__(
self,
context: "Context",
data: str,
interval: float = 1.0,
times: Optional[int] = None,
job_name: Optional[str] = None,
run_main: bool = False,
conditional: Callable = None,
):
Job.__init__(
self, self.__call__, None, interval, times, job_name, run_main, conditional
)
self.context = context
self.data = data
def __call__(self, *args, **kwargs):
self.context.console(self.data)
def __str__(self):
return self.data.replace("\n", "")
```
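A small usage sketch of `Job` follows. It is hedged: the import path is inferred from the file header above, and the polling loop stands in for the kernel scheduler, which normally invokes scheduled jobs itself.
```python
import time

# Assumed import path, inferred from the file location above.
from meerk40t.kernel.jobs import Job


def heartbeat(name):
    print("tick:", name)


# Run heartbeat("demo") every 0.5 seconds, three times in total.
job = Job(process=heartbeat, args=("demo",), interval=0.5, times=3, job_name="demo-heartbeat")

# Stand-in for the kernel scheduler: poll the public `scheduled` property,
# call the job, then re-arm it with reset(). reset() also resets the internal
# run counter, so this loop keeps its own count of remaining runs.
runs_left = job.times
while runs_left:
    if job.scheduled:
        job(*job.args)  # Job.__call__ forwards to the wrapped process
        job.reset()     # next run becomes `interval` seconds from now
        runs_left -= 1
    time.sleep(0.05)
```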
#### File: lihuiyu/gui/lhystudiosdrivergui.py
```python
import wx
from meerk40t.core.units import Length
from meerk40t.gui.icons import icons8_administrative_tools_50
from meerk40t.gui.mwindow import MWindow
from meerk40t.kernel import signal_listener
_ = wx.GetTranslation
FIX_SPEEDS_RATIO = 0.9195
class ConfigurationUsb(wx.Panel):
def __init__(self, *args, context=None, **kwds):
# begin wxGlade: ConfigurationUsb.__init__
kwds["style"] = kwds.get("style", 0)
wx.Panel.__init__(self, *args, **kwds)
self.context = context
sizer_usb_settings = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("USB Settings")), wx.VERTICAL
)
sizer_usb_restrict = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Restrict Multiple Lasers")), wx.VERTICAL
)
sizer_usb_settings.Add(sizer_usb_restrict, 0, 0, 0)
sizer_criteria = wx.BoxSizer(wx.HORIZONTAL)
sizer_usb_restrict.Add(sizer_criteria, 1, wx.EXPAND, 0)
sizer_chip_version = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("CH341 Version")), wx.HORIZONTAL
)
sizer_criteria.Add(sizer_chip_version, 0, wx.EXPAND, 0)
self.text_device_version = wx.TextCtrl(
self, wx.ID_ANY, "", style=wx.TE_READONLY
)
self.text_device_version.SetMinSize((55, 23))
sizer_chip_version.Add(self.text_device_version, 0, 0, 0)
self.spin_device_version = wx.SpinCtrl(self, wx.ID_ANY, "-1", min=-1, max=25)
self.spin_device_version.SetMinSize((40, 23))
self.spin_device_version.SetToolTip(
_(
"Optional: Distinguish between different lasers using the match criteria below.\n-1 match anything. 0+ match exactly that value."
)
)
sizer_chip_version.Add(self.spin_device_version, 0, 0, 0)
sizer_device_index = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Device Index:")), wx.HORIZONTAL
)
sizer_criteria.Add(sizer_device_index, 0, wx.EXPAND, 0)
self.text_device_index = wx.TextCtrl(self, wx.ID_ANY, "", style=wx.TE_READONLY)
self.text_device_index.SetMinSize((55, 23))
sizer_device_index.Add(self.text_device_index, 0, 0, 0)
self.spin_device_index = wx.SpinCtrl(self, wx.ID_ANY, "-1", min=-1, max=5)
self.spin_device_index.SetMinSize((40, 23))
self.spin_device_index.SetToolTip(
_(
"Optional: Distinguish between different lasers using the match criteria below.\n-1 match anything. 0+ match exactly that value."
)
)
sizer_device_index.Add(self.spin_device_index, 0, 0, 0)
sizer_serial = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Serial Number")), wx.HORIZONTAL
)
sizer_usb_restrict.Add(sizer_serial, 0, wx.EXPAND, 0)
self.check_serial_number = wx.CheckBox(self, wx.ID_ANY, _("Serial Number"))
self.check_serial_number.SetToolTip(
_("Require a serial number match for this board")
)
sizer_serial.Add(self.check_serial_number, 0, 0, 0)
self.text_serial_number = wx.TextCtrl(self, wx.ID_ANY, "")
self.text_serial_number.SetMinSize((150, 23))
self.text_serial_number.SetToolTip(
_(
"Board Serial Number to be used to identify a specific laser. If the device fails to match the serial number it will be disconnected."
)
)
sizer_serial.Add(self.text_serial_number, 0, wx.EXPAND, 0)
sizer_buffer = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Write Buffer")), wx.HORIZONTAL
)
sizer_usb_settings.Add(sizer_buffer, 0, wx.EXPAND, 0)
self.checkbox_limit_buffer = wx.CheckBox(
self, wx.ID_ANY, _("Limit Write Buffer")
)
self.checkbox_limit_buffer.SetToolTip(
_(
"Limit the write buffer to a certain amount. Permits on-the-fly command production."
)
)
self.checkbox_limit_buffer.SetValue(1)
sizer_buffer.Add(self.checkbox_limit_buffer, 0, 0, 0)
self.text_buffer_length = wx.TextCtrl(self, wx.ID_ANY, "", style=wx.TE_READONLY)
self.text_buffer_length.SetToolTip(
_("Current number of bytes in the write buffer.")
)
sizer_buffer.Add(self.text_buffer_length, 0, 0, 0)
label_14 = wx.StaticText(self, wx.ID_ANY, "/")
sizer_buffer.Add(label_14, 0, 0, 0)
self.spin_packet_buffer_max = wx.SpinCtrl(
self, wx.ID_ANY, "1500", min=1, max=1000000
)
self.spin_packet_buffer_max.SetToolTip(_("Current maximum write buffer limit."))
sizer_buffer.Add(self.spin_packet_buffer_max, 0, 0, 0)
self.SetSizer(sizer_usb_settings)
self.Layout()
self.Bind(
wx.EVT_SPINCTRL, self.spin_on_device_version, self.spin_device_version
)
self.Bind(
wx.EVT_TEXT_ENTER, self.spin_on_device_version, self.spin_device_version
)
self.Bind(wx.EVT_SPINCTRL, self.spin_on_device_index, self.spin_device_index)
self.Bind(wx.EVT_TEXT_ENTER, self.spin_on_device_index, self.spin_device_index)
self.Bind(
wx.EVT_CHECKBOX, self.on_check_serial_number, self.check_serial_number
)
self.Bind(wx.EVT_TEXT, self.on_text_serial_number, self.text_serial_number)
self.Bind(
wx.EVT_CHECKBOX,
self.on_check_limit_packet_buffer,
self.checkbox_limit_buffer,
)
self.Bind(
wx.EVT_SPINCTRL, self.on_spin_packet_buffer_max, self.spin_packet_buffer_max
)
self.Bind(
wx.EVT_TEXT, self.on_spin_packet_buffer_max, self.spin_packet_buffer_max
)
self.Bind(
wx.EVT_TEXT_ENTER,
self.on_spin_packet_buffer_max,
self.spin_packet_buffer_max,
)
# end wxGlade
self.spin_device_index.SetValue(self.context.usb_index)
self.spin_device_version.SetValue(self.context.usb_version)
if self.context.serial is not None:
self.text_serial_number.SetValue(self.context.serial)
self.check_serial_number.SetValue(self.context.serial_enable)
self.checkbox_limit_buffer.SetValue(self.context.buffer_limit)
self.spin_packet_buffer_max.SetValue(self.context.buffer_max)
# Disables of features not yet supported.
self.check_serial_number.Enable(False)
self.text_serial_number.Enable(False)
def pane_show(self):
# self.context.listen("pipe;buffer", self.on_buffer_update)
pass
def pane_hide(self):
# self.context.unlisten("pipe;buffer", self.on_buffer_update)
pass
@signal_listener("pipe;buffer")
def on_buffer_update(self, origin, value, *args):
self.text_buffer_length.SetValue(str(value))
@signal_listener("pipe;index")
def on_update_pipe_index(self, origin, value):
if origin != self.context.path:
return
self.text_device_index.SetValue(str(value))
@signal_listener("pipe;chipv")
def on_update_pipe_chipv(self, origin, value):
if origin != self.context.path:
return
self.text_device_version.SetValue(str(value))
def on_check_limit_packet_buffer(
self, event=None
): # wxGlade: JobInfo.<event_handler>
self.context.buffer_limit = self.checkbox_limit_buffer.GetValue()
def on_spin_packet_buffer_max(self, event=None): # wxGlade: JobInfo.<event_handler>
self.context.buffer_max = self.spin_packet_buffer_max.GetValue()
def spin_on_device_index(self, event=None):
self.context.usb_index = int(self.spin_device_index.GetValue())
def spin_on_device_version(self, event=None):
self.context.usb_version = int(self.spin_device_version.GetValue())
def on_check_serial_number(
self, event
): # wxGlade: ConfigurationUsb.<event_handler>
self.context.serial_enable = self.check_serial_number.GetValue()
def on_text_serial_number(self, event): # wxGlade: ConfigurationUsb.<event_handler>
self.context.serial = self.text_serial_number.GetValue()
class ConfigurationTcp(wx.Panel):
def __init__(self, *args, context=None, **kwds):
# begin wxGlade: ConfigurationTcp.__init__
kwds["style"] = kwds.get("style", 0)
wx.Panel.__init__(self, *args, **kwds)
self.context = context
sizer_13 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("TCP Settings")), wx.HORIZONTAL
)
sizer_21 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Address")), wx.VERTICAL
)
sizer_13.Add(sizer_21, 0, 0, 0)
self.text_address = wx.TextCtrl(self, wx.ID_ANY, "")
self.text_address.SetMinSize((150, 23))
self.text_address.SetToolTip(_("IP/Host of the server computer"))
sizer_21.Add(self.text_address, 0, 0, 0)
sizer_port = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Port")), wx.VERTICAL
)
sizer_13.Add(sizer_port, 0, 0, 0)
self.text_port = wx.TextCtrl(self, wx.ID_ANY, "")
self.text_port.SetToolTip(_("Port for tcp connection on the server computer"))
sizer_port.Add(self.text_port, 0, wx.EXPAND, 0)
self.SetSizer(sizer_13)
self.Layout()
self.Bind(wx.EVT_TEXT, self.on_text_address, self.text_address)
self.Bind(wx.EVT_TEXT_ENTER, self.on_text_address, self.text_address)
self.Bind(wx.EVT_TEXT, self.on_text_port, self.text_port)
self.Bind(wx.EVT_TEXT_ENTER, self.on_text_port, self.text_port)
# end wxGlade
self.text_port.SetValue(str(self.context.port))
self.text_address.SetValue(self.context.address)
def pane_show(self):
pass
def pane_hide(self):
pass
def on_text_address(self, event): # wxGlade: ConfigurationTcp.<event_handler>
self.context.address = self.text_address.GetValue()
def on_text_port(self, event): # wxGlade: ConfigurationTcp.<event_handler>
try:
self.context.port = int(self.text_port.GetValue())
except ValueError:
pass
class ConfigurationLaserPanel(wx.Panel):
def __init__(self, *args, context=None, **kwds):
# begin wxGlade: ConfigurationLaserPanel.__init__
kwds["style"] = kwds.get("style", 0)
wx.Panel.__init__(self, *args, **kwds)
self.context = context
sizer_27 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Laser Parameters")), wx.VERTICAL
)
sizer_home = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Shift Home Position")), wx.HORIZONTAL
)
sizer_27.Add(sizer_home, 0, wx.EXPAND, 0)
sizer_4 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("X:")), wx.HORIZONTAL
)
sizer_home.Add(sizer_4, 2, wx.EXPAND, 0)
self.spin_home_x = wx.SpinCtrlDouble(
self, wx.ID_ANY, "0.0", min=-50000.0, max=50000.0
)
self.spin_home_x.SetMinSize((80, 23))
self.spin_home_x.SetToolTip(_("Translate Home X"))
sizer_4.Add(self.spin_home_x, 0, 0, 0)
label_12 = wx.StaticText(self, wx.ID_ANY, _("steps"))
sizer_4.Add(label_12, 0, 0, 0)
sizer_2 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Y:")), wx.HORIZONTAL
)
sizer_home.Add(sizer_2, 2, wx.EXPAND, 0)
self.spin_home_y = wx.SpinCtrlDouble(
            self, wx.ID_ANY, "0.0", min=-50000.0, max=50000.0
)
self.spin_home_y.SetMinSize((80, 23))
self.spin_home_y.SetToolTip(_("Translate Home Y"))
sizer_2.Add(self.spin_home_y, 0, 0, 0)
label_11 = wx.StaticText(self, wx.ID_ANY, _("steps"))
sizer_2.Add(label_11, 0, 0, 0)
self.button_home_by_current = wx.Button(self, wx.ID_ANY, _("Set Current"))
self.button_home_by_current.SetToolTip(
_("Set Home Position based on the current position")
)
sizer_home.Add(self.button_home_by_current, 1, 0, 0)
sizer_bed = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Bed Dimensions")), wx.HORIZONTAL
)
sizer_27.Add(sizer_bed, 0, wx.EXPAND, 0)
sizer_14 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Width")), wx.HORIZONTAL
)
sizer_bed.Add(sizer_14, 1, 0, 0)
self.text_bedwidth = wx.TextCtrl(
self,
wx.ID_ANY,
"310mm",
)
self.text_bedwidth.SetMinSize((80, 23))
self.text_bedwidth.SetToolTip(_("Width of the laser bed."))
sizer_14.Add(self.text_bedwidth, 4, 0, 0)
sizer_15 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Height")), wx.HORIZONTAL
)
sizer_bed.Add(sizer_15, 1, 0, 0)
label_3 = wx.StaticText(self, wx.ID_ANY, "")
sizer_15.Add(label_3, 0, 0, 0)
self.text_bedheight = wx.TextCtrl(self, wx.ID_ANY, "210mm")
self.text_bedheight.SetMinSize((80, 23))
self.text_bedheight.SetToolTip(_("Height of the laser bed."))
sizer_15.Add(self.text_bedheight, 4, 0, 0)
sizer_scale_factors = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("User Scale Factor")), wx.HORIZONTAL
)
sizer_27.Add(sizer_scale_factors, 0, wx.EXPAND, 0)
sizer_19 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("X:")), wx.HORIZONTAL
)
sizer_scale_factors.Add(sizer_19, 0, wx.EXPAND, 0)
self.text_scale_x = wx.TextCtrl(self, wx.ID_ANY, "1.000")
self.text_scale_x.SetToolTip(
_("Scale factor for the X-axis. Board units to actual physical units.")
)
sizer_19.Add(self.text_scale_x, 0, 0, 0)
sizer_20 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Y:")), wx.HORIZONTAL
)
sizer_scale_factors.Add(sizer_20, 0, wx.EXPAND, 0)
self.text_scale_y = wx.TextCtrl(self, wx.ID_ANY, "1.000")
self.text_scale_y.SetToolTip(
_("Scale factor for the Y-axis. Board units to actual physical units.")
)
sizer_20.Add(self.text_scale_y, 0, 0, 0)
self.SetSizer(sizer_27)
self.spin_home_x.SetValue(self.context.home_adjust_x)
self.spin_home_y.SetValue(self.context.home_adjust_y)
self.text_bedwidth.SetValue(self.context.bedwidth)
self.text_bedheight.SetValue(self.context.bedheight)
self.text_scale_x.SetValue("%.4f" % self.context.scale_x)
self.text_scale_y.SetValue("%.4f" % self.context.scale_y)
self.Layout()
self.Bind(wx.EVT_TEXT, self.spin_on_home_x, self.spin_home_x)
self.Bind(wx.EVT_TEXT, self.spin_on_home_y, self.spin_home_y)
self.Bind(
wx.EVT_BUTTON, self.on_button_set_home_current, self.button_home_by_current
)
self.Bind(wx.EVT_TEXT, self.on_text_bedwidth, self.text_bedwidth)
self.Bind(wx.EVT_TEXT, self.on_text_bedheight, self.text_bedheight)
self.Bind(wx.EVT_TEXT, self.on_text_x_scale, self.text_scale_x)
self.Bind(wx.EVT_TEXT, self.on_text_y_scale, self.text_scale_y)
def pane_show(self):
pass
def pane_hide(self):
pass
def spin_on_home_x(self, event=None):
self.context.home_adjust_x = int(self.spin_home_x.GetValue())
def spin_on_home_y(self, event=None):
self.context.home_adjust_y = int(self.spin_home_y.GetValue())
def on_button_set_home_current(self, event=None):
native_x = self.context.device.native_x
native_y = self.context.device.native_y
self.context.home_adjust_x = int(native_x)
self.context.home_adjust_y = int(native_y)
self.spin_home_x.SetValue(self.context.home_adjust_x)
self.spin_home_y.SetValue(self.context.home_adjust_y)
def on_text_bedwidth(self, event=None):
try:
Length(self.text_bedwidth.GetValue())
Length(self.text_bedheight.GetValue())
except ValueError:
return
self.context.device.width = self.text_bedwidth.GetValue()
self.context.device.height = self.text_bedheight.GetValue()
self.context.device.bedwidth = self.text_bedwidth.GetValue()
self.context.device.bedheight = self.text_bedheight.GetValue()
self.context.signal(
"bed_size", (self.context.device.bedwidth, self.context.device.bedheight)
)
self.context("viewport_update\n")
def on_text_bedheight(self, event=None):
try:
Length(self.text_bedwidth.GetValue())
Length(self.text_bedheight.GetValue())
except ValueError:
return
self.context.device.width = self.text_bedwidth.GetValue()
self.context.device.height = self.text_bedheight.GetValue()
self.context.device.bedwidth = self.text_bedwidth.GetValue()
self.context.device.bedheight = self.text_bedheight.GetValue()
self.context.signal(
"bed_size", (self.context.device.bedwidth, self.context.device.bedheight)
)
self.context("viewport_update\n")
def on_text_x_scale(self, event=None):
try:
self.context.device.scale_x = float(self.text_scale_x.GetValue())
self.context.device.scale_y = float(self.text_scale_y.GetValue())
self.context.signal(
"scale_step", (self.context.device.scale_x, self.context.device.scale_y)
)
self.context("viewport_update\n")
except ValueError:
pass
def on_text_y_scale(self, event=None):
try:
self.context.device.scale_x = float(self.text_scale_x.GetValue())
self.context.device.scale_y = float(self.text_scale_y.GetValue())
self.context.signal(
"scale_step", (self.context.device.scale_x, self.context.device.scale_y)
)
self.context("viewport_update\n")
except ValueError:
pass
class ConfigurationInterfacePanel(wx.Panel):
def __init__(self, *args, context=None, **kwds):
# begin wxGlade: ConfigurationInterfacePanel.__init__
kwds["style"] = kwds.get("style", 0)
wx.Panel.__init__(self, *args, **kwds)
self.context = context
sizer_page_1 = wx.BoxSizer(wx.VERTICAL)
sizer_name = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Device Name")), wx.HORIZONTAL
)
sizer_page_1.Add(sizer_name, 0, wx.EXPAND, 0)
self.text_device_label = wx.TextCtrl(self, wx.ID_ANY, "")
self.text_device_label.SetToolTip(
_("The internal label to be used for this device")
)
sizer_name.Add(self.text_device_label, 1, 0, 0)
sizer_config = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Configuration")), wx.HORIZONTAL
)
sizer_page_1.Add(sizer_config, 0, wx.EXPAND, 0)
sizer_board = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Board Setup")), wx.HORIZONTAL
)
sizer_config.Add(sizer_board, 0, wx.EXPAND, 0)
self.combobox_board = wx.ComboBox(
self,
wx.ID_ANY,
choices=["M2", "B2", "M", "M1", "A", "B", "B1"],
style=wx.CB_DROPDOWN,
)
self.combobox_board.SetToolTip(
_("Select the board to use. This has an effects the speedcodes used.")
)
self.combobox_board.SetSelection(0)
sizer_board.Add(self.combobox_board, 1, 0, 0)
sizer_17 = wx.BoxSizer(wx.VERTICAL)
sizer_config.Add(sizer_17, 1, wx.EXPAND, 0)
self.checkbox_flip_x = wx.CheckBox(self, wx.ID_ANY, _("Flip X"))
self.checkbox_flip_x.SetToolTip(
_("Flip the Right and Left commands sent to the controller")
)
sizer_17.Add(self.checkbox_flip_x, 0, 0, 0)
self.checkbox_home_right = wx.CheckBox(self, wx.ID_ANY, _("Home Right"))
self.checkbox_home_right.SetToolTip(
_("Indicates the device Home is on the right")
)
sizer_17.Add(self.checkbox_home_right, 0, 0, 0)
label_1 = wx.StaticText(self, wx.ID_ANY, "")
sizer_17.Add(label_1, 0, 0, 0)
sizer_16 = wx.BoxSizer(wx.VERTICAL)
sizer_config.Add(sizer_16, 1, wx.EXPAND, 0)
self.checkbox_flip_y = wx.CheckBox(self, wx.ID_ANY, _("Flip Y"))
self.checkbox_flip_y.SetToolTip(
_("Flip the Top and Bottom commands sent to the controller")
)
sizer_16.Add(self.checkbox_flip_y, 0, 0, 0)
self.checkbox_home_bottom = wx.CheckBox(self, wx.ID_ANY, _("Home Bottom"))
self.checkbox_home_bottom.SetToolTip(
_("Indicates the device Home is on the bottom")
)
sizer_16.Add(self.checkbox_home_bottom, 0, 0, 0)
self.checkbox_swap_xy = wx.CheckBox(self, wx.ID_ANY, _("Swap X and Y"))
self.checkbox_swap_xy.SetToolTip(
_("Swaps the X and Y axis. This happens before the FlipX and FlipY.")
)
sizer_16.Add(self.checkbox_swap_xy, 0, 0, 0)
sizer_interface = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Interface")), wx.VERTICAL
)
sizer_page_1.Add(sizer_interface, 0, wx.EXPAND, 0)
sizer_interface_radio = wx.BoxSizer(wx.HORIZONTAL)
sizer_interface.Add(sizer_interface_radio, 0, wx.EXPAND, 0)
self.radio_usb = wx.RadioButton(self, wx.ID_ANY, _("USB"), style=wx.RB_GROUP)
self.radio_usb.SetValue(1)
self.radio_usb.SetToolTip(
_(
"Select this if you have an m2-nano controller physically connected to this computer using a USB cable."
)
)
sizer_interface_radio.Add(self.radio_usb, 1, 0, 0)
self.radio_tcp = wx.RadioButton(self, wx.ID_ANY, _("Networked"))
self.radio_tcp.SetToolTip(
_(
"Select this to connect this instance of Meerk40t to another instance of Meerk40t running as a remote server."
)
)
sizer_interface_radio.Add(self.radio_tcp, 4, 0, 0)
self.radio_mock = wx.RadioButton(self, wx.ID_ANY, _("Mock"))
self.radio_mock.SetToolTip(
_(
"Select this only for debugging without a physical laser available. Execute a burn as if there was an m2-nano controller physically connected by USB."
)
)
sizer_interface_radio.Add(self.radio_mock, 1, 0, 0)
self.panel_usb_settings = ConfigurationUsb(
self, wx.ID_ANY, context=self.context
)
sizer_interface.Add(self.panel_usb_settings, 0, wx.EXPAND, 0)
self.panel_tcp_config = ConfigurationTcp(self, wx.ID_ANY, context=self.context)
sizer_interface.Add(self.panel_tcp_config, 0, wx.EXPAND, 0)
self.ConfigurationLaserPanel = ConfigurationLaserPanel(
self, wx.ID_ANY, context=self.context
)
sizer_page_1.Add(self.ConfigurationLaserPanel, 1, wx.EXPAND, 0)
self.SetSizer(sizer_page_1)
self.Layout()
self.Bind(wx.EVT_TEXT, self.on_device_label, self.text_device_label)
self.Bind(wx.EVT_COMBOBOX, self.on_combobox_boardtype, self.combobox_board)
self.Bind(wx.EVT_CHECKBOX, self.on_check_flip_x, self.checkbox_flip_x)
self.Bind(wx.EVT_CHECKBOX, self.on_check_home_right, self.checkbox_home_right)
self.Bind(wx.EVT_CHECKBOX, self.on_check_flip_y, self.checkbox_flip_y)
self.Bind(wx.EVT_CHECKBOX, self.on_check_home_bottom, self.checkbox_home_bottom)
self.Bind(wx.EVT_CHECKBOX, self.on_check_swapxy, self.checkbox_swap_xy)
self.Bind(wx.EVT_RADIOBUTTON, self.on_radio_interface, self.radio_usb)
self.Bind(wx.EVT_RADIOBUTTON, self.on_radio_interface, self.radio_tcp)
self.Bind(wx.EVT_RADIOBUTTON, self.on_radio_interface, self.radio_mock)
# end wxGlade
self.text_device_label.SetValue(self.context.label)
self.checkbox_swap_xy.SetValue(self.context.swap_xy)
self.checkbox_flip_x.SetValue(self.context.flip_x)
self.checkbox_flip_y.SetValue(self.context.flip_y)
self.checkbox_home_right.SetValue(self.context.home_right)
self.checkbox_home_bottom.SetValue(self.context.home_bottom)
self.combobox_board.SetValue(self.context.board)
if self.context.mock:
self.panel_tcp_config.Hide()
self.panel_usb_settings.Hide()
self.radio_mock.SetValue(True)
elif self.context.networked:
self.panel_usb_settings.Hide()
self.radio_tcp.SetValue(True)
else:
self.radio_usb.SetValue(True)
self.panel_tcp_config.Hide()
def pane_show(self):
self.ConfigurationLaserPanel.pane_show()
self.panel_usb_settings.pane_show()
self.panel_tcp_config.pane_show()
def pane_hide(self):
self.ConfigurationLaserPanel.pane_hide()
self.panel_usb_settings.pane_hide()
self.panel_tcp_config.pane_hide()
def on_combobox_boardtype(self, event=None):
self.context.board = self.combobox_board.GetValue()
def on_check_swapxy(self, event=None):
self.context.swap_xy = self.checkbox_swap_xy.GetValue()
self.context("viewport_update\n")
def on_check_flip_x(self, event=None):
self.context.flip_x = self.checkbox_flip_x.GetValue()
self.context("viewport_update\n")
def on_check_home_right(self, event=None):
self.context.home_right = self.checkbox_home_right.GetValue()
self.context.origin_x = 1.0 if self.context.home_right else 0.0
self.context("viewport_update\n")
def on_check_flip_y(self, event=None):
self.context.flip_y = self.checkbox_flip_y.GetValue()
self.context("viewport_update\n")
def on_check_home_bottom(self, event=None):
self.context.home_bottom = self.checkbox_home_bottom.GetValue()
self.context.origin_y = 1.0 if self.context.home_bottom else 0.0
self.context("viewport_update\n")
def on_device_label(
self, event
): # wxGlade: ConfigurationInterfacePanel.<event_handler>
self.context.label = self.text_device_label.GetValue()
self.context.signal("device;renamed")
def on_radio_interface(
self, event
): # wxGlade: ConfigurationInterfacePanel.<event_handler>
if self.radio_usb.GetValue():
self.panel_tcp_config.Hide()
self.panel_usb_settings.Show()
self.context.networked = False
self.context.mock = False
self.context(".network_update\n")
if self.radio_tcp.GetValue():
self.panel_tcp_config.Show()
self.panel_usb_settings.Hide()
self.context.networked = True
self.context.mock = False
self.context(".network_update\n")
if self.radio_mock.GetValue():
self.panel_tcp_config.Hide()
self.panel_usb_settings.Hide()
self.context.networked = False
self.context.mock = True
self.context(".network_update\n")
self.Layout()
class ConfigurationSetupPanel(wx.Panel):
def __init__(self, *args, context=None, **kwds):
# begin wxGlade: ConfigurationSetupPanel.__init__
kwds["style"] = kwds.get("style", 0)
wx.Panel.__init__(self, *args, **kwds)
self.context = context
sizer_page_2 = wx.BoxSizer(wx.VERTICAL)
sizer_general = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("General Options")), wx.VERTICAL
)
sizer_page_2.Add(sizer_general, 0, wx.EXPAND, 0)
self.check_autolock = wx.CheckBox(self, wx.ID_ANY, _("Automatically lock rail"))
self.check_autolock.SetToolTip(_("Lock rail after operations are finished."))
self.check_autolock.SetValue(1)
sizer_general.Add(self.check_autolock, 0, 0, 0)
self.check_plot_shift = wx.CheckBox(self, wx.ID_ANY, _("Pulse Grouping"))
self.check_plot_shift.SetToolTip(
"\n".join(
[
_(
"Pulse Grouping is an alternative means of reducing the incidence of stuttering, allowing you potentially to burn at higher speeds."
),
"",
_(
"It works by swapping adjacent on or off bits to group on and off together and reduce the number of switches."
),
"",
_(
'As an example, instead of X_X_ it will burn XX__ - because the laser beam is overlapping, and because a bit is only moved at most 1/1000", the difference should not be visible even under magnification.'
),
_(
"Whilst the Pulse Grouping option in Operations are set for that operation before the job is spooled, and cannot be changed on the fly, this global Pulse Grouping option is checked as instructions are sent to the laser and can turned on and off during the burn process. Because the changes are believed to be small enough to be undetectable, you may wish to leave this permanently checked."
),
]
),
)
sizer_general.Add(self.check_plot_shift, 0, 0, 0)
self.check_strict = wx.CheckBox(self, wx.ID_ANY, _("Strict"))
self.check_strict.SetToolTip(
_(
"Forces the device to enter and exit programmed speed mode from the same direction.\nThis may prevent devices like the M2-V4 and earlier from having issues. Not typically needed."
)
)
sizer_general.Add(self.check_strict, 0, 0, 0)
self.check_alternative_raster = wx.CheckBox(
self, wx.ID_ANY, _("Alt Raster Style")
)
sizer_general.Add(self.check_alternative_raster, 0, 0, 0)
self.check_twitches = wx.CheckBox(self, wx.ID_ANY, _("Twitch Vectors"))
sizer_general.Add(self.check_twitches, 0, 0, 0)
sizer_jog = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Rapid Jog")), wx.VERTICAL
)
sizer_page_2.Add(sizer_jog, 0, 0, 0)
sizer_23 = wx.BoxSizer(wx.VERTICAL)
sizer_jog.Add(sizer_23, 0, wx.EXPAND, 0)
self.check_rapid_moves_between = wx.CheckBox(
self, wx.ID_ANY, _("Rapid Moves Between Objects")
)
self.check_rapid_moves_between.SetToolTip(
_("Perform rapid moves between the objects")
)
self.check_rapid_moves_between.SetValue(1)
sizer_23.Add(self.check_rapid_moves_between, 0, 0, 0)
sizer_25 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Minimum Jog Distance")), wx.HORIZONTAL
)
sizer_23.Add(sizer_25, 0, 0, 0)
self.text_minimum_jog_distance = wx.TextCtrl(self, wx.ID_ANY, "")
sizer_25.Add(self.text_minimum_jog_distance, 0, 0, 0)
self.radio_box_jog_method = wx.RadioBox(
self,
wx.ID_ANY,
_("Jog Method"),
choices=[_("Default"), _("Reset"), _("Finish")],
majorDimension=3,
style=wx.RA_SPECIFY_ROWS,
)
self.radio_box_jog_method.SetToolTip(
_(
"Changes the method of jogging. Default are NSE jogs. Reset are @NSE jogs. Finished are @FNSE jogs followed by a wait."
)
)
self.radio_box_jog_method.SetSelection(0)
sizer_jog.Add(self.radio_box_jog_method, 0, 0, 0)
sizer_rapid_override = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Rapid Override")), wx.VERTICAL
)
sizer_page_2.Add(sizer_rapid_override, 0, wx.EXPAND, 0)
self.check_override_rapid = wx.CheckBox(
self, wx.ID_ANY, _("Override Rapid Movements")
)
sizer_rapid_override.Add(self.check_override_rapid, 0, 0, 0)
sizer_36 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("X Travel Speed:")), wx.HORIZONTAL
)
sizer_rapid_override.Add(sizer_36, 0, wx.EXPAND, 0)
self.text_rapid_x = wx.TextCtrl(self, wx.ID_ANY, "")
sizer_36.Add(self.text_rapid_x, 0, 0, 0)
label_2 = wx.StaticText(self, wx.ID_ANY, _("mm/s"))
sizer_36.Add(label_2, 0, 0, 0)
sizer_35 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Y Travel Speed:")), wx.HORIZONTAL
)
sizer_rapid_override.Add(sizer_35, 0, wx.EXPAND, 0)
self.text_rapid_y = wx.TextCtrl(self, wx.ID_ANY, "")
sizer_35.Add(self.text_rapid_y, 0, 0, 0)
label_4 = wx.StaticText(self, wx.ID_ANY, _("mm/s"))
sizer_35.Add(label_4, 0, 0, 0)
sizer_speed = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Speed:")), wx.VERTICAL
)
sizer_page_2.Add(sizer_speed, 0, wx.EXPAND, 0)
sizer_32 = wx.BoxSizer(wx.HORIZONTAL)
sizer_speed.Add(sizer_32, 0, wx.EXPAND, 0)
self.check_fix_speeds = wx.CheckBox(
self, wx.ID_ANY, _("Fix rated to actual speed")
)
self.check_fix_speeds.SetToolTip(
_(
"Correct for speed invalidity. Lihuiyu Studios speeds are 92% of the correctly rated speed"
)
)
sizer_32.Add(self.check_fix_speeds, 1, 0, 0)
self.text_fix_rated_speed = wx.TextCtrl(
self, wx.ID_ANY, str(FIX_SPEEDS_RATIO), style=wx.TE_READONLY
)
sizer_32.Add(self.text_fix_rated_speed, 1, 0, 0)
sizer_29 = wx.BoxSizer(wx.HORIZONTAL)
sizer_speed.Add(sizer_29, 0, wx.EXPAND, 0)
self.check_scale_speed = wx.CheckBox(self, wx.ID_ANY, _("Scale Speed"))
self.check_scale_speed.SetToolTip(
_(
"Scale any given speeds to this device by this amount. If set to 1.1, all speeds are 10% faster than rated."
)
)
sizer_29.Add(self.check_scale_speed, 1, 0, 0)
self.text_speed_scale_amount = wx.TextCtrl(self, wx.ID_ANY, "1.000")
self.text_speed_scale_amount.SetToolTip(
_(
"Scales the machine's speed ratio so that rated speeds speeds multiplied by this ratio."
)
)
sizer_29.Add(self.text_speed_scale_amount, 1, wx.EXPAND, 0)
sizer_30 = wx.BoxSizer(wx.HORIZONTAL)
sizer_speed.Add(sizer_30, 0, wx.EXPAND, 0)
self.check_max_speed_vector = wx.CheckBox(
self, wx.ID_ANY, _("Max Speed (Vector)")
)
self.check_max_speed_vector.SetToolTip(
_("Limit the maximum vector speed to this value")
)
sizer_30.Add(self.check_max_speed_vector, 1, 0, 0)
self.text_max_speed_vector = wx.TextCtrl(self, wx.ID_ANY, "100")
self.text_max_speed_vector.SetToolTip(
_("maximum speed at which all greater speeds are limited")
)
sizer_30.Add(self.text_max_speed_vector, 1, 0, 0)
sizer_31 = wx.BoxSizer(wx.HORIZONTAL)
sizer_speed.Add(sizer_31, 0, wx.EXPAND, 0)
self.check_max_speed_raster = wx.CheckBox(
self, wx.ID_ANY, _("Max Speed (Raster)")
)
self.check_max_speed_raster.SetToolTip(
_("Limit the maximum raster speed to this value")
)
sizer_31.Add(self.check_max_speed_raster, 1, 0, 0)
self.text_max_speed_raster = wx.TextCtrl(self, wx.ID_ANY, "750")
self.text_max_speed_raster.SetToolTip(
_("maximum speed at which all greater speeds are limited")
)
sizer_31.Add(self.text_max_speed_raster, 1, 0, 0)
self.SetSizer(sizer_page_2)
self.Layout()
self.Bind(wx.EVT_CHECKBOX, self.on_check_autolock, self.check_autolock)
self.Bind(wx.EVT_CHECKBOX, self.on_check_pulse_shift, self.check_plot_shift)
self.Bind(wx.EVT_CHECKBOX, self.on_check_strict, self.check_strict)
self.Bind(
wx.EVT_CHECKBOX, self.on_check_alt_raster, self.check_alternative_raster
)
self.Bind(wx.EVT_CHECKBOX, self.on_check_twitches, self.check_twitches)
self.Bind(
wx.EVT_CHECKBOX, self.on_check_rapid_between, self.check_rapid_moves_between
)
self.Bind(
wx.EVT_TEXT, self.on_text_min_jog_distance, self.text_minimum_jog_distance
)
self.Bind(wx.EVT_RADIOBOX, self.on_jog_method_radio, self.radio_box_jog_method)
self.Bind(
wx.EVT_CHECKBOX, self.on_check_override_rapid, self.check_override_rapid
)
self.Bind(wx.EVT_TEXT, self.on_text_rapid_x, self.text_rapid_x)
self.Bind(wx.EVT_TEXT, self.on_text_rapid_y, self.text_rapid_y)
self.Bind(wx.EVT_CHECKBOX, self.on_check_fix_speeds, self.check_fix_speeds)
self.Bind(wx.EVT_CHECKBOX, self.on_check_scale_speed, self.check_scale_speed)
self.Bind(wx.EVT_TEXT, self.on_text_speed_scale, self.text_speed_scale_amount)
self.Bind(
wx.EVT_CHECKBOX, self.on_check_max_speed_vector, self.check_max_speed_vector
)
self.Bind(
wx.EVT_TEXT, self.on_text_speed_max_vector, self.text_max_speed_vector
)
self.Bind(
wx.EVT_CHECKBOX, self.on_check_max_speed_raster, self.check_max_speed_raster
)
self.Bind(
wx.EVT_TEXT, self.on_text_speed_max_raster, self.text_max_speed_raster
)
# end wxGlade
self.check_autolock.SetValue(self.context.autolock)
self.check_plot_shift.SetValue(self.context.plot_shift)
self.check_strict.SetValue(self.context.strict)
self.check_alternative_raster.SetValue(self.context.nse_raster)
self.check_twitches.SetValue(self.context.twitches)
self.check_rapid_moves_between.SetValue(self.context.opt_rapid_between)
self.text_minimum_jog_distance.SetValue(str(self.context.opt_jog_minimum))
self.radio_box_jog_method.SetSelection(self.context.opt_jog_mode)
self.check_override_rapid.SetValue(self.context.rapid_override)
self.text_rapid_x.SetValue(str(self.context.rapid_override_speed_x))
self.text_rapid_y.SetValue(str(self.context.rapid_override_speed_y))
self.check_fix_speeds.SetValue(self.context.fix_speeds)
self.check_scale_speed.SetValue(self.context.scale_speed_enabled)
self.text_speed_scale_amount.SetValue(str(self.context.scale_speed))
self.check_max_speed_vector.SetValue(self.context.max_speed_vector_enabled)
self.text_max_speed_vector.SetValue(str(self.context.max_speed_vector))
self.check_max_speed_raster.SetValue(self.context.max_speed_raster_enabled)
self.text_max_speed_raster.SetValue(str(self.context.max_speed_raster))
# Disables of features not yet supported.
self.text_max_speed_raster.Enable(False)
self.text_max_speed_vector.Enable(False)
self.text_speed_scale_amount.Enable(False)
self.check_max_speed_raster.Enable(False)
self.check_max_speed_vector.Enable(False)
self.check_scale_speed.Enable(False)
def pane_show(self):
pass
def pane_hide(self):
pass
def on_check_fix_speeds(self, event=None):
self.context.fix_speeds = self.check_fix_speeds.GetValue()
self.text_fix_rated_speed.SetValue(
"1.000" if self.context.fix_speeds else str(FIX_SPEEDS_RATIO)
)
def on_check_strict(self, event=None):
self.context.strict = self.check_strict.GetValue()
def on_check_autolock(self, event=None):
self.context.autolock = self.check_autolock.GetValue()
def on_check_pulse_shift(
self, event=None
): # wxGlade: LhystudiosDriver.<event_handler>
self.context.plot_shift = self.check_plot_shift.GetValue()
try:
self.context.plot_planner.force_shift = self.context.plot_shift
except (AttributeError, TypeError):
pass
def on_check_alt_raster(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
self.context.nse_raster = self.check_alternative_raster.GetValue()
def on_check_twitches(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
self.context.twitches = self.check_twitches.GetValue()
def on_check_rapid_between(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
self.context.opt_rapid_between = self.check_rapid_moves_between.GetValue()
def on_text_min_jog_distance(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
try:
self.context.opt_jog_minimum = int(
self.text_minimum_jog_distance.GetValue()
)
except ValueError:
pass
def on_jog_method_radio(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
self.context.opt_jog_mode = self.radio_box_jog_method.GetSelection()
def on_check_override_rapid(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
        self.context.rapid_override = self.check_override_rapid.GetValue()
def on_text_rapid_x(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
try:
self.context.rapid_override_speed_x = float(self.text_rapid_x.GetValue())
except ValueError:
pass
def on_text_rapid_y(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
try:
self.context.rapid_override_speed_y = float(self.text_rapid_y.GetValue())
except ValueError:
pass
def on_check_scale_speed(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
self.context.scale_speed_enabled = self.check_scale_speed.GetValue()
def on_text_speed_scale(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
try:
self.context.scale_speed = float(self.text_speed_scale_amount.GetValue())
except ValueError:
pass
def on_check_max_speed_vector(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
self.context.max_speed_vector_enabled = self.check_max_speed_vector.GetValue()
def on_text_speed_max_vector(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
try:
self.context.max_speed_vector = float(self.text_max_speed_vector.GetValue())
except ValueError:
pass
def on_check_max_speed_raster(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
self.context.max_speed_raster_enabled = self.check_max_speed_raster.GetValue()
def on_text_speed_max_raster(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
try:
self.context.max_speed_raster = float(self.text_max_speed_raster.GetValue())
except ValueError:
pass
class LhystudiosDriverGui(MWindow):
def __init__(self, *args, **kwds):
super().__init__(374, 734, *args, **kwds)
self.context = self.context.device
_icon = wx.NullIcon
_icon.CopyFromBitmap(icons8_administrative_tools_50.GetBitmap())
self.SetIcon(_icon)
        self.SetTitle(_("Lhystudios-Configuration"))
# self.notebook_main = wx.Notebook(self, wx.ID_ANY)
self.notebook_main = wx.aui.AuiNotebook(
self,
-1,
style=wx.aui.AUI_NB_TAB_EXTERNAL_MOVE
| wx.aui.AUI_NB_SCROLL_BUTTONS
| wx.aui.AUI_NB_TAB_SPLIT
| wx.aui.AUI_NB_TAB_MOVE,
)
self.ConfigurationPanel = ConfigurationInterfacePanel(
self.notebook_main, wx.ID_ANY, context=self.context
)
self.notebook_main.AddPage(self.ConfigurationPanel, _("Configuration"))
self.SetupPanel = ConfigurationSetupPanel(
self.notebook_main, wx.ID_ANY, context=self.context
)
self.notebook_main.AddPage(self.SetupPanel, _("Setup"))
self.Layout()
self.add_module_delegate(self.ConfigurationPanel)
self.add_module_delegate(self.SetupPanel)
def window_open(self):
self.SetupPanel.pane_show()
self.ConfigurationPanel.pane_show()
def window_close(self):
self.SetupPanel.pane_hide()
self.ConfigurationPanel.pane_hide()
def window_preserve(self):
return False
```
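The Pulse Grouping tooltip in `ConfigurationSetupPanel` above describes the technique in prose: adjacent on/off pulses are swapped so that equal bits cluster together and the laser switches less often (e.g. `X_X_` becomes `XX__`, with each pulse moving by at most one position). The sketch below only illustrates that idea; `group_pulses` is a hypothetical helper, not MeerK40t's actual plot-planner implementation.
```python
# Minimal sketch of the Pulse Grouping idea, assuming pulses arrive as a flat
# list of 0/1 values. Hypothetical helper, not MeerK40t's plot planner.


def group_pulses(bits):
    """Swap bits within adjacent pairs so equal values cluster together.

    Each pulse moves by at most one position, but runs of identical bits get
    longer, reducing the number of laser on/off switches.
    """
    out = []
    last = 0  # value most recently emitted
    for i in range(0, len(bits) - 1, 2):
        a, b = bits[i], bits[i + 1]
        if a + b == 1:
            # Exactly one pulse in this pair: put it next to the value just
            # emitted so that equal bits end up adjacent.
            a, b = (1, 0) if last == 1 else (0, 1)
        out.extend((a, b))
        last = b
    if len(bits) % 2:
        out.append(bits[-1])  # odd trailing bit is passed through unchanged
    return out


if __name__ == "__main__":
    # X_X_ style input: four on/off switches before grouping, two after.
    print(group_pulses([1, 0, 1, 0]))  # -> [0, 1, 1, 0]
```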
#### File: rotary/gui/gui.py
```python
ROTARY_VIEW = False
def plugin(kernel, lifecycle):
if lifecycle == "cli":
kernel.set_feature("rotary")
if lifecycle == "invalidate":
return not kernel.has_feature("wx")
if lifecycle == "register":
from meerk40t.gui.icons import icons8_roll_50
from meerk40t.rotary.gui.rotarysettings import RotarySettings
_ = kernel.translation
kernel.register("window/Rotary", RotarySettings)
kernel.register(
"button/config/Rotary",
{
"label": _("Rotary"),
"icon": icons8_roll_50,
"tip": _("Opens Rotary Window"),
"action": lambda v: kernel.console("window toggle Rotary\n"),
},
)
@kernel.console_command("rotaryview", help=_("Rotary View of Scene"))
def toggle_rotary_view(*args, **kwargs):
"""
Rotary Stretch/Unstretch of Scene based on values in rotary service
"""
global ROTARY_VIEW
rotary = kernel.rotary
if ROTARY_VIEW:
rotary(
"scene aspect {x} {y}\n".format(x=rotary.scale_x, y=rotary.scale_y)
)
else:
try:
rotary(
"scene aspect {ix} {iy}\n".format(
ix=1.0 / rotary.scale_x, iy=1.0 / rotary.scale_y
)
)
except ZeroDivisionError:
pass
ROTARY_VIEW = not ROTARY_VIEW
```
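A minimal usage sketch for the `rotaryview` console command registered above, assuming a running MeerK40t kernel with the wx and rotary features available; `kernel` here stands in for whatever kernel reference the caller already holds.
```python
# Hypothetical usage: toggle the rotary-adjusted scene aspect by sending the
# console command registered above to an existing kernel instance.
kernel.console("rotaryview\n")  # toggles the rotary view on
kernel.console("rotaryview\n")  # toggles it back off
```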
#### File: meerk40t/tools/clipper.py
```python
import math
from collections import namedtuple
horizontal = float("-inf")
class ClipType:
(Intersection, Union, Difference, Xor) = range(4)
class PolyType:
(Subject, Clip) = range(2)
class PolyFillType:
(EvenOdd, NonZero, Positive, Negative) = range(4)
class JoinType:
(Square, Round, Miter) = range(3)
class EndType:
(Closed, Butt, Square, Round) = range(4)
class EdgeSide:
(Left, Right) = range(2)
class Protects:
(Neither, Left, Right, Both) = range(4)
class Direction:
(LeftToRight, RightToLeft) = range(2)
Point = namedtuple("Point", "x y")
FloatPoint = namedtuple("FloatPoint", "x y")
Rect = namedtuple("Rect", "left top right bottom")
class LocalMinima(object):
leftBound = rightBound = nextLm = None
def __init__(self, y, leftBound, rightBound):
self.y = y
self.leftBound = leftBound
self.rightBound = rightBound
class Scanbeam(object):
__slots__ = ("y", "nextSb")
def __init__(self, y, nextSb=None):
self.y = y
self.nextSb = nextSb
def __repr__(self):
s = "None"
if self.nextSb is not None:
s = "<obj>"
return "(y:%i, nextSb:%s)" % (self.y, s)
class IntersectNode(object):
__slots__ = ("e1", "e2", "pt", "nextIn")
def __init__(self, e1, e2, pt):
self.e1 = e1
self.e2 = e2
self.pt = pt
self.nextIn = None
class OutPt(object):
__slots__ = ("idx", "pt", "prevOp", "nextOp")
def __init__(self, idx, pt):
self.idx = idx
self.pt = pt
self.prevOp = None
self.nextOp = None
class OutRec(object):
__slots__ = ("idx", "bottomPt", "isHole", "FirstLeft", "pts", "PolyNode")
def __init__(self, idx):
self.idx = idx
self.bottomPt = None
self.isHole = False
self.FirstLeft = None
self.pts = None
self.PolyNode = None
class JoinRec(object):
__slots__ = ("pt1a", "pt1b", "poly1Idx", "pt2a", "pt2b", "poly2Idx")
class HorzJoin(object):
edge = None
savedIdx = 0
prevHj = None
nextHj = None
def __init__(self, edge, idx):
self.edge = edge
self.savedIdx = idx
# ===============================================================================
# Unit global functions ...
# ===============================================================================
def IntsToPoints(ints):
result = []
for i in range(0, len(ints), 2):
result.append(Point(ints[i], ints[i + 1]))
return result
def Area(polygon):
# see http://www.mathopenref.com/coordpolygonarea2.html
highI = len(polygon) - 1
A = (polygon[highI].x + polygon[0].x) * (polygon[0].y - polygon[highI].y)
for i in range(highI):
A += (polygon[i].x + polygon[i + 1].x) * (polygon[i + 1].y - polygon[i].y)
return float(A) / 2
def Orientation(polygon):
return Area(polygon) > 0.0
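# Illustrative check (not part of the library): for the axis-aligned square
# below, Area() evaluates to 100.0, so Orientation() returns True.
#     square = IntsToPoints([0, 0, 10, 0, 10, 10, 0, 10])
#     Area(square)         # -> 100.0
#     Orientation(square)  # -> True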
# ===============================================================================
# PolyNode & PolyTree classes (+ ancillary functions)
# ===============================================================================
class PolyNode(object):
"""Node of PolyTree"""
def __init__(self):
self.Contour = []
self.Childs = []
self.Parent = None
self.Index = 0
self.ChildCount = 0
    def IsHole(self):
        # Walk the ancestry without mutating self.Parent; a contour is a hole
        # when it is nested inside an odd number of enclosing contours.
        result = True
        node = self.Parent
        while node is not None:
            result = not result
            node = node.Parent
        return result
def GetNext(self):
if self.ChildCount > 0:
return self.Childs[0]
else:
return self._GetNextSiblingUp()
def _AddChild(self, node):
self.Childs.append(node)
node.Index = self.ChildCount
node.Parent = self
self.ChildCount += 1
def _GetNextSiblingUp(self):
if self.Parent is None:
return None
elif self.Index == self.Parent.ChildCount - 1:
return self.Parent._GetNextSiblingUp()
else:
return self.Parent.Childs[self.Index + 1]
class PolyTree(PolyNode):
"""Container for PolyNodes"""
def __init__(self):
PolyNode.__init__(self)
self._AllNodes = []
def Clear(self):
self._AllNodes = []
self.Childs = []
self.ChildCount = 0
def GetFirst(self):
if self.ChildCount > 0:
return self.Childs[0]
else:
return None
def Total(self):
return len(self._AllNodes)
def _AddPolyNodeToPolygons(polynode, polygons):
"""Internal function for PolyTreeToPolygons()"""
if len(polynode.Contour) > 0:
polygons.append(polynode.Contour)
for i in range(polynode.ChildCount):
_AddPolyNodeToPolygons(polynode.Childs[i], polygons)
def PolyTreeToPolygons(polyTree):
result = []
_AddPolyNodeToPolygons(polyTree, result)
return result
# ===============================================================================
# Edge class
# ===============================================================================
class Edge(object):
def __init__(self):
self.Bot = Point(0, 0)
self.Curr = Point(0, 0)
self.Top = Point(0, 0)
self.Delta = Point(0, 0)
self.dx = float(0.0)
self.polyType = PolyType.Subject
self.side = EdgeSide.Left
self.windDelta, self.windCnt, self.windCnt2 = 0, 0, 0
self.outIdx = -1
self.nextE, self.prevE, self.nextInLML = None, None, None
self.prevInAEL, self.nextInAEL, self.prevInSEL, self.nextInSEL = (
None,
None,
None,
None,
)
def __repr__(self):
return "(%i,%i . %i,%i {dx:%0.2f} %i {%x})" % (
self.Bot.x,
self.Bot.y,
self.Top.x,
self.Top.y,
self.dx,
self.outIdx,
id(self),
)
# ===============================================================================
# ClipperBase class (+ data structs & ancillary functions)
# ===============================================================================
def _PointsEqual(pt1, pt2):
return (pt1.x == pt2.x) and (pt1.y == pt2.y)
def _SlopesEqual(pt1, pt2, pt3, pt4=None):
if pt4 is None:
return (pt1.y - pt2.y) * (pt2.x - pt3.x) == (pt1.x - pt2.x) * (pt2.y - pt3.y)
else:
return (pt1.y - pt2.y) * (pt3.x - pt4.x) == (pt1.x - pt2.x) * (pt3.y - pt4.y)
def _SlopesEqual2(e1, e2):
return e1.Delta.y * e2.Delta.x == e1.Delta.x * e2.Delta.y
def _SetDx(e):
e.Delta = Point(e.Top.x - e.Bot.x, e.Top.y - e.Bot.y)
if e.Delta.y == 0:
e.dx = horizontal
else:
e.dx = float(e.Delta.x) / float(e.Delta.y)
def _SwapSides(e1, e2):
side = e1.side
e1.side = e2.side
e2.side = side
def _SwapPolyIndexes(e1, e2):
idx = e1.outIdx
e1.outIdx = e2.outIdx
e2.outIdx = idx
def _InitEdge(e, eNext, ePrev, pt, polyType):
e.nextE = eNext
e.prevE = ePrev
e.Curr = pt
if e.Curr.y >= e.nextE.Curr.y:
e.Bot = e.Curr
e.Top = e.nextE.Curr
e.windDelta = 1
else:
e.Top = e.Curr
e.Bot = e.nextE.Curr
e.windDelta = -1
_SetDx(e)
e.outIdx = -1
e.PolyType = polyType
def _SwapX(e):
e.Curr = Point(e.Top.x, e.Curr.y)
e.Top = Point(e.Bot.x, e.Top.y)
e.Bot = Point(e.Curr.x, e.Bot.y)
class ClipperBase(object):
def __init__(self):
self._EdgeList = [] # 2D array
self._LocalMinList = None # single-linked list of LocalMinima
self._CurrentLocMin = None
def _InsertLocalMinima(self, lm):
if self._LocalMinList is None:
self._LocalMinList = lm
elif lm.y >= self._LocalMinList.y:
lm.nextLm = self._LocalMinList
self._LocalMinList = lm
else:
tmp = self._LocalMinList
while tmp.nextLm is not None and lm.y < tmp.nextLm.y:
tmp = tmp.nextLm
lm.nextLm = tmp.nextLm
tmp.nextLm = lm
def _AddBoundsToLML(self, e):
e.nextInLML = None
e = e.nextE
while True:
if e.dx == horizontal:
if (e.nextE.Top.y < e.Top.y) and (e.nextE.Bot.x > e.prevE.Bot.x):
break
if e.Top.x != e.prevE.Bot.x:
_SwapX(e)
e.nextInLML = e.prevE
elif e.Bot.y == e.prevE.Bot.y:
break
else:
e.nextInLML = e.prevE
e = e.nextE
if e.dx == horizontal:
if e.Bot.x != e.prevE.Bot.x:
_SwapX(e)
lm = LocalMinima(e.prevE.Bot.y, e.prevE, e)
elif e.dx < e.prevE.dx:
lm = LocalMinima(e.prevE.Bot.y, e.prevE, e)
else:
lm = LocalMinima(e.prevE.Bot.y, e, e.prevE)
lm.leftBound.side = EdgeSide.Left
lm.rightBound.side = EdgeSide.Right
self._InsertLocalMinima(lm)
while True:
if e.nextE.Top.y == e.Top.y and e.nextE.dx != horizontal:
break
e.nextInLML = e.nextE
e = e.nextE
if e.dx == horizontal and e.Bot.x != e.prevE.Top.x:
_SwapX(e)
return e.nextE
def _Reset(self):
lm = self._LocalMinList
if lm is not None:
self._CurrentLocMin = lm
while lm is not None:
e = lm.leftBound
while e is not None:
e.Curr = e.Bot
e.side = EdgeSide.Left
e.outIdx = -1
e = e.nextInLML
e = lm.rightBound
while e is not None:
e.Curr = e.Bot
e.side = EdgeSide.Right
e.outIdx = -1
e = e.nextInLML
lm = lm.nextLm
def AddPolygon(self, polygon, polyType):
ln = len(polygon)
if ln < 3:
return False
pg = polygon[:]
j = 0
# remove duplicate points and co-linear points
for i in range(1, len(polygon)):
if _PointsEqual(pg[j], polygon[i]):
continue
elif (j > 0) and _SlopesEqual(pg[j - 1], pg[j], polygon[i]):
if _PointsEqual(pg[j - 1], polygon[i]):
j -= 1
else:
j += 1
pg[j] = polygon[i]
if j < 2:
return False
# remove duplicate points and co-linear edges at the loop around
# of the start and end coordinates ...
ln = j + 1
while ln > 2:
if _PointsEqual(pg[j], pg[0]):
j -= 1
elif _PointsEqual(pg[0], pg[1]) or _SlopesEqual(pg[j], pg[0], pg[1]):
pg[0] = pg[j]
j -= 1
elif _SlopesEqual(pg[j - 1], pg[j], pg[0]):
j -= 1
elif _SlopesEqual(pg[0], pg[1], pg[2]):
for i in range(2, j + 1):
pg[i - 1] = pg[i]
j -= 1
else:
break
ln -= 1
if ln < 3:
return False
edges = []
for i in range(ln):
edges.append(Edge())
edges[0].Curr = pg[0]
_InitEdge(edges[ln - 1], edges[0], edges[ln - 2], pg[ln - 1], polyType)
for i in range(ln - 2, 0, -1):
_InitEdge(edges[i], edges[i + 1], edges[i - 1], pg[i], polyType)
_InitEdge(edges[0], edges[1], edges[ln - 1], pg[0], polyType)
e = edges[0]
eHighest = e
while True:
e.Curr = e.Bot
if e.Top.y < eHighest.Top.y:
eHighest = e
e = e.nextE
if e == edges[0]:
break
# make sure eHighest is positioned so the following loop works safely ...
if eHighest.windDelta > 0:
eHighest = eHighest.nextE
if eHighest.dx == horizontal:
eHighest = eHighest.nextE
# finally insert each local minima ...
e = eHighest
while True:
e = self._AddBoundsToLML(e)
if e == eHighest:
break
self._EdgeList.append(edges)
def AddPolygons(self, polygons, polyType):
result = False
for p in polygons:
if self.AddPolygon(p, polyType):
result = True
return result
def Clear(self):
self._EdgeList = []
self._LocalMinList = None
self._CurrentLocMin = None
def _PopLocalMinima(self):
if self._CurrentLocMin is not None:
self._CurrentLocMin = self._CurrentLocMin.nextLm
# ===============================================================================
# Clipper class (+ data structs & ancillary functions)
# ===============================================================================
def _IntersectPoint(edge1, edge2):
if _SlopesEqual2(edge1, edge2):
if edge2.Bot.y > edge1.Bot.y:
y = edge2.Bot.y
else:
y = edge1.Bot.y
return Point(0, y), False
if edge1.dx == 0:
x = edge1.Bot.x
if edge2.dx == horizontal:
y = edge2.Bot.y
else:
b2 = edge2.Bot.y - float(edge2.Bot.x) / edge2.dx
y = round(float(x) / edge2.dx + b2)
elif edge2.dx == 0:
x = edge2.Bot.x
if edge1.dx == horizontal:
y = edge1.Bot.y
else:
b1 = edge1.Bot.y - float(edge1.Bot.x) / edge1.dx
y = round(float(x) / edge1.dx + b1)
else:
b1 = float(edge1.Bot.x) - float(edge1.Bot.y) * edge1.dx
b2 = float(edge2.Bot.x) - float(edge2.Bot.y) * edge2.dx
m = (b2 - b1) / (edge1.dx - edge2.dx)
y = round(m)
if math.fabs(edge1.dx) < math.fabs(edge2.dx):
x = round(edge1.dx * m + b1)
else:
x = round(edge2.dx * m + b2)
if (y < edge1.Top.y) or (y < edge2.Top.y):
if edge1.Top.y > edge2.Top.y:
return edge1.Top, _TopX(edge2, edge1.Top.y) < edge1.Top.x
else:
return edge2.Top, _TopX(edge1, edge2.Top.y) > edge2.Top.x
else:
return Point(x, y), True
def _TopX(e, currentY):
if currentY == e.Top.y:
return e.Top.x
elif e.Top.x == e.Bot.x:
return e.Bot.x
else:
return e.Bot.x + round(e.dx * float(currentY - e.Bot.y))
def _E2InsertsBeforeE1(e1, e2):
if e2.Curr.x == e1.Curr.x:
if e2.Top.y > e1.Top.y:
return e2.Top.x < _TopX(e1, e2.Top.y)
return e1.Top.x > _TopX(e2, e1.Top.y)
else:
return e2.Curr.x < e1.Curr.x
def _IsMinima(e):
return e is not None and e.prevE.nextInLML != e and e.nextE.nextInLML != e
def _IsMaxima(e, y):
return e is not None and e.Top.y == y and e.nextInLML is None
def _IsIntermediate(e, y):
return e.Top.y == y and e.nextInLML is not None
def _GetMaximaPair(e):
if not _IsMaxima(e.nextE, e.Top.y) or e.nextE.Top.x != e.Top.x:
return e.prevE
else:
return e.nextE
def _GetnextInAEL(e, direction):
if direction == Direction.LeftToRight:
return e.nextInAEL
else:
return e.prevInAEL
def _ProtectLeft(val):
if val:
return Protects.Both
else:
return Protects.Right
def _ProtectRight(val):
if val:
return Protects.Both
else:
return Protects.Left
def _GetDx(pt1, pt2):
if pt1.y == pt2.y:
return horizontal
else:
return float(pt2.x - pt1.x) / float(pt2.y - pt1.y)
def _Param1RightOfParam2(outRec1, outRec2):
while outRec1 is not None:
outRec1 = outRec1.FirstLeft
if outRec1 == outRec2:
return True
return False
def _FirstParamIsbottomPt(btmPt1, btmPt2):
p = btmPt1.prevOp
while _PointsEqual(p.pt, btmPt1.pt) and (p != btmPt1):
p = p.prevOp
dx1p = abs(_GetDx(btmPt1.pt, p.pt))
p = btmPt1.nextOp
while _PointsEqual(p.pt, btmPt1.pt) and (p != btmPt1):
p = p.nextOp
dx1n = abs(_GetDx(btmPt1.pt, p.pt))
p = btmPt2.prevOp
while _PointsEqual(p.pt, btmPt2.pt) and (p != btmPt2):
p = p.prevOp
dx2p = abs(_GetDx(btmPt2.pt, p.pt))
p = btmPt2.nextOp
while _PointsEqual(p.pt, btmPt2.pt) and (p != btmPt2):
p = p.nextOp
dx2n = abs(_GetDx(btmPt2.pt, p.pt))
return (dx1p >= dx2p and dx1p >= dx2n) or (dx1n >= dx2p and dx1n >= dx2n)
def _GetBottomPt(pp):
dups = None
p = pp.nextOp
while p != pp:
if p.pt.y > pp.pt.y:
pp = p
dups = None
elif p.pt.y == pp.pt.y and p.pt.x <= pp.pt.x:
if p.pt.x < pp.pt.x:
dups = None
pp = p
else:
if p.nextOp != pp and p.prevOp != pp:
dups = p
p = p.nextOp
if dups is not None:
while dups != p:
if not _FirstParamIsbottomPt(p, dups):
pp = dups
dups = dups.nextOp
while not _PointsEqual(dups.pt, pp.pt):
dups = dups.nextOp
return pp
def _GetLowermostRec(outRec1, outRec2):
if outRec1.bottomPt is None:
outPt1 = _GetBottomPt(outRec1.pts)
else:
outPt1 = outRec1.bottomPt
if outRec2.bottomPt is None:
outPt2 = _GetBottomPt(outRec2.pts)
else:
outPt2 = outRec2.bottomPt
if outPt1.pt.y > outPt2.pt.y:
return outRec1
elif outPt1.pt.y < outPt2.pt.y:
return outRec2
elif outPt1.pt.x < outPt2.pt.x:
return outRec1
elif outPt1.pt.x > outPt2.pt.x:
return outRec2
elif outPt1.nextOp == outPt1:
return outRec2
elif outPt2.nextOp == outPt2:
return outRec1
elif _FirstParamIsbottomPt(outPt1, outPt2):
return outRec1
else:
return outRec2
def _SetHoleState(e, outRec, polyOutList):
isHole = False
e2 = e.prevInAEL
while e2 is not None:
if e2.outIdx >= 0:
isHole = not isHole
if outRec.FirstLeft is None:
outRec.FirstLeft = polyOutList[e2.outIdx]
e2 = e2.prevInAEL
outRec.isHole = isHole
def _PointCount(pts):
if pts is None:
return 0
p = pts
result = 0
while True:
result += 1
p = p.nextOp
if p == pts:
break
return result
def _PointIsVertex(pt, outPts):
op = outPts
while True:
if _PointsEqual(op.pt, pt):
return True
op = op.nextOp
if op == outPts:
break
return False
def _ReversePolyPtLinks(pp):
if pp is None:
return
pp1 = pp
while True:
pp2 = pp1.nextOp
pp1.nextOp = pp1.prevOp
pp1.prevOp = pp2
pp1 = pp2
if pp1 == pp:
break
def _FixupOutPolygon(outRec):
lastOK = None
outRec.bottomPt = None
pp = outRec.pts
while True:
if pp.prevOp == pp or pp.nextOp == pp.prevOp:
outRec.pts = None
return
if _PointsEqual(pp.pt, pp.nextOp.pt) or _SlopesEqual(
pp.prevOp.pt, pp.pt, pp.nextOp.pt
):
lastOK = None
pp.prevOp.nextOp = pp.nextOp
pp.nextOp.prevOp = pp.prevOp
pp = pp.prevOp
elif pp == lastOK:
break
else:
if lastOK is None:
lastOK = pp
pp = pp.nextOp
outRec.pts = pp
def _FixHoleLinkage(outRec):
if outRec.FirstLeft is None or (
outRec.isHole != outRec.FirstLeft.isHole and outRec.FirstLeft.pts is not None
):
return
orfl = outRec.FirstLeft
while orfl is not None and (orfl.isHole == outRec.isHole or orfl.pts is None):
orfl = orfl.FirstLeft
outRec.FirstLeft = orfl
def _GetOverlapSegment(pt1a, pt1b, pt2a, pt2b):
# precondition: segments are co-linear
if abs(pt1a.x - pt1b.x) > abs(pt1a.y - pt1b.y):
if pt1a.x > pt1b.x:
tmp = pt1a
pt1a = pt1b
pt1b = tmp
if pt2a.x > pt2b.x:
tmp = pt2a
pt2a = pt2b
pt2b = tmp
if pt1a.x > pt2a.x:
pt1 = pt1a
else:
pt1 = pt2a
if pt1b.x < pt2b.x:
pt2 = pt1b
else:
pt2 = pt2b
return pt1, pt2, pt1.x < pt2.x
else:
if pt1a.y < pt1b.y:
tmp = pt1a
pt1a = pt1b
pt1b = tmp
if pt2a.y < pt2b.y:
tmp = pt2a
pt2a = pt2b
pt2b = tmp
if pt1a.y < pt2a.y:
pt1 = pt1a
else:
pt1 = pt2a
if pt1b.y > pt2b.y:
pt2 = pt1b
else:
pt2 = pt2b
return pt1, pt2, pt1.y > pt2.y
def _FindSegment(outPt, pt1, pt2):
if outPt is None:
return outPt, pt1, pt2, False
pt1a = pt1
pt2a = pt2
outPt2 = outPt
while True:
if _SlopesEqual(pt1a, pt2a, outPt.pt, outPt.prevOp.pt) and _SlopesEqual(
pt1a, pt2a, outPt.pt
):
pt1, pt2, overlap = _GetOverlapSegment(
pt1a, pt2a, outPt.pt, outPt.prevOp.pt
)
if overlap:
return outPt, pt1, pt2, True
outPt = outPt.nextOp
if outPt == outPt2:
return outPt, pt1, pt2, False
def _Pt3IsBetweenPt1AndPt2(pt1, pt2, pt3):
if _PointsEqual(pt1, pt3) or _PointsEqual(pt2, pt3):
return True
elif pt1.x != pt2.x:
return (pt1.x < pt3.x) == (pt3.x < pt2.x)
else:
return (pt1.y < pt3.y) == (pt3.y < pt2.y)
def _InsertPolyPtBetween(outPt1, outPt2, pt):
if outPt1 == outPt2:
raise Exception("JoinError")
result = OutPt(outPt1.idx, pt)
if outPt2 == outPt1.nextOp:
outPt1.nextOp = result
outPt2.prevOp = result
result.nextOp = outPt2
result.prevOp = outPt1
else:
outPt2.nextOp = result
outPt1.prevOp = result
result.nextOp = outPt1
result.prevOp = outPt2
return result
def _PointOnLineSegment(pt, linePt1, linePt2):
return (
((pt.x == linePt1.x) and (pt.y == linePt1.y))
or ((pt.x == linePt2.x) and (pt.y == linePt2.y))
or (
((pt.x > linePt1.x) == (pt.x < linePt2.x))
and ((pt.y > linePt1.y) == (pt.y < linePt2.y))
and (
(pt.x - linePt1.x) * (linePt2.y - linePt1.y)
== (linePt2.x - linePt1.x) * (pt.y - linePt1.y)
)
)
)
def _PointOnPolygon(pt, pp):
pp2 = pp
while True:
if _PointOnLineSegment(pt, pp2.pt, pp2.nextOp.pt):
return True
pp2 = pp2.nextOp
if pp2 == pp:
return False
def _PointInPolygon(pt, outPt):
result = False
outPt2 = outPt
while True:
if (
((outPt2.pt.y <= pt.y) and (pt.y < outPt2.prevOp.pt.y))
or ((outPt2.prevOp.pt.y <= pt.y) and (pt.y < outPt2.pt.y))
) and (
pt.x
< (outPt2.prevOp.pt.x - outPt2.pt.x)
* (pt.y - outPt2.pt.y)
/ (outPt2.prevOp.pt.y - outPt2.pt.y)
+ outPt2.pt.x
):
result = not result
outPt2 = outPt2.nextOp
if outPt2 == outPt:
            break
    return result
def _Poly2ContainsPoly1(outPt1, outPt2):
pt = outPt1
if _PointOnPolygon(pt.pt, outPt2):
pt = pt.nextOp
while pt != outPt1 and _PointOnPolygon(pt.pt, outPt2):
pt = pt.nextOp
if pt == outPt1:
return True
return _PointInPolygon(pt.pt, outPt2)
def _EdgesAdjacent(inode):
return (inode.e1.nextInSEL == inode.e2) or (inode.e1.prevInSEL == inode.e2)
def _UpdateOutPtIdxs(outrec):
op = outrec.pts
while True:
op.idx = outrec.idx
op = op.prevOp
if op == outrec.pts:
break
class Clipper(ClipperBase):
def __init__(self):
ClipperBase.__init__(self)
self.ReverseSolution = False
self.ForceSimple = False
self._PolyOutList = []
self._ClipType = ClipType.Intersection
self._Scanbeam = None
self._ActiveEdges = None
self._SortedEdges = None
self._IntersectNodes = None
self._ClipFillType = PolyFillType.EvenOdd
self._SubjFillType = PolyFillType.EvenOdd
self._ExecuteLocked = False
self._UsingPolyTree = False
self._JoinList = None
self._HorzJoins = None
def _Reset(self):
ClipperBase._Reset(self)
self._Scanbeam = None
self._PolyOutList = []
lm = self._LocalMinList
while lm is not None:
self._InsertScanbeam(lm.y)
lm = lm.nextLm
def Clear(self):
self._PolyOutList = []
ClipperBase.Clear(self)
def _InsertScanbeam(self, y):
if self._Scanbeam is None:
self._Scanbeam = Scanbeam(y)
elif y > self._Scanbeam.y:
self._Scanbeam = Scanbeam(y, self._Scanbeam)
else:
sb = self._Scanbeam
while sb.nextSb is not None and y <= sb.nextSb.y:
sb = sb.nextSb
if y == sb.y:
return
newSb = Scanbeam(y, sb.nextSb)
sb.nextSb = newSb
def _PopScanbeam(self):
result = self._Scanbeam.y
self._Scanbeam = self._Scanbeam.nextSb
return result
def _SetWindingCount(self, edge):
e = edge.prevInAEL
while e is not None and e.PolyType != edge.PolyType:
e = e.prevInAEL
if e is None:
edge.windCnt = edge.windDelta
edge.windCnt2 = 0
e = self._ActiveEdges
elif self._IsEvenOddFillType(edge):
edge.windCnt = 1
edge.windCnt2 = e.windCnt2
e = e.nextInAEL
else:
if e.windCnt * e.windDelta < 0:
if abs(e.windCnt) > 1:
if e.windDelta * edge.windDelta < 0:
edge.windCnt = e.windCnt
else:
edge.windCnt = e.windCnt + edge.windDelta
else:
edge.windCnt = e.windCnt + e.windDelta + edge.windDelta
elif (abs(e.windCnt) > 1) and (e.windDelta * edge.windDelta < 0):
edge.windCnt = e.windCnt
elif e.windCnt + edge.windDelta == 0:
edge.windCnt = e.windCnt
else:
edge.windCnt = e.windCnt + edge.windDelta
edge.windCnt2 = e.windCnt2
e = e.nextInAEL
# update windCnt2 ...
if self._IsEvenOddAltFillType(edge):
while e != edge:
if edge.windCnt2 == 0:
edge.windCnt2 = 1
else:
edge.windCnt2 = 0
e = e.nextInAEL
else:
while e != edge:
edge.windCnt2 += e.windDelta
e = e.nextInAEL
def _IsEvenOddFillType(self, edge):
if edge.PolyType == PolyType.Subject:
return self._SubjFillType == PolyFillType.EvenOdd
else:
return self._ClipFillType == PolyFillType.EvenOdd
def _IsEvenOddAltFillType(self, edge):
if edge.PolyType == PolyType.Subject:
return self._ClipFillType == PolyFillType.EvenOdd
else:
return self._SubjFillType == PolyFillType.EvenOdd
def _IsContributing(self, edge):
if edge.PolyType == PolyType.Subject:
pft = self._SubjFillType
pft2 = self._ClipFillType
else:
pft = self._ClipFillType
pft2 = self._SubjFillType
if pft == PolyFillType.EvenOdd or pft == PolyFillType.NonZero:
if abs(edge.windCnt) != 1:
return False
elif pft == PolyFillType.Positive:
if edge.windCnt != 1:
return False
elif pft == PolyFillType.Negative:
if edge.windCnt != -1:
return False
if self._ClipType == ClipType.Intersection: ###########
if pft2 == PolyFillType.EvenOdd or pft2 == PolyFillType.NonZero:
return edge.windCnt2 != 0
elif pft2 == PolyFillType.Positive:
return edge.windCnt2 > 0
else:
return edge.windCnt2 < 0 # Negative
elif self._ClipType == ClipType.Union: ###########
if pft2 == PolyFillType.EvenOdd or pft2 == PolyFillType.NonZero:
return edge.windCnt2 == 0
elif pft2 == PolyFillType.Positive:
return edge.windCnt2 <= 0
else:
return edge.windCnt2 >= 0 # Negative
elif self._ClipType == ClipType.Difference: ###########
if edge.PolyType == PolyType.Subject:
if pft2 == PolyFillType.EvenOdd or pft2 == PolyFillType.NonZero:
return edge.windCnt2 == 0
elif edge.PolyType == PolyFillType.Positive:
return edge.windCnt2 <= 0
else:
return edge.windCnt2 >= 0
else:
if pft2 == PolyFillType.EvenOdd or pft2 == PolyFillType.NonZero:
return edge.windCnt2 != 0
elif pft2 == PolyFillType.Positive:
return edge.windCnt2 > 0
else:
return edge.windCnt2 < 0
else: # self._ClipType == ClipType.XOR: ###########
return True
def _AddEdgeToSEL(self, edge):
if self._SortedEdges is None:
self._SortedEdges = edge
edge.prevInSEL = None
edge.nextInSEL = None
else:
# add edge to front of list ...
edge.nextInSEL = self._SortedEdges
edge.prevInSEL = None
self._SortedEdges.prevInSEL = edge
self._SortedEdges = edge
def _CopyAELToSEL(self):
e = self._ActiveEdges
self._SortedEdges = e
while e is not None:
e.prevInSEL = e.prevInAEL
e.nextInSEL = e.nextInAEL
e = e.nextInAEL
def _InsertEdgeIntoAEL(self, edge):
edge.prevInAEL = None
edge.nextInAEL = None
if self._ActiveEdges is None:
self._ActiveEdges = edge
elif _E2InsertsBeforeE1(self._ActiveEdges, edge):
edge.nextInAEL = self._ActiveEdges
self._ActiveEdges.prevInAEL = edge
self._ActiveEdges = edge
else:
e = self._ActiveEdges
while e.nextInAEL is not None and not _E2InsertsBeforeE1(e.nextInAEL, edge):
e = e.nextInAEL
edge.nextInAEL = e.nextInAEL
if e.nextInAEL is not None:
e.nextInAEL.prevInAEL = edge
edge.prevInAEL = e
e.nextInAEL = edge
def _InsertLocalMinimaIntoAEL(self, botY):
while self._CurrentLocMin is not None and self._CurrentLocMin.y == botY:
lb = self._CurrentLocMin.leftBound
rb = self._CurrentLocMin.rightBound
self._InsertEdgeIntoAEL(lb)
self._InsertScanbeam(lb.Top.y)
self._InsertEdgeIntoAEL(rb)
if self._IsEvenOddFillType(lb):
lb.windDelta = 1
rb.windDelta = 1
else:
rb.windDelta = -lb.windDelta
self._SetWindingCount(lb)
rb.windCnt = lb.windCnt
rb.windCnt2 = lb.windCnt2
if rb.dx == horizontal:
self._AddEdgeToSEL(rb)
self._InsertScanbeam(rb.nextInLML.Top.y)
else:
self._InsertScanbeam(rb.Top.y)
if self._IsContributing(lb):
self._AddLocalMinPoly(lb, rb, Point(lb.Curr.x, self._CurrentLocMin.y))
if rb.outIdx >= 0 and rb.dx == horizontal and self._HorzJoins is not None:
hj = self._HorzJoins
while True:
dummy1, dummy2, overlap = _GetOverlapSegment(
hj.edge.Bot, hj.edge.Top, rb.Bot, rb.Top
)
if overlap:
self._AddJoin(hj.edge, rb, hj.savedIdx)
hj = hj.nextHj
if hj == self._HorzJoins:
break
if lb.nextInAEL != rb:
if (
rb.outIdx >= 0
and rb.prevInAEL.outIdx >= 0
and _SlopesEqual2(rb.prevInAEL, rb)
):
self._AddJoin(rb, rb.prevInAEL)
e = lb.nextInAEL
pt = lb.Curr
while e != rb:
self._IntersectEdges(rb, e, pt)
e = e.nextInAEL
self._PopLocalMinima()
def _SwapPositionsInAEL(self, e1, e2):
if e1.nextInAEL == e2:
nextE = e2.nextInAEL
if nextE is not None:
nextE.prevInAEL = e1
prevE = e1.prevInAEL
if prevE is not None:
prevE.nextInAEL = e2
e2.prevInAEL = prevE
e2.nextInAEL = e1
e1.prevInAEL = e2
e1.nextInAEL = nextE
elif e2.nextInAEL == e1:
nextE = e1.nextInAEL
if nextE is not None:
nextE.prevInAEL = e2
prevE = e2.prevInAEL
if prevE is not None:
prevE.nextInAEL = e1
e1.prevInAEL = prevE
e1.nextInAEL = e2
e2.prevInAEL = e1
e2.nextInAEL = nextE
else:
nextE = e1.nextInAEL
prevE = e1.prevInAEL
e1.nextInAEL = e2.nextInAEL
if e1.nextInAEL is not None:
e1.nextInAEL.prevInAEL = e1
e1.prevInAEL = e2.prevInAEL
if e1.prevInAEL is not None:
e1.prevInAEL.nextInAEL = e1
e2.nextInAEL = nextE
if e2.nextInAEL is not None:
e2.nextInAEL.prevInAEL = e2
e2.prevInAEL = prevE
if e2.prevInAEL is not None:
e2.prevInAEL.nextInAEL = e2
if e1.prevInAEL is None:
self._ActiveEdges = e1
elif e2.prevInAEL is None:
self._ActiveEdges = e2
def _SwapPositionsInSEL(self, e1, e2):
if e1.nextInSEL == e2:
nextE = e2.nextInSEL
if nextE is not None:
nextE.prevInSEL = e1
prevE = e1.prevInSEL
if prevE is not None:
prevE.nextInSEL = e2
e2.prevInSEL = prevE
e2.nextInSEL = e1
e1.prevInSEL = e2
e1.nextInSEL = nextE
elif e2.nextInSEL == e1:
nextE = e1.nextInSEL
if nextE is not None:
nextE.prevInSEL = e2
prevE = e2.prevInSEL
if prevE is not None:
prevE.nextInSEL = e1
e1.prevInSEL = prevE
e1.nextInSEL = e2
e2.prevInSEL = e1
e2.nextInSEL = nextE
else:
nextE = e1.nextInSEL
prevE = e1.prevInSEL
            e1.nextInSEL = e2.nextInSEL
if e1.nextInSEL is not None:
e1.nextInSEL.prevInSEL = e1
e1.prevInSEL = e2.prevInSEL
if e1.prevInSEL is not None:
e1.prevInSEL.nextInSEL = e1
e2.nextInSEL = nextE
if e2.nextInSEL is not None:
e2.nextInSEL.prevInSEL = e2
e2.prevInSEL = prevE
if e2.prevInSEL is not None:
e2.prevInSEL.nextInSEL = e2
if e1.prevInSEL is None:
self._SortedEdges = e1
elif e2.prevInSEL is None:
self._SortedEdges = e2
def _IsTopHorz(self, xPos):
e = self._SortedEdges
while e is not None:
if (xPos >= min(e.Curr.x, e.Top.x)) and (xPos <= max(e.Curr.x, e.Top.x)):
return False
e = e.nextInSEL
return True
def _ProcessHorizontal(self, horzEdge):
if horzEdge.Curr.x < horzEdge.Top.x:
horzLeft = horzEdge.Curr.x
horzRight = horzEdge.Top.x
direction = Direction.LeftToRight
else:
horzLeft = horzEdge.Top.x
horzRight = horzEdge.Curr.x
direction = Direction.RightToLeft
eMaxPair = None
if horzEdge.nextInLML is None:
eMaxPair = _GetMaximaPair(horzEdge)
e = _GetnextInAEL(horzEdge, direction)
while e is not None:
if (e.Curr.x == horzEdge.Top.x) and eMaxPair is None:
if _SlopesEqual2(e, horzEdge.nextInLML):
if horzEdge.outIdx >= 0 and e.outIdx >= 0:
self._AddJoin(horzEdge.nextInLML, e, horzEdge.outIdx)
break
elif e.dx < horzEdge.nextInLML.dx:
break
eNext = _GetnextInAEL(e, direction)
if (
eMaxPair is not None
or ((direction == Direction.LeftToRight) and (e.Curr.x < horzRight))
or ((direction == Direction.RightToLeft) and (e.Curr.x > horzLeft))
):
if e == eMaxPair:
if direction == Direction.LeftToRight:
self._IntersectEdges(
horzEdge, e, Point(e.Curr.x, horzEdge.Curr.y)
)
else:
self._IntersectEdges(
e, horzEdge, Point(e.Curr.x, horzEdge.Curr.y)
)
return
elif e.dx == horizontal and not _IsMinima(e) and e.Curr.x <= e.Top.x:
if direction == Direction.LeftToRight:
self._IntersectEdges(
horzEdge,
e,
Point(e.Curr.x, horzEdge.Curr.y),
_ProtectRight(not self._IsTopHorz(e.Curr.x)),
)
else:
self._IntersectEdges(
e,
horzEdge,
Point(e.Curr.x, horzEdge.Curr.y),
_ProtectLeft(not self._IsTopHorz(e.Curr.x)),
)
elif direction == Direction.LeftToRight:
self._IntersectEdges(
horzEdge,
e,
Point(e.Curr.x, horzEdge.Curr.y),
_ProtectRight(not self._IsTopHorz(e.Curr.x)),
)
else:
self._IntersectEdges(
e,
horzEdge,
Point(e.Curr.x, horzEdge.Curr.y),
_ProtectLeft(not self._IsTopHorz(e.Curr.x)),
)
self._SwapPositionsInAEL(horzEdge, e)
elif (direction == Direction.LeftToRight and e.Curr.x >= horzRight) or (
direction == Direction.RightToLeft and e.Curr.x <= horzLeft
):
break
e = eNext
if horzEdge.nextInLML is not None:
if horzEdge.outIdx >= 0:
self._AddOutPt(horzEdge, horzEdge.Top)
self._UpdateEdgeIntoAEL(horzEdge)
else:
if horzEdge.outIdx >= 0:
self._IntersectEdges(
horzEdge,
eMaxPair,
Point(horzEdge.Top.x, horzEdge.Curr.y),
Protects.Both,
)
if eMaxPair.outIdx >= 0:
raise Exception("Clipper: Horizontal Error")
self._DeleteFromAEL(eMaxPair)
self._DeleteFromAEL(horzEdge)
def _ProcessHorizontals(self):
while self._SortedEdges is not None:
e = self._SortedEdges
self._DeleteFromSEL(e)
self._ProcessHorizontal(e)
def _AddJoin(self, e1, e2, e1OutIdx=-1, e2OutIdx=-1):
jr = JoinRec()
if e1OutIdx >= 0:
jr.poly1Idx = e1OutIdx
else:
jr.poly1Idx = e1.outIdx
jr.pt1a = e1.Curr
jr.pt1b = e1.Top
if e2OutIdx >= 0:
jr.poly2Idx = e2OutIdx
else:
jr.poly2Idx = e2.outIdx
jr.pt2a = e2.Curr
jr.pt2b = e2.Top
if self._JoinList is None:
self._JoinList = []
self._JoinList.append(jr)
def _FixupJoinRecs(self, jr, outPt, startIdx):
for i in range(startIdx, len(self._JoinList)):
jr2 = self._JoinList[i]
if jr2.poly1Idx == jr.poly1Idx and _PointIsVertex(jr2.pt1a, outPt):
jr2.poly1Idx = jr.poly2Idx
if jr2.poly2Idx == jr.poly1Idx and _PointIsVertex(jr2.pt2a, outPt):
jr2.poly2Idx = jr.poly2Idx
def _AddHorzJoin(self, e, idx):
hj = HorzJoin(e, idx)
if self._HorzJoins == None:
self._HorzJoins = hj
hj.nextHj = hj
hj.prevHj = hj
else:
hj.nextHj = self._HorzJoins
hj.prevHj = self._HorzJoins.prevHj
self._HorzJoins.prevHj.nextHj = hj
self._HorzJoins.prevHj = hj
def _InsertIntersectNode(self, e1, e2, pt):
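        # Insert into the intersection list, which is kept sorted by descending pt.y.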
newNode = IntersectNode(e1, e2, pt)
if self._IntersectNodes is None:
self._IntersectNodes = newNode
elif newNode.pt.y > self._IntersectNodes.pt.y:
newNode.nextIn = self._IntersectNodes
self._IntersectNodes = newNode
else:
node = self._IntersectNodes
while node.nextIn is not None and newNode.pt.y < node.nextIn.pt.y:
node = node.nextIn
newNode.nextIn = node.nextIn
node.nextIn = newNode
def _ProcessIntersections(self, botY, topY):
try:
self._BuildIntersectList(botY, topY)
if self._IntersectNodes is None:
return True
if (
self._IntersectNodes.nextIn is not None
and not self._FixupIntersectionOrder()
):
return False
self._ProcessIntersectList()
return True
finally:
self._IntersectNodes = None
self._SortedEdges = None
def _BuildIntersectList(self, botY, topY):
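        # Copy the AEL into the SEL, advance each edge to its x at topY, then bubble-sort the SEL by x; every swap marks an intersection inside the current scanbeam.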
e = self._ActiveEdges
if e is None:
return
self._SortedEdges = e
while e is not None:
e.prevInSEL = e.prevInAEL
e.nextInSEL = e.nextInAEL
e.Curr = Point(_TopX(e, topY), e.Curr.y)
e = e.nextInAEL
while True:
isModified = False
e = self._SortedEdges
while e.nextInSEL is not None:
eNext = e.nextInSEL
if e.Curr.x <= eNext.Curr.x:
e = eNext
continue
pt, intersected = _IntersectPoint(e, eNext)
if not intersected and e.Curr.x > eNext.Curr.x + 1:
raise Exception("Intersect Error")
if pt.y > botY:
pt = Point(_TopX(e, botY), botY)
self._InsertIntersectNode(e, eNext, pt)
self._SwapPositionsInSEL(e, eNext)
isModified = True
if e.prevInSEL is not None:
e.prevInSEL.nextInSEL = None
else:
break
if not isModified:
break
self._SortedEdges = None
return
def _ProcessIntersectList(self):
while self._IntersectNodes is not None:
node = self._IntersectNodes
self._IntersectEdges(node.e1, node.e2, node.pt, Protects.Both)
self._SwapPositionsInAEL(node.e1, node.e2)
self._IntersectNodes = node.nextIn
def _DeleteFromAEL(self, e):
aelPrev = e.prevInAEL
aelNext = e.nextInAEL
if aelPrev is None and aelNext is None and e != self._ActiveEdges:
return
if aelPrev is not None:
aelPrev.nextInAEL = aelNext
else:
self._ActiveEdges = aelNext
if aelNext is not None:
aelNext.prevInAEL = aelPrev
e.nextInAEL = None
e.prevInAEL = None
def _DeleteFromSEL(self, e):
SELPrev = e.prevInSEL
SELNext = e.nextInSEL
if SELPrev is None and SELNext is None and e != self._SortedEdges:
return
if SELPrev is not None:
SELPrev.nextInSEL = SELNext
else:
self._SortedEdges = SELNext
if SELNext is not None:
SELNext.prevInSEL = SELPrev
e.nextInSEL = None
e.prevInSEL = None
def _IntersectEdges(self, e1, e2, pt, protects=Protects.Neither):
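        # Update the winding counts of the two crossing edges and, depending on fill rules and clip type, emit output points or open/close output polygons at pt.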
e1stops = (
protects & Protects.Left == 0
and e1.nextInLML is None
and e1.Top.x == pt.x
and e1.Top.y == pt.y
)
e2stops = (
protects & Protects.Right == 0
and e2.nextInLML is None
and e2.Top.x == pt.x
and e2.Top.y == pt.y
)
e1Contributing = e1.outIdx >= 0
e2contributing = e2.outIdx >= 0
if e1.PolyType == e2.PolyType:
if self._IsEvenOddFillType(e1):
e1Wc = e1.windCnt
e1.windCnt = e2.windCnt
e2.windCnt = e1Wc
else:
if e1.windCnt + e2.windDelta == 0:
e1.windCnt = -e1.windCnt
else:
e1.windCnt += e2.windDelta
if e2.windCnt - e1.windDelta == 0:
e2.windCnt = -e2.windCnt
else:
e2.windCnt -= e1.windDelta
else:
if not self._IsEvenOddFillType(e2):
e1.windCnt2 += e2.windDelta
elif e1.windCnt2 == 0:
e1.windCnt2 = 1
else:
e1.windCnt2 = 0
if not self._IsEvenOddFillType(e1):
e2.windCnt2 -= e1.windDelta
elif e2.windCnt2 == 0:
e2.windCnt2 = 1
else:
e2.windCnt2 = 0
if e1.PolyType == PolyType.Subject:
e1FillType = self._SubjFillType
e1FillType2 = self._ClipFillType
else:
e1FillType = self._ClipFillType
e1FillType2 = self._SubjFillType
if e2.PolyType == PolyType.Subject:
e2FillType = self._SubjFillType
e2FillType2 = self._ClipFillType
else:
e2FillType = self._ClipFillType
e2FillType2 = self._SubjFillType
if e1FillType == PolyFillType.Positive:
e1Wc = e1.windCnt
elif e1FillType == PolyFillType.Negative:
e1Wc = -e1.windCnt
else:
e1Wc = abs(e1.windCnt)
if e2FillType == PolyFillType.Positive:
e2Wc = e2.windCnt
elif e2FillType == PolyFillType.Negative:
e2Wc = -e2.windCnt
else:
e2Wc = abs(e2.windCnt)
if e1Contributing and e2contributing:
if (
e1stops
or e2stops
or (e1Wc != 0 and e1Wc != 1)
or (e2Wc != 0 and e2Wc != 1)
or (e1.PolyType != e2.PolyType and self._ClipType != ClipType.Xor)
):
self._AddLocalMaxPoly(e1, e2, pt)
else:
self._AddOutPt(e1, pt)
self._AddOutPt(e2, pt)
_SwapSides(e1, e2)
_SwapPolyIndexes(e1, e2)
elif e1Contributing:
if e2Wc == 0 or e2Wc == 1:
self._AddOutPt(e1, pt)
_SwapSides(e1, e2)
_SwapPolyIndexes(e1, e2)
elif e2contributing:
if e1Wc == 0 or e1Wc == 1:
self._AddOutPt(e2, pt)
_SwapSides(e1, e2)
_SwapPolyIndexes(e1, e2)
elif (
(e1Wc == 0 or e1Wc == 1)
and (e2Wc == 0 or e2Wc == 1)
and not e1stops
and not e2stops
):
e1FillType2 = e2FillType2 = PolyFillType.EvenOdd
if e1FillType2 == PolyFillType.Positive:
e1Wc2 = e1.windCnt2
elif e1FillType2 == PolyFillType.Negative:
e1Wc2 = -e1.windCnt2
else:
e1Wc2 = abs(e1.windCnt2)
if e2FillType2 == PolyFillType.Positive:
e2Wc2 = e2.windCnt2
elif e2FillType2 == PolyFillType.Negative:
e2Wc2 = -e2.windCnt2
else:
e2Wc2 = abs(e2.windCnt2)
if e1.PolyType != e2.PolyType:
self._AddLocalMinPoly(e1, e2, pt)
elif e1Wc == 1 and e2Wc == 1:
if self._ClipType == ClipType.Intersection:
if e1Wc2 > 0 and e2Wc2 > 0:
self._AddLocalMinPoly(e1, e2, pt)
elif self._ClipType == ClipType.Union:
if e1Wc2 <= 0 and e2Wc2 <= 0:
self._AddLocalMinPoly(e1, e2, pt)
elif self._ClipType == ClipType.Difference:
if (e1.PolyType == PolyType.Clip and e1Wc2 > 0 and e2Wc2 > 0) or (
e1.PolyType == PolyType.Subject and e1Wc2 <= 0 and e2Wc2 <= 0
):
self._AddLocalMinPoly(e1, e2, pt)
else:
self._AddLocalMinPoly(e1, e2, pt)
else:
_SwapSides(e1, e2, self._PolyOutList)
if e1stops != e2stops and (
(e1stops and e1.outIdx >= 0) or (e2stops and e2.outIdx >= 0)
):
_SwapSides(e1, e2, self._PolyOutList)
_SwapPolyIndexes(e1, e2)
if e1stops:
self._DeleteFromAEL(e1)
if e2stops:
self._DeleteFromAEL(e2)
def _DoMaxima(self, e, topY):
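        # Handle a local maxima: intersect e with every edge between it and its maxima pair, then drop both edges from the AEL (closing their output polygon if contributing).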
eMaxPair = _GetMaximaPair(e)
x = e.Top.x
eNext = e.nextInAEL
while eNext != eMaxPair:
if eNext is None:
raise Exception("DoMaxima error")
self._IntersectEdges(e, eNext, Point(x, topY), Protects.Both)
self._SwapPositionsInAEL(e, eNext)
eNext = e.nextInAEL
if e.outIdx < 0 and eMaxPair.outIdx < 0:
self._DeleteFromAEL(e)
self._DeleteFromAEL(eMaxPair)
elif e.outIdx >= 0 and eMaxPair.outIdx >= 0:
self._IntersectEdges(e, eMaxPair, Point(x, topY))
else:
raise Exception("DoMaxima error")
def _UpdateEdgeIntoAEL(self, e):
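        # Replace e in the AEL with its successor bound (e.nextInLML), carrying over side, winding counts and output index.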
if e.nextInLML is None:
raise Exception("UpdateEdgeIntoAEL error")
aelPrev = e.prevInAEL
aelNext = e.nextInAEL
e.nextInLML.outIdx = e.outIdx
if aelPrev is not None:
aelPrev.nextInAEL = e.nextInLML
else:
self._ActiveEdges = e.nextInLML
if aelNext is not None:
aelNext.prevInAEL = e.nextInLML
e.nextInLML.side = e.side
e.nextInLML.windDelta = e.windDelta
e.nextInLML.windCnt = e.windCnt
e.nextInLML.windCnt2 = e.windCnt2
e = e.nextInLML
e.prevInAEL = aelPrev
e.nextInAEL = aelNext
if e.dx != horizontal:
self._InsertScanbeam(e.Top.y)
return e
def _AddLocalMinPoly(self, e1, e2, pt):
if e2.dx == horizontal or e1.dx > e2.dx:
self._AddOutPt(e1, pt)
e2.outIdx = e1.outIdx
e1.side = EdgeSide.Left
e2.side = EdgeSide.Right
e = e1
if e.prevInAEL == e2:
prevE = e2.prevInAEL
else:
prevE = e1.prevInAEL
else:
self._AddOutPt(e2, pt)
e1.outIdx = e2.outIdx
e1.side = EdgeSide.Right
e2.side = EdgeSide.Left
e = e2
if e.prevInAEL == e1:
prevE = e1.prevInAEL
else:
prevE = e.prevInAEL
if (
prevE is not None
and prevE.outIdx >= 0
and _TopX(prevE, pt.y) == _TopX(e, pt.y)
and _SlopesEqual2(e, prevE)
):
self._AddJoin(e, prevE)
return
def _AddLocalMaxPoly(self, e1, e2, pt):
self._AddOutPt(e1, pt)
if e1.outIdx == e2.outIdx:
e1.outIdx = -1
e2.outIdx = -1
elif e1.outIdx < e2.outIdx:
self._AppendPolygon(e1, e2)
else:
self._AppendPolygon(e2, e1)
def _CreateOutRec(self):
outRec = OutRec(len(self._PolyOutList))
self._PolyOutList.append(outRec)
return outRec
def _AddOutPt(self, e, pt):
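        # Append pt to the output polygon of edge e (creating a new OutRec if needed); output points form a circular doubly-linked list.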
toFront = e.side == EdgeSide.Left
if e.outIdx < 0:
outRec = self._CreateOutRec()
e.outIdx = outRec.idx
op = OutPt(outRec.idx, pt)
op.nextOp = op
op.prevOp = op
outRec.pts = op
_SetHoleState(e, outRec, self._PolyOutList)
else:
outRec = self._PolyOutList[e.outIdx]
op = outRec.pts
if (toFront and _PointsEqual(pt, op.pt)) or (
not toFront and _PointsEqual(pt, op.prevOp.pt)
):
return
op2 = OutPt(outRec.idx, pt)
op2.nextOp = op
op2.prevOp = op.prevOp
op.prevOp.nextOp = op2
op.prevOp = op2
if toFront:
outRec.pts = op2
def _AppendPolygon(self, e1, e2):
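        # Splice the output polygon of e2 onto the output polygon of e1, then redirect any AEL edge still pointing at the obsolete OutRec.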
outRec1 = self._PolyOutList[e1.outIdx]
outRec2 = self._PolyOutList[e2.outIdx]
holeStateRec = None
if _Param1RightOfParam2(outRec1, outRec2):
holeStateRec = outRec2
elif _Param1RightOfParam2(outRec2, outRec1):
holeStateRec = outRec1
else:
holeStateRec = _GetLowermostRec(outRec1, outRec2)
p1_lft = outRec1.pts
p2_lft = outRec2.pts
p1_rt = p1_lft.prevOp
p2_rt = p2_lft.prevOp
newSide = EdgeSide.Left
if e1.side == EdgeSide.Left:
if e2.side == EdgeSide.Left:
# z y x a b c
_ReversePolyPtLinks(p2_lft)
p2_lft.nextOp = p1_lft
p1_lft.prevOp = p2_lft
p1_rt.nextOp = p2_rt
p2_rt.prevOp = p1_rt
outRec1.pts = p2_rt
else:
# x y z a b c
p2_rt.nextOp = p1_lft
p1_lft.prevOp = p2_rt
p2_lft.prevOp = p1_rt
p1_rt.nextOp = p2_lft
outRec1.pts = p2_lft
else:
newSide = EdgeSide.Right
if e2.side == EdgeSide.Right:
# a b c z y x
_ReversePolyPtLinks(p2_lft)
p1_rt.nextOp = p2_rt
p2_rt.prevOp = p1_rt
p2_lft.nextOp = p1_lft
p1_lft.prevOp = p2_lft
else:
# a b c x y z
p1_rt.nextOp = p2_lft
p2_lft.prevOp = p1_rt
p1_lft.prevOp = p2_rt
p2_rt.nextOp = p1_lft
outRec1.bottomPt = None
if holeStateRec == outRec2:
if outRec2.FirstLeft != outRec1:
outRec1.FirstLeft = outRec2.FirstLeft
outRec1.isHole = outRec2.isHole
outRec2.pts = None
outRec2.bottomPt = None
outRec2.FirstLeft = outRec1
OKIdx = outRec1.idx
ObsoleteIdx = outRec2.idx
e1.outIdx = -1
e2.outIdx = -1
e = self._ActiveEdges
while e is not None:
if e.outIdx == ObsoleteIdx:
e.outIdx = OKIdx
e.side = newSide
break
e = e.nextInAEL
outRec2.idx = outRec1.idx
def _FixupIntersectionOrder(self):
self._CopyAELToSEL()
inode = self._IntersectNodes
while inode is not None:
if not _EdgesAdjacent(inode):
nextNode = inode.nextIn
while nextNode and not _EdgesAdjacent(nextNode):
nextNode = nextNode.nextIn
if nextNode is None:
return False
e1 = inode.e1
e2 = inode.e2
p = inode.pt
inode.e1 = nextNode.e1
inode.e2 = nextNode.e2
inode.pt = nextNode.pt
nextNode.e1 = e1
nextNode.e2 = e2
nextNode.pt = p
self._SwapPositionsInSEL(inode.e1, inode.e2)
inode = inode.nextIn
return True
def _ProcessEdgesAtTopOfScanbeam(self, topY):
e = self._ActiveEdges
while e is not None:
if _IsMaxima(e, topY) and _GetMaximaPair(e).dx != horizontal:
ePrev = e.prevInAEL
self._DoMaxima(e, topY)
if ePrev is None:
e = self._ActiveEdges
else:
e = ePrev.nextInAEL
else:
intermediateVert = _IsIntermediate(e, topY)
if intermediateVert and e.nextInLML.dx == horizontal:
if e.outIdx >= 0:
self._AddOutPt(e, e.Top)
hj = self._HorzJoins
if hj is not None:
while True:
_1, _2, overlap = _GetOverlapSegment(
hj.edge.Bot,
hj.edge.Top,
e.nextInLML.Bot,
e.nextInLML.Top,
)
if overlap:
self._AddJoin(
hj.edge, e.nextInLML, hj.savedIdx, e.outIdx
)
hj = hj.nextHj
if hj == self._HorzJoins:
break
self._AddHorzJoin(e.nextInLML, e.outIdx)
e = self._UpdateEdgeIntoAEL(e)
self._AddEdgeToSEL(e)
else:
e.Curr = Point(_TopX(e, topY), topY)
if (
self.ForceSimple
and e.prevInAEL is not None
and e.prevInAEL.Curr.x == e.Curr.x
and e.outIdx >= 0
and e.prevInAEL.outIdx >= 0
):
if intermediateVert:
self._AddOutPt(e.prevInAEL, Point(e.Curr.x, topY))
else:
self._AddOutPt(e, Point(e.Curr.x, topY))
e = e.nextInAEL
self._ProcessHorizontals()
e = self._ActiveEdges
while e is not None:
if _IsIntermediate(e, topY):
if e.outIdx >= 0:
self._AddOutPt(e, e.Top)
e = self._UpdateEdgeIntoAEL(e)
ePrev = e.prevInAEL
eNext = e.nextInAEL
if (
ePrev is not None
and ePrev.Curr.x == e.Bot.x
and (ePrev.Curr.y == e.Bot.y)
and (e.outIdx >= 0)
and (ePrev.outIdx >= 0)
and (ePrev.Curr.y > ePrev.Top.y)
and _SlopesEqual2(e, ePrev)
):
self._AddOutPt(ePrev, e.Bot)
self._AddJoin(e, ePrev)
elif (
eNext is not None
and (eNext.Curr.x == e.Bot.x)
and (eNext.Curr.y == e.Bot.y)
and (e.outIdx >= 0)
and (eNext.outIdx >= 0)
and (eNext.Curr.y > eNext.Top.y)
and _SlopesEqual2(e, eNext)
):
self._AddOutPt(eNext, e.Bot)
self._AddJoin(e, eNext)
e = e.nextInAEL
def _Area(self, pts):
# see http://www.mathopenref.com/coordpolygonarea2.html
result = 0.0
p = pts
while True:
result += (p.pt.x + p.prevOp.pt.x) * (p.prevOp.pt.y - p.pt.y)
p = p.nextOp
if p == pts:
break
return result / 2
def _JoinPoints(self, jr):
p1, p2 = None, None
outRec1 = self._PolyOutList[jr.poly1Idx]
outRec2 = self._PolyOutList[jr.poly2Idx]
if outRec1 is None or outRec2 is None:
return p1, p2, False
pp1a = outRec1.pts
pp2a = outRec2.pts
pt1 = jr.pt2a
pt2 = jr.pt2b
pt3 = jr.pt1a
pt4 = jr.pt1b
pp1a, pt1, pt2, result = _FindSegment(pp1a, pt1, pt2)
if not result:
return p1, p2, False
if outRec1 == outRec2:
pp2a = pp1a.nextOp
pp2a, pt3, pt4, result = _FindSegment(pp2a, pt3, pt4)
if not result or pp2a == pp1a:
return p1, p2, False
else:
pp2a, pt3, pt4, result = _FindSegment(pp2a, pt3, pt4)
if not result:
return p1, p2, False
pt1, pt2, result = _GetOverlapSegment(pt1, pt2, pt3, pt4)
if not result:
return p1, p2, False
prevOp = pp1a.prevOp
if _PointsEqual(pp1a.pt, pt1):
p1 = pp1a
elif _PointsEqual(prevOp.pt, pt1):
p1 = prevOp
else:
p1 = _InsertPolyPtBetween(pp1a, prevOp, pt1)
if _PointsEqual(pp1a.pt, pt2):
p2 = pp1a
elif _PointsEqual(prevOp.pt, pt2):
p2 = prevOp
elif (p1 == pp1a) or (p1 == prevOp):
p2 = _InsertPolyPtBetween(pp1a, prevOp, pt2)
elif _Pt3IsBetweenPt1AndPt2(pp1a.pt, p1.pt, pt2):
p2 = _InsertPolyPtBetween(pp1a, p1, pt2)
else:
p2 = _InsertPolyPtBetween(p1, prevOp, pt2)
prevOp = pp2a.prevOp
if _PointsEqual(pp2a.pt, pt1):
p3 = pp2a
elif _PointsEqual(prevOp.pt, pt1):
p3 = prevOp
else:
p3 = _InsertPolyPtBetween(pp2a, prevOp, pt1)
if _PointsEqual(pp2a.pt, pt2):
p4 = pp2a
elif _PointsEqual(prevOp.pt, pt2):
p4 = prevOp
elif (p3 == pp2a) or (p3 == prevOp):
p4 = _InsertPolyPtBetween(pp2a, prevOp, pt2)
elif _Pt3IsBetweenPt1AndPt2(pp2a.pt, p3.pt, pt2):
p4 = _InsertPolyPtBetween(pp2a, p3, pt2)
else:
p4 = _InsertPolyPtBetween(p3, prevOp, pt2)
if p1.nextOp == p2 and p3.prevOp == p4:
p1.nextOp = p3
p3.prevOp = p1
p2.prevOp = p4
p4.nextOp = p2
return p1, p2, True
elif p1.prevOp == p2 and p3.nextOp == p4:
p1.prevOp = p3
p3.nextOp = p1
p2.nextOp = p4
p4.prevOp = p2
return p1, p2, True
return p1, p2, False
def _FixupFirstLefts1(self, oldOutRec, newOutRec):
for outRec in self._PolyOutList:
if outRec.pts is not None and outRec.FirstLeft == oldOutRec:
if _Poly2ContainsPoly1(outRec.pts, newOutRec.pts):
outRec.FirstLeft = newOutRec
def _FixupFirstLefts2(self, oldOutRec, newOutRec):
for outRec in self._PolyOutList:
if outRec.FirstLeft == oldOutRec:
outRec.FirstLeft = newOutRec
def _GetOutRec(self, idx):
outrec = self._PolyOutList[idx]
while outrec != self._PolyOutList[outrec.idx]:
outrec = self._PolyOutList[outrec.idx]
return outrec
def _JoinCommonEdges(self):
for i in range(len(self._JoinList)):
jr = self._JoinList[i]
outRec1 = self._GetOutRec(jr.poly1Idx)
outRec2 = self._GetOutRec(jr.poly2Idx)
if outRec1.pts is None or outRec2.pts is None:
continue
if outRec1 == outRec2:
holeStateRec = outRec1
elif _Param1RightOfParam2(outRec1, outRec2):
holeStateRec = outRec2
elif _Param1RightOfParam2(outRec2, outRec1):
holeStateRec = outRec1
else:
holeStateRec = _GetLowermostRec(outRec1, outRec2)
p1, p2, result = self._JoinPoints(jr)
if not result:
continue
if outRec1 == outRec2:
outRec1.pts = p1
outRec1.bottomPt = None
outRec2 = self._CreateOutRec()
outRec2.pts = p2
jr.poly2Idx = outRec2.idx
if _Poly2ContainsPoly1(outRec2.pts, outRec1.pts):
outRec2.isHole = not outRec1.isHole
outRec2.FirstLeft = outRec1
self._FixupJoinRecs(jr, p2, i + 1)
if self._UsingPolyTree:
self._FixupFirstLefts2(outRec2, outRec1)
_FixupOutPolygon(outRec1)
_FixupOutPolygon(outRec2)
if (
(outRec2.isHole ^ self.ReverseSolution)
== self._Area(outRec2)
> 0.0
):
_ReversePolyPtLinks(outRec2.pts)
elif _Poly2ContainsPoly1(outRec1.pts, outRec2.pts):
outRec2.isHole = outRec1.isHole
outRec1.isHole = not outRec2.isHole
outRec2.FirstLeft = outRec1.FirstLeft
outRec1.FirstLeft = outRec2
self._FixupJoinRecs(jr, p2, i + 1)
if self._UsingPolyTree:
self._FixupFirstLefts2(outRec1, outRec2)
_FixupOutPolygon(outRec1)
_FixupOutPolygon(outRec2)
if (
(outRec1.isHole ^ self.ReverseSolution)
== self._Area(outRec1)
> 0.0
):
_ReversePolyPtLinks(outRec1.pts)
else:
outRec2.isHole = outRec1.isHole
outRec2.FirstLeft = outRec1.FirstLeft
self._FixupJoinRecs(jr, p2, i + 1)
if self._UsingPolyTree:
self._FixupFirstLefts1(outRec1, outRec2)
_FixupOutPolygon(outRec1)
_FixupOutPolygon(outRec2)
else:
_FixupOutPolygon(outRec1)
outRec2.pts = None
outRec2.bottomPt = None
outRec2.idx = outRec1.idx
outRec1.isHole = holeStateRec.isHole
if holeStateRec == outRec2:
outRec1.FirstLeft = outRec2.FirstLeft
outRec2.FirstLeft = outRec1
if self._UsingPolyTree:
self._FixupFirstLefts2(outRec2, outRec1)
return
def _DoSimplePolygons(self):
i = 0
while i < len(self._PolyOutList):
outrec = self._PolyOutList[i]
i += 1
op = outrec.pts
if op is None:
continue
while True:
op2 = op.nextOp
while op2 != outrec.pts:
if (
_PointsEqual(op.pt, op2.pt)
and op2.nextOp != op
and op2.prevOp != op
):
# split the polygon into two ...
op3 = op.prevOp
op4 = op2.prevOp
op.prevOp = op4
op4.nextOp = op
op2.prevOp = op3
op3.nextOp = op2
outrec.pts = op
outrec2 = self._CreateOutRec()
outrec2.pts = op2
_UpdateOutPtIdxs(outrec2)
if _Poly2ContainsPoly1(outrec2.pts, outrec.pts):
# OutRec2 is contained by OutRec1 ...
outrec2.isHole = not outrec.isHole
outrec2.FirstLeft = outrec
elif _Poly2ContainsPoly1(outrec.pts, outrec2.pts):
# OutRec1 is contained by OutRec2 ...
outrec2.isHole = outrec.isHole
outrec.isHole = not outrec2.isHole
outrec2.FirstLeft = outrec.FirstLeft
outrec.FirstLeft = outrec2
else:
# the 2 polygons are separate ...
outrec2.isHole = outrec.isHole
outrec2.FirstLeft = outrec.FirstLeft
op2 = op
# ie get ready for the next iteration
op2 = op2.nextOp
op = op.nextOp
if op == outrec.pts:
break
return
def _ExecuteInternal(self):
# try:
try:
self._Reset()
if self._Scanbeam is None:
return True
botY = self._PopScanbeam()
while True:
self._InsertLocalMinimaIntoAEL(botY)
self._HorzJoins = None
self._ProcessHorizontals()
topY = self._PopScanbeam()
if not self._ProcessIntersections(botY, topY):
return False
self._ProcessEdgesAtTopOfScanbeam(topY)
botY = topY
if self._Scanbeam is None and self._CurrentLocMin is None:
break
for outRec in self._PolyOutList:
if outRec.pts is None:
continue
_FixupOutPolygon(outRec)
if outRec.pts is None:
continue
if (outRec.isHole ^ self.ReverseSolution) == (
self._Area(outRec.pts) > 0.0
):
_ReversePolyPtLinks(outRec.pts)
if self._JoinList is not None:
self._JoinCommonEdges()
if self.ForceSimple:
self._DoSimplePolygons()
return True
finally:
self._JoinList = None
self._HorzJoins = None
# except:
# return False
def Execute(
self,
clipType,
solution,
subjFillType=PolyFillType.EvenOdd,
clipFillType=PolyFillType.EvenOdd,
):
if self._ExecuteLocked:
return False
try:
self._ExecuteLocked = True
self._UsingPolyTree = True
del solution[:]
self._SubjFillType = subjFillType
self._ClipFillType = clipFillType
self._ClipType = clipType
result = self._ExecuteInternal()
if result:
self._BuildResult(solution)
finally:
self._ExecuteLocked = False
self._UsingPolyTree = False
return result
def Execute2(
self,
clipType,
solutionTree,
subjFillType=PolyFillType.EvenOdd,
clipFillType=PolyFillType.EvenOdd,
):
if self._ExecuteLocked:
return False
try:
self._ExecuteLocked = True
self._UsingPolyTree = True
solutionTree.Clear()
self._SubjFillType = subjFillType
self._ClipFillType = clipFillType
self._ClipType = clipType
result = self._ExecuteInternal()
if result:
self._BuildResult2(solutionTree)
finally:
self._ExecuteLocked = False
self._UsingPolyTree = False
return result
def _BuildResult(self, polygons):
for outRec in self._PolyOutList:
if outRec is None:
continue
cnt = _PointCount(outRec.pts)
if cnt < 3:
continue
poly = []
op = outRec.pts
for _ in range(cnt):
poly.append(op.pt)
op = op.prevOp
polygons.append(poly)
return
def _BuildResult2(self, polyTree):
for outRec in self._PolyOutList:
if outRec is None:
continue
cnt = _PointCount(outRec.pts)
if cnt < 3:
continue
_FixHoleLinkage(outRec)
# add nodes to _AllNodes list ...
polyNode = PolyNode()
polyTree._AllNodes.append(polyNode)
outRec.PolyNode = polyNode
op = outRec.pts
while True:
polyNode.Contour.append(op.pt)
op = op.prevOp
if op == outRec.pts:
break
# build the tree ...
for outRec in self._PolyOutList:
if outRec.PolyNode is None:
continue
if outRec.FirstLeft is None:
polyTree._AddChild(outRec.PolyNode)
else:
outRec.FirstLeft.PolyNode._AddChild(outRec.PolyNode)
return
# ===============================================================================
# OffsetPolygons (+ ancillary functions)
# ===============================================================================
def _GetUnitNormal(pt1, pt2):
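    # Unit normal of segment pt1->pt2: normalise (dx, dy) and rotate 90 degrees to (dy, -dx).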
if pt2.x == pt1.x and pt2.y == pt1.y:
return FloatPoint(0.0, 0.0)
dx = float(pt2.x - pt1.x)
dy = float(pt2.y - pt1.y)
f = 1.0 / math.hypot(dx, dy)
dx = float(dx) * f
dy = float(dy) * f
return FloatPoint(dy, -dx)
def _GetBounds(pts):
left = None
for poly in pts:
for pt in poly:
left = pt.x
top = pt.y
right = pt.x
bottom = pt.y
break
break
for poly in pts:
for pt in poly:
if pt.x < left:
left = pt.x
if pt.x > right:
right = pt.x
if pt.y < top:
top = pt.y
if pt.y > bottom:
bottom = pt.y
if left is None:
return Rect(0, 0, 0, 0)
else:
return Rect(left, top, right, bottom)
def _GetLowestPt(poly):
# precondition: poly must not be empty
result = poly[0]
for pt in poly:
if pt.y > result.y or (pt.y == result.y and pt.x < result.x):
result = pt
return result
def _StripDupPts(poly):
if poly == []:
return poly
for i in range(1, len(poly)):
if _PointsEqual(poly[i - 1], poly[i]):
poly.pop(i)
i = len(poly) - 1
while i > 0 and _PointsEqual(poly[i], poly[0]):
poly.pop(i)
i -= 1
return poly
def _OffsetInternal(
polys, isPolygon, delta, jointype=JoinType.Square, endtype=EndType.Square, limit=0.0
):
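    # Offset each path by delta, building join geometry (square, miter or round) at every vertex, then run a union pass through Clipper to remove self-intersections in the raw offset.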
def _DoSquare(pt):
# see offset_triginometry.svg in the documentation folder ...
dx = math.tan(
math.atan2(sinA, Normals[k].x * Normals[j].x + Normals[k].y * Normals[j].y)
/ 4
)
result.append(
Point(
round(pt.x + delta * (Normals[k].x - Normals[k].y * dx)),
round(pt.y + delta * (Normals[k].y + Normals[k].x * dx)),
)
)
result.append(
Point(
round(pt.x + delta * (Normals[j].x + Normals[j].y * dx)),
round(pt.y + delta * (Normals[j].y - Normals[j].x * dx)),
)
)
return
def _DoMiter(pt, r):
q = delta / r
result.append(
Point(
round(pt.x + (Normals[k].x + Normals[j].x) * q),
round(pt.y + (Normals[k].y + Normals[j].y) * q),
)
)
return
def _DoRound(pt):
a = math.atan2(sinA, Normals[k].x * Normals[j].x + Normals[k].y * Normals[j].y)
steps = round(step360 * abs(a))
X, Y = Normals[k].x, Normals[k].y
for _ in range(steps):
result.append(Point(round(pt.x + X * delta), round(pt.y + Y * delta)))
X2 = X
X = X * mcos - msin * Y
Y = X2 * msin + Y * mcos
result.append(
Point(
round(pt.x + Normals[j].x * delta), round(pt.y + Normals[j].y * delta)
)
)
return
def GetSin():
result = Normals[k].x * Normals[j].y - Normals[j].x * Normals[k].y
if result > 1.0:
result = 1.0
elif result < -1.0:
result = -1.0
return result
def _OffsetPoint(jointype):
if sinA * delta < 0:
result.append(
Point(
round(pts[j].x + Normals[k].x * delta),
round(pts[j].y + Normals[k].y * delta),
)
)
result.append(pts[j])
result.append(
Point(
round(pts[j].x + Normals[j].x * delta),
round(pts[j].y + Normals[j].y * delta),
)
)
elif jointype == JoinType.Miter:
r = 1.0 + (Normals[j].x * Normals[k].x + Normals[j].y * Normals[k].y)
if r >= miterLim:
_DoMiter(pts[j], r)
else:
_DoSquare(pts[j])
elif jointype == JoinType.Square:
_DoSquare(pts[j])
else:
_DoRound(pts[j])
return j
if delta == 0:
return polys
if not isPolygon and delta < 0:
delta = -delta
if jointype == JoinType.Miter:
# miterLim: see offset_triginometry3.svg in the documentation folder ...
if limit > 2:
miterLim = 2 / (limit * limit)
else:
miterLim = 0.5
if endtype == EndType.Round:
limit = 0.25
if jointype == JoinType.Round or endtype == EndType.Round:
if limit <= 0:
limit = 0.25
elif limit > abs(delta) * 0.25:
limit = abs(delta) * 0.25
# step360: see offset_triginometry2.svg in the documentation folder ...
step360 = math.pi / math.acos(1 - limit / abs(delta))
msin = math.sin(2 * math.pi / step360)
mcos = math.cos(2 * math.pi / step360)
step360 /= math.pi * 2
if delta < 0:
msin = -msin
res = []
ppts = polys[:]
for pts in ppts:
Normals = []
result = []
cnt = len(pts)
if cnt == 0 or cnt < 3 and delta <= 0:
continue
if cnt == 1:
if jointype == JoinType.Round:
X, Y = 1.0, 0.0
for _ in range(round(step360 * 2 * math.pi)):
result.append(
Point(round(pts[0].x + X * delta), round(pts[0].y + Y * delta))
)
X2 = X
X = X * mcos - msin * Y
Y = X2 * msin + Y * mcos
else:
X, Y = -1.0, -1.0
for _ in range(4):
result.append(
Point(round(pts[0].x + X * delta), round(pts[0].y + Y * delta))
)
if X < 0:
X = 1
elif Y < 0:
Y = 1
else:
X = -1
continue
forceClose = _PointsEqual(pts[0], pts[cnt - 1])
if forceClose:
cnt -= 1
for j in range(cnt - 1):
Normals.append(_GetUnitNormal(pts[j], pts[j + 1]))
if isPolygon or forceClose:
Normals.append(_GetUnitNormal(pts[cnt - 1], pts[0]))
else:
Normals.append(Normals[cnt - 2])
if isPolygon or forceClose:
k = cnt - 1
for j in range(cnt):
sinA = GetSin()
k = _OffsetPoint(jointype)
res.append(result)
if not isPolygon:
result = []
delta = -delta
k = cnt - 1
for j in range(cnt):
sinA = GetSin()
k = _OffsetPoint(jointype)
delta = -delta
res.append(result[::-1])
else:
# offset the polyline going forward ...
k = 0
for j in range(1, cnt - 1):
sinA = GetSin()
k = _OffsetPoint(jointype)
# handle the end (butt, round or square) ...
if endtype == EndType.Butt:
j = cnt - 1
pt1 = Point(
round(float(pts[j].x) + Normals[j].x * delta),
round(float(pts[j].y) + Normals[j].y * delta),
)
result.append(pt1)
pt1 = Point(
round(float(pts[j].x) - Normals[j].x * delta),
round(float(pts[j].y) - Normals[j].y * delta),
)
result.append(pt1)
else:
j = cnt - 1
k = cnt - 2
Normals[j] = FloatPoint(-Normals[j].x, -Normals[j].y)
if endtype == EndType.Square:
_DoSquare(pts[j])
else:
_DoRound(pts[j])
# re-build Normals ...
for j in range(cnt - 1, 0, -1):
Normals[j] = FloatPoint(-Normals[j - 1].x, -Normals[j - 1].y)
Normals[0] = FloatPoint(-Normals[1].x, -Normals[1].y)
# offset the polyline going backward ...
k = cnt - 1
for j in range(cnt - 2, 0, -1):
sinA = GetSin()
k = _OffsetPoint(jointype)
# finally handle the start (butt, round or square) ...
if endtype == EndType.Butt:
pt1 = Point(
round(float(pts[0].x) - Normals[0].x * delta),
round(float(pts[0].y) - Normals[0].y * delta),
)
result.append(pt1)
pt1 = Point(
round(float(pts[0].x) + Normals[0].x * delta),
round(float(pts[0].y) + Normals[0].y * delta),
)
result.append(pt1)
else:
j = 0
k = 1
if endtype == EndType.Square:
_DoSquare(pts[0])
else:
_DoRound(pts[0])
res.append(result)
c = Clipper()
c.AddPolygons(res, PolyType.Subject)
if delta > 0:
c.Execute(ClipType.Union, res, PolyFillType.Positive, PolyFillType.Positive)
else:
bounds = _GetBounds(res)
outer = []
outer.append(Point(bounds.left - 10, bounds.bottom + 10))
outer.append(Point(bounds.right + 10, bounds.bottom + 10))
outer.append(Point(bounds.right + 10, bounds.top - 10))
outer.append(Point(bounds.left - 10, bounds.top - 10))
c.AddPolygon(outer, PolyType.Subject)
c.ReverseSolution = True
c.Execute(ClipType.Union, res, PolyFillType.Negative, PolyFillType.Negative)
if len(res) > 0:
res.pop(0)
return res
def OffsetPolygons(polys, delta, jointype=JoinType.Square, limit=0.0, autoFix=True):
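    # With autoFix enabled, strip duplicate points and, if the outermost polygon has negative area, reverse every polygon before offsetting.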
if not autoFix:
return _OffsetInternal(polys, True, delta, jointype, EndType.Butt, limit)
pts = polys[:]
botPoly = None
botPt = None
for poly in pts:
poly = _StripDupPts(poly)
if len(poly) < 3:
continue
bot = _GetLowestPt(poly)
if botPt is None or (bot.y > botPt.y) or (bot.y == botPt.y and bot.x < botPt.x):
botPt = bot
botPoly = poly
if botPt is None:
return []
# if the outermost polygon has the wrong orientation,
# reverse the orientation of all the polygons ...
if Area(botPoly) < 0.0:
for i in range(len(pts)):
pts[i] = pts[i][::-1]
return _OffsetInternal(pts, True, delta, jointype, EndType.Butt, limit)
def OffsetPolyLines(
polys, delta, jointype=JoinType.Square, endtype=EndType.Square, limit=0.0
):
polys2 = polys[:]
for p in polys2:
if p == []:
continue
for i in range(1, len(p)):
if _PointsEqual(p[i - 1], p[i]):
p.pop(i)
if endtype == EndType.Closed:
for i in range(len(polys2)):
polys2.append(polys2[i][::-1])
return _OffsetInternal(polys2, True, delta, jointype, EndType.Butt, limit)
else:
return _OffsetInternal(polys2, False, delta, jointype, endtype, limit)
def _DistanceSqrd(pt1, pt2):
dx = pt1.x - pt2.x
dy = pt1.y - pt2.y
return dx * dx + dy * dy
def _ClosestPointOnLine(pt, linePt1, linePt2):
dx = linePt2.x - linePt1.x
dy = linePt2.y - linePt1.y
if dx == 0 and dy == 0:
return FloatPoint(linePt1.x, linePt1.y)
    q = ((pt.x - linePt1.x) * dx + (pt.y - linePt1.y) * dy) / (dx * dx + dy * dy)
    return FloatPoint(
        (1 - q) * linePt1.x + q * linePt2.x, (1 - q) * linePt1.y + q * linePt2.y
    )
def _SlopesNearColinear(pt1, pt2, pt3, distSqrd):
if _DistanceSqrd(pt1, pt2) > _DistanceSqrd(pt1, pt3):
return False
cpol = _ClosestPointOnLine(pt2, pt1, pt3)
dx = pt2.x - cpol.x
dy = pt2.y - cpol.y
return (dx * dx + dy * dy) < distSqrd
def _PointsAreClose(pt1, pt2, distSqrd):
dx = pt1.x - pt2.x
dy = pt1.y - pt2.y
return (dx * dx) + (dy * dy) <= distSqrd
def CleanPolygon(poly, distance=1.415):
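    # Drop points that are within 'distance' of a neighbour or nearly collinear with their neighbours; the default 1.415 is roughly sqrt(2), the diagonal of one integer grid cell.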
distSqrd = distance * distance
highI = len(poly) - 1
while highI > 0 and _PointsEqual(poly[highI], poly[0]):
highI -= 1
if highI < 2:
return []
pt = poly[highI]
result = []
i = 0
while True:
while i < highI and _PointsAreClose(pt, poly[i + 1], distSqrd):
i += 2
i2 = i
while i < highI and (
_PointsAreClose(poly[i], poly[i + 1], distSqrd)
or _SlopesNearColinear(pt, poly[i], poly[i + 1], distSqrd)
):
i += 1
if i >= highI:
break
elif i != i2:
continue
pt = poly[i]
i += 1
result.append(pt)
if i <= highI:
result.append(poly[i])
j = len(result)
if j > 2 and _SlopesNearColinear(result[j - 2], result[j - 1], result[0], distSqrd):
del result[j - 1 :]
if len(result) < 3:
return []
else:
return result
def CleanPolygons(polys, distance=1.415):
result = []
for poly in polys:
        result.append(CleanPolygon(poly, distance=distance))
return result
def SimplifyPolygon(poly, fillType):
result = []
c = Clipper()
c.ForceSimple = True
c.AddPolygon(poly, PolyType.Subject)
c.Execute(ClipType.Union, result, fillType, fillType)
return result
def SimplifyPolygons(polys, fillType):
result = []
c = Clipper()
c.ForceSimple = True
c.AddPolygons(polys, PolyType.Subject)
c.Execute(ClipType.Union, result, fillType, fillType)
return result
```
#### File: meerk40t/test/test_core_elements.py
```python
import unittest
from meerk40t.core.node.node import Node
from test import bootstrap
from meerk40t.core.units import UNITS_PER_MIL
from meerk40t.svgelements import Circle, Rect
class TestElements(unittest.TestCase):
def test_elements_type(self):
"""
Tests some generic elements commands and validates output as correct type
"""
kernel = bootstrap.bootstrap()
@kernel.console_command("validate_type", input_type="elements")
def validation_type(command, data=None, **kwargs):
for node in data:
self.assertTrue(isinstance(node, Node))
try:
for cmd, path, command in kernel.find("command/elements.*"):
kernel.console("element* " + command.split("/")[-1] + " validate_type\n")
finally:
kernel.shutdown()
def test_elements_specific(self):
"""
Tests specific elements for correct non-failure.
"""
kernel = bootstrap.bootstrap()
try:
kernel.console("polyline grid 3 3\n")
kernel.console("polyline 3cm 3cm 2cm 2cm 1cm 1cm grid 3 3\n")
kernel.console("circle 2cm 2cm 1cm grid 3 3\n")
kernel.console("rect 2cm 2cm 1cm 1cm grid 3 3\n")
kernel.console("ellipse 2cm 2cm 1cm 1cm grid 3 3\n")
kernel.console("line 2cm 2cm 1cm 1cm grid 3 3\n")
finally:
kernel.shutdown()
def test_elements_circle(self):
"""
Intro test for elements
:return:
"""
kernel = bootstrap.bootstrap()
try:
kernel_root = kernel.get_context("/")
kernel_root("circle 1in 1in 1in\n")
for node in kernel_root.elements.elems():
# print(element)
self.assertEqual(
node.shape,
Circle(
center=(1000 * UNITS_PER_MIL, 1000 * UNITS_PER_MIL),
r=1000 * UNITS_PER_MIL,
),
)
self.assertEqual(node.stroke, "black")
finally:
kernel.shutdown()
def test_elements_rect(self):
"""
Intro test for elements
:return:
"""
kernel = bootstrap.bootstrap()
try:
kernel_root = kernel.get_context("/")
kernel_root("rect 1in 1in 1in 1in stroke red fill blue\n")
for node in kernel_root.elements.elems():
self.assertEqual(
node.shape,
Rect(
1000 * UNITS_PER_MIL,
1000 * UNITS_PER_MIL,
1000 * UNITS_PER_MIL,
1000 * UNITS_PER_MIL,
),
)
self.assertEqual(node.stroke, "red")
self.assertEqual(node.fill, "blue")
finally:
kernel.shutdown()
def test_elements_clipboard(self):
"""
Intro test for elements
:return:
"""
kernel = bootstrap.bootstrap()
try:
kernel_root = kernel.get_context("/")
kernel_root("rect 1in 1in 1in 1in stroke red fill blue\n")
kernel_root("clipboard copy\n")
kernel_root("clipboard paste -xy 2in 2in\n")
kernel_root("grid 2 4\n")
finally:
kernel.shutdown()
def test_elements_shapes(self):
"""
Intro test for elements
:return:
"""
kernel = bootstrap.bootstrap()
try:
kernel_root = kernel.get_context("/")
kernel_root("shape 5 2in 2in 1in\n")
# kernel_root("polygon 1in 1in 2in 2in 0in 4cm\n")
finally:
kernel.shutdown()
```
#### File: meerk40t/test/test_core_viewports.py
```python
import random
import unittest
from meerk40t.core.units import Length, ViewPort, UNITS_PER_MIL
class TestViewport(unittest.TestCase):
def test_viewport_arbitrary(self):
"""
Test arbitrary viewport.
:return:
"""
bed_width = Length(amount=random.randint(0, 65535 * 1000)).length_mm
bed_height = Length(amount=random.randint(0, 65535 * 1000)).length_mm
for i in range(100):
view = ViewPort(
bed_width,
bed_height,
user_scale_x=1.0,
user_scale_y=1.0,
native_scale_x=UNITS_PER_MIL,
native_scale_y=UNITS_PER_MIL,
origin_x=random.random(),
origin_y=random.random(),
flip_x=bool(random.randint(0, 1)),
flip_y=bool(random.randint(0, 1)),
swap_xy=bool(random.randint(0, 1)),
)
x, y = view.scene_to_device_position(0, 0)
x, y = view.device_to_scene_position(x, y)
self.assertGreaterEqual(x, -10)
self.assertGreaterEqual(10, x)
self.assertGreaterEqual(y, -10)
self.assertGreaterEqual(10, y)
def test_viewport_lihuiyu_user_scale(self):
"""
Test Lihuiyu-esque viewport. User-scaled to mm
:return:
"""
bed_width = "330mm"
bed_height = "225mm"
view = ViewPort(
bed_width,
bed_height,
user_scale_x=1.2,
user_scale_y=1.0,
native_scale_x=UNITS_PER_MIL,
native_scale_y=UNITS_PER_MIL,
origin_x=0,
origin_y=0,
)
x, y = view.scene_to_device_position(0, 0)
self.assertGreaterEqual(x, -1)
self.assertGreaterEqual(1, x)
self.assertGreaterEqual(y, -1)
self.assertGreaterEqual(1, y)
x, y = view.scene_to_device_position(0, 0)
self.assertGreaterEqual(x, -10)
self.assertGreaterEqual(10, x)
self.assertGreaterEqual(y, -10)
self.assertGreaterEqual(10, y)
x, y = view.device_to_scene_position(0, Length(bed_height).mil)
self.assertGreaterEqual(x, -10)
self.assertGreaterEqual(10, x)
self.assertGreaterEqual(y, float(Length(bed_height)) - 10)
self.assertGreaterEqual(float(Length(bed_height)) + 10, y)
x, y = view.device_to_scene_position(
Length(bed_width).mil, Length(bed_height).mil
)
self.assertGreaterEqual(x, float(Length(bed_width)) * 1.2 - 10)
self.assertGreaterEqual(float(Length(bed_width)) * 1.2 + 10, x)
self.assertGreaterEqual(y, float(Length(bed_height)) - 10)
self.assertGreaterEqual(float(Length(bed_height)) + 10, y)
def test_viewport_lihuiyu_swap_xy(self):
"""
        Test Lihuiyu-esque viewport. User-scaled to mm, with swapped x/y axes.
:return:
"""
bed_width = "330mm"
bed_height = "225mm"
view = ViewPort(
bed_width,
bed_height,
user_scale_x=1.2,
user_scale_y=1.0,
native_scale_x=UNITS_PER_MIL,
native_scale_y=UNITS_PER_MIL,
origin_x=0,
origin_y=0,
swap_xy=True,
)
x, y = view.scene_to_device_position(0, 0)
self.assertGreaterEqual(x, -1)
self.assertGreaterEqual(1, x)
self.assertGreaterEqual(y, -1)
self.assertGreaterEqual(1, y)
x, y = view.scene_to_device_position(0, 0)
self.assertGreaterEqual(x, -10)
self.assertGreaterEqual(10, x)
self.assertGreaterEqual(y, -10)
self.assertGreaterEqual(10, y)
x, y = view.device_to_scene_position(0, Length(bed_height).mil)
self.assertGreaterEqual(x, float(Length(bed_height)) - 10)
self.assertGreaterEqual(float(Length(bed_height)) + 10, x)
self.assertGreaterEqual(y, -10)
self.assertGreaterEqual(10, y)
x, y = view.device_to_scene_position(
Length(bed_width).mil, Length(bed_height).mil
)
self.assertGreaterEqual(x, float(Length(bed_height)) - 10)
self.assertGreaterEqual(float(Length(bed_height)) + 10, x)
self.assertGreaterEqual(y, float(Length(bed_width)) * 1.2 - 10)
self.assertGreaterEqual(float(Length(bed_width)) * 1.2 + 10, y)
def test_viewport_grbl(self):
"""
Test GRBL-esque viewport.
:return:
"""
bed_size = "225mm"
view = ViewPort(
bed_size,
bed_size,
native_scale_x=UNITS_PER_MIL,
native_scale_y=UNITS_PER_MIL,
origin_x=0,
origin_y=1,
flip_y=True,
)
x, y = view.scene_to_device_position(0, 0)
self.assertGreaterEqual(x, -10)
self.assertGreaterEqual(10, x)
self.assertGreaterEqual(y, Length(bed_size).mil - 10)
self.assertGreaterEqual(Length(bed_size).mil + 10, y)
x, y = view.scene_to_device_position(0, float(Length(bed_size)))
self.assertGreaterEqual(x, -10)
self.assertGreaterEqual(10, x)
self.assertGreaterEqual(y, -10)
self.assertGreaterEqual(10, y)
x, y = view.device_to_scene_position(0, Length(bed_size).mil)
self.assertGreaterEqual(x, -10)
self.assertGreaterEqual(10, x)
self.assertGreaterEqual(y, -10)
self.assertGreaterEqual(10, y)
def test_viewport_grbl_user_scale(self):
"""
Test GRBL-esque viewport. User-scaled to mm
:return:
"""
bed_size = "225mm"
view = ViewPort(
bed_size,
bed_size,
user_scale_x=1.0 / Length("1mil").mm,
user_scale_y=1.0 / Length("1mil").mm,
native_scale_x=UNITS_PER_MIL,
native_scale_y=UNITS_PER_MIL,
origin_x=0,
origin_y=1,
flip_y=True,
)
x, y = view.scene_to_device_position(0, 0)
self.assertGreaterEqual(x, -1)
self.assertGreaterEqual(1, x)
self.assertGreaterEqual(y, Length(bed_size).mm - 1)
self.assertGreaterEqual(Length(bed_size).mm + 1, y)
x, y = view.scene_to_device_position(0, float(Length(bed_size)))
self.assertGreaterEqual(x, -10)
self.assertGreaterEqual(10, x)
self.assertGreaterEqual(y, -10)
self.assertGreaterEqual(10, y)
x, y = view.device_to_scene_position(0, Length(bed_size).mm)
self.assertGreaterEqual(x, -10)
self.assertGreaterEqual(10, x)
self.assertGreaterEqual(y, -10)
self.assertGreaterEqual(10, y)
def test_viewport_balor(self):
"""
Test Balor-esque viewport.
Center x, y.
:return:
"""
lens_size = "110mm"
unit_size = float(Length(lens_size))
galvo_range = 0xFFFF
units_per_galvo = unit_size / galvo_range
view = ViewPort(
lens_size,
lens_size,
native_scale_x=units_per_galvo,
native_scale_y=units_per_galvo,
origin_x=0.5,
origin_y=0.5,
)
x, y = view.device_to_scene_position(0x8000, 0x8000)
self.assertGreaterEqual(x, -10)
self.assertGreaterEqual(10, x)
self.assertGreaterEqual(y, -10)
self.assertGreaterEqual(10, y)
def test_viewport_balor_flip_y(self):
"""
Test Balor-esque viewport.
Center x, y. flip_y
:return:
"""
lens_size = "110mm"
unit_size = float(Length(lens_size))
galvo_range = 0xFFFF
units_per_galvo = unit_size / galvo_range
view = ViewPort(
lens_size,
lens_size,
native_scale_x=units_per_galvo,
native_scale_y=units_per_galvo,
origin_x=0.5,
origin_y=0.5,
flip_y=True,
)
x, y = view.device_to_scene_position(0x8000, 0x8000)
self.assertGreaterEqual(x, -10)
self.assertGreaterEqual(10, x)
self.assertGreaterEqual(y, -10)
self.assertGreaterEqual(10, y)
def test_viewport_balor_flip_x(self):
"""
Test Balor-esque viewport.
Center x, y. flip_x
:return:
"""
lens_size = "110mm"
unit_size = float(Length(lens_size))
galvo_range = 0xFFFF
units_per_galvo = unit_size / galvo_range
view = ViewPort(
lens_size,
lens_size,
native_scale_x=units_per_galvo,
native_scale_y=units_per_galvo,
origin_x=0.5,
origin_y=0.5,
flip_x=True,
)
x, y = view.device_to_scene_position(0x8000, 0x8000)
self.assertGreaterEqual(x, -10)
self.assertGreaterEqual(10, x)
self.assertGreaterEqual(y, -10)
self.assertGreaterEqual(10, y)
```
#### File: meerk40t/test/test_lifecycle.py
```python
import unittest
from meerk40t.kernel import Kernel
state = 0
class TestLifeCycle(unittest.TestCase):
def test_kernel_lifecycle(self):
def lifecycle_test(obj=None, lifecycle=None):
global state
if lifecycle == "preregister":
self.assertEquals(state, 0)
state = 1
if lifecycle == "register":
self.assertEquals(state, 1)
state = 2
if lifecycle == "configure":
self.assertEquals(state, 2)
state = 3
if lifecycle == "preboot":
self.assertEquals(state, 3)
state = 4
if lifecycle == "boot":
self.assertEquals(state, 4)
state = 5
if lifecycle == "postboot":
self.assertEquals(state, 5)
state = 6
if lifecycle == "prestart":
self.assertEquals(state, 6)
state = 7
if lifecycle == "start":
self.assertEquals(state, 7)
state = 8
if lifecycle == "poststart":
self.assertEquals(state, 8)
state = 9
if lifecycle == "ready":
self.assertEquals(state, 9)
state = 10
if lifecycle == "finished":
self.assertEquals(state, 10)
state = 11
if lifecycle == "premain":
self.assertEquals(state, 11)
state = 12
if lifecycle == "mainloop":
self.assertEquals(state, 12)
state = 13
if lifecycle == "mainloop":
self.assertEquals(state, 13)
state = 14
# Mainloop here merely quits.
kernel.console("quit\n")
if lifecycle == "preshutdown":
self.assertEquals(state, 14)
state = 15
if lifecycle == "shutdown":
self.assertEquals(state, 15)
state = 16
print(lifecycle)
kernel = Kernel("MeerK40t", "0.0.0-testing", "MeerK40t")
kernel.add_plugin(lifecycle_test)
kernel()
``` |
{
"source": "joern19/randomStuff",
"score": 4
} |
#### File: pythonScripts/tic-tac-toe/logic.py
```python
class logicClass:
game = [[None,None,None],[None,None,None],[None,None,None]]
#1 -> X
#2 -> O
def checkwin(self, player, x, y):
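        # Check the row, the column and (when applicable) the diagonals through the square just played at (x, y).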
if (self.game[0][y] == self.game[1][y] == self.game[2][y]):
return True
if self.game[x][0] == self.game[x][1] == self.game[x][2]:
return True
if x == y and self.game[0][0] == self.game[1][1] == self.game [2][2]:
return True
if x + y == 2 and self.game[0][2] == self.game[1][1] == self.game [2][0]:
return True
return False
def place(self, h, v, player):
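        # Returns True if the move was placed, False if the square is occupied, and None if the move wins the game.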
if self.game[h][v] == None:
self.game[h][v] = player
if self.checkwin(player, h, v):
return None
return True
else:
return False
``` |
{
"source": "joernheissler/ab-decrypt",
"score": 3
} |
#### File: ab-decrypt/ab_decrypt/cli.py
```python
from contextlib import ExitStack
from getpass import getpass
from os import environ
from sys import argv, stderr, stdin, stdout
from .decryptor import decrypt_android_backup
def get_password() -> bytes:
"""
Get user password.
Returns:
UTF-8 encoded password.
"""
pwd = environ.get("AB_DECRYPT_PASSWORD")
if pwd is None:
pwd = getpass("Password: ")
return pwd.encode()
def main():
"""
Command Line Interface
"""
if len(argv) > 3 or len(argv) == 2 and argv[1] in {"-h", "--help"}:
print(f"Usage: {argv[0]} [- | infile] [- | outfile]", file=stderr)
exit(2)
with ExitStack() as stack:
if len(argv) > 1 and argv[1] != "-":
in_stream = stack.enter_context(open(argv[1], "rb"))
else:
in_stream = stdin.buffer
if len(argv) > 2 and argv[2] != "-":
out_stream = stack.enter_context(open(argv[2], "wb"))
else:
out_stream = stdout.buffer
decrypt_android_backup(in_stream, out_stream, get_password)
``` |
{
"source": "joernheissler/aioyhsm",
"score": 2
} |
#### File: src/aioyhsm/client.py
```python
from __future__ import annotations
from contextlib import AsyncExitStack, asynccontextmanager
from os import urandom
from struct import pack
from typing import AnyStr, ByteString, Set, Type, TypeVar
import httpx
from . import constants, crypto, messages
from .error import YubiHsmError
ResponseType = TypeVar("ResponseType", bound="messages.Response")
def encode_label(label: AnyStr) -> bytes:
tmp = label.encode() if isinstance(label, str) else bytes(label)
if len(tmp) > 40:
raise ValueError
return tmp.ljust(40, b"\0")
def encode_domains(domains: Set[int]) -> int:
return sum(1 << domain for domain in domains)
def encode_capabilities(capabilities: Set[constants.Capability]) -> int:
return sum(1 << capa for capa in capabilities)
class YubiHsmClient:
def __init__(self, url: str) -> None:
self.url = url
async def __aenter__(self) -> YubiHsmClient:
self._stack = await AsyncExitStack().__aenter__()
self._http = await self._stack.enter_async_context(httpx.AsyncClient())
return self
async def __aexit__(self, exc_type, exc, tb):
return await self._stack.__aexit__(exc_type, exc, tb)
@asynccontextmanager
async def open_session_password(self, key_id: int, password: AnyStr) -> YubiHsmSession:
async with self.open_session_symmetric(
key_id, *crypto.derive_password(password)
) as session:
yield session
@asynccontextmanager
async def open_session_symmetric(
self, key_id: int, enc_key: ByteString, mac_key: ByteString
) -> YubiHsmSession:
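        # Mutual challenge-response authentication (similar to SCP03): exchange challenges, derive session keys, check the card cryptogram and prove knowledge of the keys with the host cryptogram.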
host_challenge = urandom(8)
create_resp = await self.create_session(key_id, host_challenge)
sid = create_resp.session_id
skey = crypto.SessionKeys(enc_key, mac_key, host_challenge, create_resp.card_challenge)
if not crypto.bytes_eq(skey.card_cryptogram, create_resp.card_cryptogram):
raise Exception("Authentication error")
auth_req = messages.AuthenticateSessionRequest(sid, skey.host_cryptogram, bytes(8))
mac = skey.calc_host_mac(auth_req.encode()[:-8])
auth_resp = await self.authenticate_session(sid, skey.host_cryptogram, mac)
session = YubiHsmSession(self, sid, skey)
yield session
await session.close_session()
async def query(self, request: messages.Request, rtype: Type[ResponseType]) -> ResponseType:
resp = await self._http.post(
self.url + "/connector/api", content=request.encode(), allow_redirects=False
)
resp.raise_for_status()
response = messages.Message.decode(resp.content)
if not isinstance(response, rtype):
if isinstance(response, messages.ErrorMessage):
raise YubiHsmError(response.code)
else:
raise TypeError(type(response))
return response
async def authenticate_session(
self, session_id: int, host_cryptogram: ByteString, mac: ByteString
) -> messages.AuthenticateSessionResponse:
msg = messages.AuthenticateSessionRequest(session_id, host_cryptogram, mac)
return await self.query(msg, messages.AuthenticateSessionResponse)
async def create_session(
self, key_set_id: int, host_challenge: ByteString
) -> messages.CreateSessionResponse:
msg = messages.CreateSessionRequest(key_set_id, host_challenge)
return await self.query(msg, messages.CreateSessionResponse)
async def echo(self, data: ByteString) -> messages.EchoResponse:
msg = messages.EchoRequest(data)
return await self.query(msg, messages.EchoResponse)
async def session_message(
self, session_id: int, inner: ByteString, mac: ByteString
) -> messages.SessionMessageResponse:
msg = messages.SessionMessageRequest(session_id, inner, mac)
return await self.query(msg, messages.SessionMessageResponse)
class YubiHsmSession:
""""""
client: YubiHsmClient
session_id: int
_session_keys: crypto.SessionKeys
def __init__(
self, client: YubiHsmClient, session_id: int, session_keys: crypto.SessionKeys
) -> None:
self.client = client
self.session_id = session_id
self._session_keys = session_keys
async def query(self, request: messages.Request, rtype: Type[ResponseType]) -> ResponseType:
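        # Encrypt and MAC the request into a SessionMessage, then verify the response MAC and decrypt the inner payload before decoding it.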
iv, enc_msg = self._session_keys.encrypt(request.encode())
tmp = messages.SessionMessageRequest(self.session_id, enc_msg, bytes(8))
req_mac = self._session_keys.calc_host_mac(tmp.encode()[:-8])
enc_rsp = await self.client.session_message(self.session_id, enc_msg, req_mac)
rsp_mac = self._session_keys.calc_card_mac(enc_rsp.encode()[:-8])
if not crypto.bytes_eq(rsp_mac, enc_rsp.mac):
raise Exception
if self.session_id != enc_rsp.session_id:
raise Exception
raw_response = self._session_keys.decrypt(iv, enc_rsp.inner)
response = messages.Message.decode(raw_response)
if not isinstance(response, rtype):
if isinstance(response, messages.ErrorMessage):
raise YubiHsmError(response.code)
else:
raise TypeError(type(response))
return response
async def blink_device(self, seconds: int) -> messages.BlinkDeviceResponse:
msg = messages.BlinkDeviceRequest(seconds)
return await self.query(msg, messages.BlinkDeviceResponse)
async def close_session(self) -> messages.CloseSessionResponse:
msg = messages.CloseSessionRequest()
return await self.query(msg, messages.CloseSessionResponse)
async def echo(self, data: ByteString) -> messages.EchoResponse:
msg = messages.EchoRequest(data)
return await self.query(msg, messages.EchoResponse)
async def generate_asymmetric_key(
self,
object_id: int,
        label: AnyStr,
domains: Set[int],
capabilities: Set[constants.Capability],
algorithm: constants.Algorithm,
) -> messages.GenerateAsymmetricKeyResponse:
msg = messages.GenerateAsymmetricKeyRequest(
object_id,
encode_label(label),
encode_domains(domains),
encode_capabilities(capabilities),
algorithm,
)
return await self.query(msg, messages.GenerateAsymmetricKeyResponse)
async def get_log_entries(self) -> messages.GetLogEntriesResponse:
msg = messages.GetLogEntriesRequest()
return await self.query(msg, messages.GetLogEntriesResponse)
async def get_pseudo_random(self, count: int) -> messages.GetPseudoRandomResponse:
msg = messages.GetPseudoRandomRequest(count)
return await self.query(msg, messages.GetPseudoRandomResponse)
async def get_public_key(self, object_id: int) -> messages.GetPublicKeyResponse:
msg = messages.GetPublicKeyRequest(object_id)
return await self.query(msg, messages.GetPublicKeyResponse)
async def get_storage_info(self) -> messages.GetStorageInfoResponse:
msg = messages.GetStorageInfoRequest()
return await self.query(msg, messages.GetStorageInfoResponse)
async def set_log_index(self, log_index: int) -> messages.SetLogIndexResponse:
msg = messages.SetLogIndexRequest(log_index)
return await self.query(msg, messages.SetLogIndexResponse)
async def sign_pkcs1(
self, object_id: int, digest: ByteString
) -> messages.SignPkcs1Response:
msg = messages.SignPkcs1Request(object_id, digest)
return await self.query(msg, messages.SignPkcs1Response)
async def sign_pss(
self,
object_id: int,
hash_algorithm: constants.Algorithm,
hash_length: int,
digest: ByteString,
) -> messages.SignPssResponse:
msg = messages.SignPssRequest(object_id, hash_algorithm, hash_length, digest)
return await self.query(msg, messages.SignPssResponse)
async def put_asymmetric_key(
self,
object_id: int,
        label: AnyStr,
domains: Set[int],
capabilities: Set[constants.Capability],
algorithm: constants.Algorithm,
parameter: ByteString,
) -> messages.PutAsymmetricKeyResponse:
msg = messages.PutAsymmetricKeyRequest(
object_id,
encode_label(label),
encode_domains(domains),
encode_capabilities(capabilities),
algorithm,
parameter,
)
return await self.query(msg, messages.PutAsymmetricKeyResponse)
async def put_rsa_key(
self,
object_id: int,
        label: AnyStr,
domains: Set[int],
prime0: int,
prime1: int,
exportable: bool,
*,
sign_pkcs: bool = False,
sign_pss: bool = False,
sign_ssh_cert: bool = False,
decrypt_pkcs: bool = False,
decrypt_oaep: bool = False,
) -> messages.PutAsymmetricKeyResponse:
capabilities = {
capa
for flag, capa in [
(exportable, constants.Capability.ExportableUnderWrap),
(sign_pkcs, constants.Capability.SignPkcs),
(sign_pss, constants.Capability.SignPss),
(sign_ssh_cert, constants.Capability.SignSshCertificate),
(decrypt_pkcs, constants.Capability.DecryptPkcs),
(decrypt_oaep, constants.Capability.DecryptOaep),
]
if flag
}
bits = prime0.bit_length()
if bits != prime1.bit_length():
raise ValueError(f"Primes have different lengths: {bits} != {prime1.bit_length()}")
try:
algorithm = {
1024: constants.Algorithm.RSA_2048,
1536: constants.Algorithm.RSA_3072,
2048: constants.Algorithm.RSA_4096,
}[bits]
except KeyError as ex:
raise ValueError(f"Primes must be 1024, 1536 or 2048 bit, not {bits}") from ex
param = prime0.to_bytes(bits // 8, "big") + prime1.to_bytes(bits // 8, "big")
return await self.put_asymmetric_key(
object_id, label, domains, capabilities, algorithm, param
)
async def put_authentication_key(
self,
object_id: int,
        label: AnyStr,
domains: Set[int],
capabilities: Set[constants.Capability],
algorithm: constants.Algorithm,
delegated_capabilities: Set[constants.Capability],
encryption_key: ByteString,
mac_key: ByteString,
) -> messages.PutAuthenticationKeyResponse:
msg = messages.PutAuthenticationKeyRequest(
object_id,
encode_label(label),
encode_domains(domains),
encode_capabilities(capabilities),
algorithm,
encode_capabilities(delegated_capabilities),
encryption_key,
mac_key,
)
return await self.query(msg, messages.PutAuthenticationKeyResponse)
async def put_authentication_key_password(
self,
object_id: int,
        label: AnyStr,
domains: Set[int],
capabilities: Set[constants.Capability],
algorithm: constants.Algorithm,
delegated_capabilities: Set[constants.Capability],
        password: AnyStr,
) -> messages.PutAuthenticationKeyResponse:
return await self.put_authentication_key(
object_id,
label,
domains,
capabilities,
algorithm,
delegated_capabilities,
*crypto.derive_password(password),
)
```
#### File: src/aioyhsm/error.py
```python
from __future__ import annotations
from .constants import Error
class YubiHsmError(Exception):
def __init__(self, code: Error) -> None:
self.code = code
``` |
{
"source": "joernheissler/ansible-eval",
"score": 3
} |
#### File: ansible-eval/filter_plugins/eval.py
```python
eval_compiled = {}
def eval_filter(code, **kwargs):
try:
c = eval_compiled[code]
except KeyError:
c = compile(code, '<eval_filter>', 'eval')
eval_compiled[code] = c
result = eval(c, {}, kwargs)
return result
class FilterModule(object):
def filters(self):
return {
'eval': eval_filter,
}
``` |
{
"source": "joernheissler/asn1crypto",
"score": 3
} |
#### File: asn1crypto/asn1crypto/parser.py
```python
from __future__ import unicode_literals, division, absolute_import, print_function
import sys
from ._types import byte_cls, chr_cls, type_name
from .util import int_from_bytes, int_to_bytes
_PY2 = sys.version_info <= (3,)
_INSUFFICIENT_DATA_MESSAGE = 'Insufficient data - %s bytes requested but only %s available'
def emit(class_, method, tag, contents):
"""
Constructs a byte string of an ASN.1 DER-encoded value
This is typically not useful. Instead, use one of the standard classes from
asn1crypto.core, or construct a new class with specific fields, and call the
.dump() method.
:param class_:
An integer ASN.1 class value: 0 (universal), 1 (application),
2 (context), 3 (private)
:param method:
An integer ASN.1 method value: 0 (primitive), 1 (constructed)
:param tag:
An integer ASN.1 tag value
:param contents:
A byte string of the encoded byte contents
:return:
A byte string of the ASN.1 DER value (header and contents)
"""
if not isinstance(class_, int):
raise TypeError('class_ must be an integer, not %s' % type_name(class_))
if class_ < 0 or class_ > 3:
raise ValueError('class_ must be one of 0, 1, 2 or 3, not %s' % class_)
if not isinstance(method, int):
raise TypeError('method must be an integer, not %s' % type_name(method))
if method < 0 or method > 1:
raise ValueError('method must be 0 or 1, not %s' % method)
if not isinstance(tag, int):
raise TypeError('tag must be an integer, not %s' % type_name(tag))
if tag < 0:
raise ValueError('tag must be greater than zero, not %s' % tag)
if not isinstance(contents, byte_cls):
raise TypeError('contents must be a byte string, not %s' % type_name(contents))
return _dump_header(class_, method, tag, contents) + contents
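# Illustrative example (not part of the original module): encode the INTEGER 5 as a
# universal (class 0), primitive (method 0) value with tag 2:
#   emit(0, 0, 2, b'\x05')  ==  b'\x02\x01\x05'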
def parse(contents, strict=False):
"""
Parses a byte string of ASN.1 BER/DER-encoded data.
This is typically not useful. Instead, use one of the standard classes from
asn1crypto.core, or construct a new class with specific fields, and call the
.load() class method.
:param contents:
A byte string of BER/DER-encoded data
:param strict:
A boolean indicating if trailing data should be forbidden - if so, a
ValueError will be raised when trailing data exists
:raises:
ValueError - when the contents do not contain an ASN.1 header or are truncated in some way
TypeError - when contents is not a byte string
:return:
A 6-element tuple:
- 0: integer class (0 to 3)
- 1: integer method
- 2: integer tag
- 3: byte string header
- 4: byte string content
- 5: byte string trailer
"""
if not isinstance(contents, byte_cls):
raise TypeError('contents must be a byte string, not %s' % type_name(contents))
contents_len = len(contents)
info, consumed = _parse(contents, contents_len)
if strict and consumed != contents_len:
raise ValueError('Extra data - %d bytes of trailing data were provided' % (contents_len - consumed))
return info
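# Illustrative example (not part of the original module): parsing the value produced
# above yields its components:
#   parse(b'\x02\x01\x05')  ==  (0, 0, 2, b'\x02\x01', b'\x05', b'')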
def peek(contents):
"""
Parses a byte string of ASN.1 BER/DER-encoded data to find the length
This is typically used to look into an encoded value to see how long the
next chunk of ASN.1-encoded data is. Primarily it is useful when a
value is a concatenation of multiple values.
:param contents:
A byte string of BER/DER-encoded data
:raises:
ValueError - when the contents do not contain an ASN.1 header or are truncated in some way
TypeError - when contents is not a byte string
:return:
An integer with the number of bytes occupied by the ASN.1 value
"""
if not isinstance(contents, byte_cls):
raise TypeError('contents must be a byte string, not %s' % type_name(contents))
info, consumed = _parse(contents, len(contents))
return consumed
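# Illustrative example (not part of the original module): peek() reports how many bytes
# the first value occupies, ignoring trailing data:
#   peek(b'\x02\x01\x05' + b'trailing')  ==  3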
def _parse(encoded_data, data_len, pointer=0, lengths_only=False):
"""
Parses a byte string into component parts
:param encoded_data:
A byte string that contains BER-encoded data
:param data_len:
The integer length of the encoded data
:param pointer:
The index in the byte string to parse from
:param lengths_only:
A boolean to cause the call to return a 2-element tuple of the integer
number of bytes in the header and the integer number of bytes in the
contents. Internal use only.
:return:
A 2-element tuple:
- 0: A tuple of (class_, method, tag, header, content, trailer)
- 1: An integer indicating how many bytes were consumed
"""
if data_len < pointer + 2:
raise ValueError(_INSUFFICIENT_DATA_MESSAGE % (2, data_len - pointer))
start = pointer
first_octet = ord(encoded_data[pointer]) if _PY2 else encoded_data[pointer]
pointer += 1
tag = first_octet & 31
    # High tag number form: the tag is encoded base 128, using the 8th bit as a
    # continuation indicator
if tag == 31:
tag = 0
while True:
num = ord(encoded_data[pointer]) if _PY2 else encoded_data[pointer]
pointer += 1
tag *= 128
tag += num & 127
if num >> 7 == 0:
break
length_octet = ord(encoded_data[pointer]) if _PY2 else encoded_data[pointer]
pointer += 1
if length_octet >> 7 == 0:
if lengths_only:
return (pointer, pointer + (length_octet & 127))
contents_end = pointer + (length_octet & 127)
else:
length_octets = length_octet & 127
if length_octets:
pointer += length_octets
contents_end = pointer + int_from_bytes(encoded_data[pointer - length_octets:pointer], signed=False)
if lengths_only:
return (pointer, contents_end)
else:
# To properly parse indefinite length values, we need to scan forward
# parsing headers until we find a value with a length of zero. If we
# just scanned looking for \x00\x00, nested indefinite length values
# would not work.
contents_end = pointer
# Unfortunately we need to understand the contents of the data to
# properly scan forward, which bleeds some representation info into
# the parser. This condition handles the unused bits byte in
# constructed bit strings.
if tag == 3:
contents_end += 1
while contents_end < data_len:
sub_header_end, contents_end = _parse(encoded_data, data_len, contents_end, lengths_only=True)
if contents_end == sub_header_end and encoded_data[contents_end - 2:contents_end] == b'\x00\x00':
break
if lengths_only:
return (pointer, contents_end)
if contents_end > data_len:
raise ValueError(_INSUFFICIENT_DATA_MESSAGE % (contents_end, data_len))
return (
(
first_octet >> 6,
(first_octet >> 5) & 1,
tag,
encoded_data[start:pointer],
encoded_data[pointer:contents_end - 2],
b'\x00\x00'
),
contents_end
)
if contents_end > data_len:
raise ValueError(_INSUFFICIENT_DATA_MESSAGE % (contents_end, data_len))
return (
(
first_octet >> 6,
(first_octet >> 5) & 1,
tag,
encoded_data[start:pointer],
encoded_data[pointer:contents_end],
b''
),
contents_end
)
def _dump_header(class_, method, tag, contents):
"""
Constructs the header bytes for an ASN.1 object
:param class_:
An integer ASN.1 class value: 0 (universal), 1 (application),
2 (context), 3 (private)
:param method:
An integer ASN.1 method value: 0 (primitive), 1 (constructed)
:param tag:
An integer ASN.1 tag value
:param contents:
A byte string of the encoded byte contents
:return:
A byte string of the ASN.1 DER header
"""
header = b''
id_num = 0
id_num |= class_ << 6
id_num |= method << 5
if tag >= 31:
cont_bit = 0
while tag > 0:
header = chr_cls(cont_bit | (tag & 0x7f)) + header
if not cont_bit:
cont_bit = 0x80
tag = tag >> 7
header = chr_cls(id_num | 31) + header
else:
header += chr_cls(id_num | tag)
length = len(contents)
if length <= 127:
header += chr_cls(length)
else:
length_bytes = int_to_bytes(length)
header += chr_cls(0x80 | len(length_bytes))
header += length_bytes
return header
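# Illustrative example (not part of the original module): a context-specific (2),
# constructed (1) value with high tag number 1000 (= 7 * 128 + 104) gets a
# three-byte identifier, followed here by a zero length octet:
#   _dump_header(2, 1, 1000, b'')  ==  b'\xbf\x87\x68\x00'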
``` |
{
"source": "joernheissler/chipcard",
"score": 3
} |
#### File: chipcard/pcsc/exception.py
```python
from __future__ import annotations
from typing import Any, Dict, Optional, Type
class ScardException(Exception):
    _subs: Dict[int, Type[ScardException]] = {}
    def __init_subclass__(cls, code: Optional[int] = None, **kwargs):
super().__init_subclass__(**kwargs)
if code:
cls._subs[code] = cls
def __init__(self, msg):
self.msg = msg
@classmethod
def create(cls, msg: Any, code: int) -> ScardException:
try:
return cls._subs[code](msg)
except KeyError:
return ScardExceptionUnknown(msg, code)
class ScardExceptionUnknown(ScardException):
def __init__(self, msg: Any, code: int):
super().__init__(msg)
self.code = code
def __str__(self):
return f"code=0x{self.code:08X}, msg={self.msg!r}"
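# Illustrative usage (not part of the original module): map a PC/SC return code to the
# matching exception class registered below, falling back to the generic one:
#   ScardException.create("connect failed", 0x80100017)  # -> ScardErrorReaderUnavailable
#   ScardException.create("mystery", 0x1234)             # -> ScardExceptionUnknown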
# https://docs.microsoft.com/en-us/previous-versions/aa924526(v=msdn.10)
class ScardFailureInternalError(ScardException, code=0x80100001):
"""
An internal consistency check failed.
"""
class ScardErrorCancelled(ScardException, code=0x80100002):
"""
The action was cancelled by a SCardCancel request.
"""
class ScardErrorInvalidHandle(ScardException, code=0x80100003):
"""
The supplied handle was invalid.
"""
class ScardErrorInvalidParameter(ScardException, code=0x80100004):
"""
One or more of the supplied parameters could not be properly interpreted.
"""
class ScardErrorInvalidTarget(ScardException, code=0x80100005):
"""
Registry startup information is missing or invalid.
"""
class ScardErrorNoMemory(ScardException, code=0x80100006):
"""
Not enough memory available to complete this command.
"""
class ScardFailureWaitedTooLong(ScardException, code=0x80100007):
"""
An internal consistency timer has expired.
"""
class ScardErrorInsufficientBuffer(ScardException, code=0x80100008):
"""
The data buffer to receive returned data is too small for the returned data.
"""
class ScardErrorUnknownReader(ScardException, code=0x80100009):
"""
The specified reader name is not recognized.
"""
class ScardErrorTimeout(ScardException, code=0x8010000A):
"""
The user-specified timeout value has expired.
"""
class ScardErrorSharingViolation(ScardException, code=0x8010000B):
"""
The smart card cannot be accessed because of other connections outstanding.
"""
class ScardErrorNoSmartcard(ScardException, code=0x8010000C):
"""
The operation requires a smart card, but no smart card is currently in the device.
"""
class ScardErrorUnknownCard(ScardException, code=0x8010000D):
"""
The specified smart card name is not recognized.
"""
class ScardErrorCantDispose(ScardException, code=0x8010000E):
"""
The system could not dispose of the media in the requested manner.
"""
class ScardErrorProtoMismatch(ScardException, code=0x8010000F):
"""
The requested protocols are incompatible with the protocol currently in use with the smart card.
"""
class ScardErrorNotReady(ScardException, code=0x80100010):
"""
The reader or smart card is not ready to accept commands.
"""
class ScardErrorInvalidValue(ScardException, code=0x80100011):
"""
One or more of the supplied parameters values could not be properly interpreted.
"""
class ScardErrorSystemCancelled(ScardException, code=0x80100012):
"""
The action was cancelled by the system, presumably to log off or shut down.
"""
class ScardFailureCommError(ScardException, code=0x80100013):
"""
An internal communications error has been detected.
"""
class ScardFailureUnknownError(ScardException, code=0x80100014):
"""
An internal error has been detected, but the source is unknown.
"""
class ScardErrorInvalidAtr(ScardException, code=0x80100015):
"""
An ATR obtained from the registry is not a valid ATR string.
"""
class ScardErrorNotTransacted(ScardException, code=0x80100016):
"""
An attempt was made to end a non-existent transaction.
"""
class ScardErrorReaderUnavailable(ScardException, code=0x80100017):
"""
The specified reader is not currently available for use.
"""
class ScardPanicShutdown(ScardException, code=0x80100018):
"""
The operation has been aborted to allow the server application to exit.
"""
class ScardErrorPciTooSmall(ScardException, code=0x80100019):
"""
The PCI Receive buffer was too small.
"""
class ScardErrorReaderUnsupported(ScardException, code=0x8010001A):
"""
The reader driver does not meet minimal requirements for support.
"""
class ScardErrorDuplicateReader(ScardException, code=0x8010001B):
"""
The reader driver did not produce a unique reader name.
"""
class ScardErrorCardUnsupported(ScardException, code=0x8010001C):
"""
The smart card does not meet minimal requirements for support.
"""
class ScardErrorNoService(ScardException, code=0x8010001D):
"""
The Smart Card Resource Manager is not running.
"""
class ScardErrorServiceStopped(ScardException, code=0x8010001E):
"""
The Smart Card Resource Manager has shut down.
"""
class ScardErrorUnexpected(ScardException, code=0x8010001F):
"""
An unexpected card error has occurred.
"""
class ScardErrorIccInstallation(ScardException, code=0x80100020):
"""
No primary provider can be found for the smart card.
"""
class ScardErrorIccCreateorder(ScardException, code=0x80100021):
"""
The requested order of object creation is not supported.
"""
class ScardErrorUnsupportedFeature(ScardException, code=0x80100022):
"""
This smart card does not support the requested feature.
"""
class ScardErrorDirNotFound(ScardException, code=0x80100023):
"""
The identified directory does not exist in the smart card.
"""
class ScardErrorFileNotFound(ScardException, code=0x80100024):
"""
The identified file does not exist in the smart card.
"""
class ScardErrorNoDir(ScardException, code=0x80100025):
"""
The supplied path does not represent a smart card directory.
"""
class ScardErrorNoFile(ScardException, code=0x80100026):
"""
The supplied path does not represent a smart card file.
"""
class ScardErrorNoAccess(ScardException, code=0x80100027):
"""
Access is denied to this file.
"""
class ScardErrorWriteTooMany(ScardException, code=0x80100028):
"""
The smart card does not have enough memory to store the information.
"""
class ScardErrorBadSeek(ScardException, code=0x80100029):
"""
There was an error trying to set the smart card file object pointer.
"""
class ScardErrorInvalidChv(ScardException, code=0x8010002A):
"""
The supplied PIN is incorrect.
"""
class ScardErrorUnknownResMng(ScardException, code=0x8010002B):
"""
An unrecognized error code was returned from a layered component.
"""
class ScardErrorNoSuchCertificate(ScardException, code=0x8010002C):
"""
The requested certificate does not exist.
"""
class ScardErrorCertificateUnavailable(ScardException, code=0x8010002D):
"""
The requested certificate could not be obtained.
"""
class ScardErrorNoReadersAvailable(ScardException, code=0x8010002E):
"""
Cannot find a smart card reader.
"""
class ScardErrorCommDataLost(ScardException, code=0x8010002F):
"""
A communications error with the smart card has been detected. Retry the operation.
"""
class ScardErrorNoKeyContainer(ScardException, code=0x80100030):
"""
The requested key container does not exist on the smart card.
"""
class ScardErrorServerTooBusy(ScardException, code=0x80100031):
"""
The Smart Card Resource Manager is too busy to complete this operation.
"""
class ScardWarningUnsupportedCard(ScardException, code=0x80100065):
"""
The reader cannot communicate with the card, due to ATR string configuration conflicts.
"""
class ScardWarningUnresponsiveCard(ScardException, code=0x80100066):
"""
The smart card is not responding to a reset.
"""
class ScardWarningUnpoweredCard(ScardException, code=0x80100067):
"""
Power has been removed from the smart card, so that further communication is not possible.
"""
class ScardWarningResetCard(ScardException, code=0x80100068):
"""
The smart card has been reset, so any shared state information is invalid.
"""
class ScardWarningRemovedCard(ScardException, code=0x80100069):
"""
The smart card has been removed, so further communication is not possible.
"""
class ScardWarningSecurityViolation(ScardException, code=0x8010006A):
"""
Access was denied because of a security violation.
"""
class ScardWarningWrongChv(ScardException, code=0x8010006B):
"""
The card cannot be accessed because the wrong PIN was presented.
"""
class ScardWarningChvBlocked(ScardException, code=0x8010006C):
"""
The card cannot be accessed because the maximum number of PIN entry attempts has been reached.
"""
class ScardWarningEof(ScardException, code=0x8010006D):
"""
The end of the smart card file has been reached.
"""
class ScardWarningCancelledByUser(ScardException, code=0x8010006E):
"""
The action was cancelled by the user.
"""
class ScardWarningCardNotAuthenticated(ScardException, code=0x8010006F):
"""
No PIN was presented to the smart card.
"""
```
#### File: chipcard/pcsc/proto.py
```python
from __future__ import annotations
import struct
from contextlib import asynccontextmanager
from dataclasses import InitVar, astuple, dataclass, field, fields, Field
from typing import ByteString, List, Any, Type, Dict
from . import const
from .exception import ScardException
def _param(typ: Any, fmt: str, doc: str) -> Field:
"""
Used to specify a parameter for a dataclass.
Args:
typ: python type to annotate parameter with
fmt: Format string for struct.pack/.unpack
doc: DocString for this parameter
Returns:
Field for the dataclass parameter.
"""
return field(metadata={"typ": typ, "fmt": fmt, "doc": doc})
def _int32(doc: str) -> Field:
"""
Signed 32 bit integer parameter.
Args:
doc: DocString for this parameter
Returns:
Field for the dataclass parameter.
"""
return _param(int, "i", doc)
def _uint32(doc: str) -> Field:
"""
Unsigned 32 bit integer parameter.
Args:
doc: DocString for this parameter
Returns:
Field for the dataclass parameter.
"""
return _param(int, "I", doc)
def _char_array(size: int, doc: str) -> Field:
    """
    Character string, zero padded to the next multiple of 4 bytes.
    Args:
        size: number of bytes in the fixed-size field
        doc: DocString for this parameter
    Returns:
        Field for the dataclass parameter.
    """
    return _param(bytes, f"{size}s{-size % 4}x", doc)
class ProtocolMeta(type):
"""
    Metaclass that turns each protocol message class into a proper dataclass and sets some class variables
"""
def __new__(cls, name, bases, dct, cmd=None):
# Ignore ProtocolMessage base class alone
if cmd is None:
return super().__new__(cls, name, bases, dct)
self = super().__new__(cls, name, bases, dct, cmd=cmd)
# Empty message types inherit annotations from the base class. Detect this and empty the annotations.
if "_subs" in self.__annotations__:
self.__annotations__ = {}
# Create dataclass attributes and annotations
for k, v in self.__annotations__.items():
            # Annotations are strings because of `from __future__ import annotations`;
            # evaluate them to recover the Field objects.
v = eval(v)
self.__annotations__[k] = v.metadata["typ"]
setattr(self, k, v)
# Expect that every class is initiated with a "byteorder" parameter
self.__annotations__["byteorder"] = InitVar[str]
# Apply dataclass decorator
self = dataclass(self)
# Create struct format string
fmt = "".join(f.metadata["fmt"] for f in fields(self))
# Calculate message size
self._size = struct.calcsize(f"={fmt}")
        # Create class attributes
self._fmt = fmt
self._cmd = cmd
return self
class ProtocolMessage(metaclass=ProtocolMeta):
"""
Base class for all protocol messages.
"""
# Used by server implementations to convert messages to the correct type.
_subs: Dict[int, Type[ProtocolMessage]] = {}
# Byte order, one of "<", "=", ">" for little, native or big endian.
_byteorder: str
# Message size class attribute, set by meta class.
_size: int
# Message format class attribute excluding byte order, set by meta class.
_fmt: str
# Message command class attribute, set by meta class.
_cmd: int
    def __init_subclass__(cls, cmd: int, **kwargs: Any) -> None:
"""
Register sub class
"""
if cmd >= 0:
cls._subs[cmd] = cls
super().__init_subclass__(**kwargs)
def __post_init__(self, byteorder: str) -> None:
"""
Process `byteorder` InitVar that was added by meta class.
Args:
byteorder: Byte order, one of "<", "=", ">" for little, native or big endian.
"""
self._byteorder = byteorder
@property
def encoded_req(self) -> bytes:
"""
Encode this message as a PCSC-lite request.
Returns:
Encoded message
"""
return struct.pack(
f"{self._byteorder}II{self._fmt}", self._size, self._cmd, *astuple(self)
)
@property
def encoded_resp(self) -> bytes:
"""
Encode this message as a PCSC-lite response.
Returns:
Encoded message
"""
return struct.pack(f"{self._byteorder}{self._fmt}", *astuple(self))
@classmethod
def decode(cls, buf: ByteString, byteorder: str) -> ProtocolMessage:
"""
Decode a message (excluding length / command)
Args:
buf: message to decode
byteorder: one of "<", "=", ">" for little, native or big endian.
Returns:
Decoded message.
"""
# Convert all fields to their correct type
values = (
fld.type(value)
for value, fld in zip(struct.unpack(byteorder + cls._fmt, buf), fields(cls))
)
return cls(*values, byteorder=byteorder)
class CmdVersion(ProtocolMessage, cmd=const.MsgCommand.VERSION):
"""
Negotiate protocol version. Needs to be sent as first message.
"""
major: _int32("Major protocol version")
minor: _int32("Minor protocol version")
rv: _uint32("Return value")
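# Illustrative round trip (a sketch, not from the original source; assumes
# const.MsgCommand.VERSION packs as an unsigned integer):
#   msg = CmdVersion(4, 4, 0, byteorder="<")
#   msg.encoded_req == struct.pack("<IIiiI", msg._size, const.MsgCommand.VERSION, 4, 4, 0)
#   CmdVersion.decode(msg.encoded_resp, "<") == msg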
class ScardEstablishContext(ProtocolMessage, cmd=const.MsgCommand.ESTABLISH_CONTEXT):
"""
Creates an Application Context to the PC/SC Resource Manager.
"""
scope: _param(const.ScardScope, "I", "Scope")
context: _uint32("Context")
rv: _uint32("Return value")
class SCardReleaseContext(ProtocolMessage, cmd=const.MsgCommand.RELEASE_CONTEXT):
"""
Destroys a communication context to the PC/SC Resource Manager.
"""
context: _uint32("Context")
rv: _uint32("Return value")
class SCardCancel(ProtocolMessage, cmd=const.MsgCommand.CANCEL):
"""
Really just the same as CmdStopWaitingReaderStateChange. Don't use.
"""
context: _uint32("Context")
rv: _uint32("Return value")
class SCardConnect(ProtocolMessage, cmd=const.MsgCommand.CONNECT):
"""
Establishes a connection to the reader specified in `reader`.
"""
context: _uint32("Context")
reader: _char_array(const.MAX_READERNAME, "Reader name")
share_mode: _param(const.ScardShare, "I", "Share mode")
preferred_protocols: _param(const.ScardProtocol, "I", "Preferred Protocols")
card: _int32("Card")
active_protocol: _param(const.ScardProtocol, "I", "Active Protocol")
rv: _uint32("Return value")
class SCardReconnect(ProtocolMessage, cmd=const.MsgCommand.RECONNECT):
"""
Reestablishes a connection to a reader that was previously connected to using
SCardConnect().
"""
card: _int32("Card")
share_mode: _param(const.ScardShare, "I", "Share mode")
preferred_protocols: _param(const.ScardProtocol, "I", "Preferred Protocols")
initialization: _uint32("Initialization")
active_protocol: _param(const.ScardProtocol, "I", "Active Protocol")
rv: _uint32("Return value")
class SCardDisconnect(ProtocolMessage, cmd=const.MsgCommand.DISCONNECT):
"""
Terminates a connection made through SCardConnect().
"""
card: _int32("Card")
disposition: _param(const.Disposition, "I", "Disposition")
rv: _uint32("Return value")
class SCardBeginTransaction(ProtocolMessage, cmd=const.MsgCommand.BEGIN_TRANSACTION):
"""
Establishes a temporary exclusive access mode for doing a series of commands in a
transaction.
"""
card: _int32("Card")
rv: _uint32("Return value")
class SCardEndTransaction(ProtocolMessage, cmd=const.MsgCommand.END_TRANSACTION):
"""
Ends a previously begun transaction.
"""
card: _int32("Card")
disposition: _param(const.Disposition, "I", "Disposition")
rv: _uint32("Return value")
class SCardTransmit(ProtocolMessage, cmd=const.MsgCommand.TRANSMIT):
"""
Sends an APDU to the smart card contained in the reader connected to by SCardConnect().
"""
card: _int32("Card")
send_pci_protocol: _uint32("ioSendPciProtocol")
send_pci_length: _uint32("ioSendPciLength")
send_length: _uint32("cbSendLength")
recv_pci_protocol: _uint32("ioRecvPciProtocol")
recv_pci_length: _uint32("ioRecvPciLength")
recv_length: _uint32("pcbRecvLength")
rv: _uint32("Return value")
class SCardControl(ProtocolMessage, cmd=const.MsgCommand.CONTROL):
"""
Sends a command directly to the IFD Handler (reader driver) to be processed by the reader.
"""
card: _int32("Card")
control_code: _uint32("Control Code")
send_length: _uint32("cbSendLength")
recv_length: _uint32("pcbRecvLength")
bytes_returned: _uint32("Bytes Returned")
rv: _uint32("Return value")
class SCardStatus(ProtocolMessage, cmd=const.MsgCommand.STATUS):
"""
Returns the current status of the reader connected to by `card`.
"""
card: _int32("Card")
rv: _uint32("Return value")
class SCardGetAttrib(ProtocolMessage, cmd=const.MsgCommand.GET_ATTRIB):
"""
Get an attribute from the IFD Handler (reader driver).
"""
card: _int32("Card")
attr_id: _uint32("Attribute ID")
attr: _char_array(264, "Attribute")
attr_len: _uint32("Attribute Length")
rv: _uint32("Return value")
class SCardSetAttrib(ProtocolMessage, cmd=const.MsgCommand.SET_ATTRIB):
"""
Set an attribute of the IFD Handler.
"""
card: _int32("Card")
attr_id: _uint32("Attribute ID")
attr: _char_array(264, "Attribute")
attr_len: _uint32("Attribute Length")
rv: _uint32("Return value")
class CmdGetReadersState(ProtocolMessage, cmd=const.MsgCommand.GET_READERS_STATE):
"""
Get current state of readers. Will be responded to with MAX_READERS_CONTEXTS times
MsgReaderState.
"""
class CmdWaitReaderStateChange(ProtocolMessage, cmd=const.MsgCommand.WAIT_READER_STATE_CHANGE):
"""
Get current state of readers and wait for a change in the state.
Will be responded to with MAX_READERS_CONTEXTS times MsgReaderState.
On change or when the client sends CmdStopWaitingReaderStateChange,
WaitReaderStateChange is sent.
"""
class CmdStopWaitingReaderStateChange(
ProtocolMessage, cmd=const.MsgCommand.STOP_WAITING_READER_STATE_CHANGE
):
"""
Cancel a running CmdWaitReaderStateChange.
"""
class WaitReaderStateChange(ProtocolMessage, cmd=-1):
"""
Response to CmdWaitReaderStateChange.
"""
timeout: _uint32("Timeout")
rv: _uint32("Return value")
class MsgReaderState(ProtocolMessage, cmd=-1):
"""
Returned MAX_READERS_CONTEXTS times after sending CmdGetReadersState or
CmdWaitReaderStateChange
"""
# reader_name is zero padded (at least 1 byte)
reader_name: _char_array(const.MAX_READERNAME, "Reader Name")
event_counter: _uint32("Event Counter")
reader_state: _param(const.ReaderState, "I", "Reader State")
reader_sharing: _int32("Reader Sharing") # ScardSharing or integer
card_atr: _char_array(33, "Answer To Reset")
card_atr_length: _uint32("Length of ATR")
card_protocol: _param(const.ScardProtocol, "I", "Protocol")
```
#### File: stubs/asn1crypto/core.py
```python
from __future__ import annotations
from typing import ByteString, Union, Dict, Any as _Any, Optional, TypeVar, Type, Iterator
ASN1_VALUE = TypeVar("ASN1_VALUE", bound="Asn1Value")
class Asn1Value:
def __init__(self, value: Optional[_Any] = None) -> None:
...
def dump(self, force: bool = False) -> bytes:
...
@classmethod
def load(cls: Type[ASN1_VALUE], encoded_data: ByteString, strict: bool = False, **kwargs: Dict[str, _Any]) -> ASN1_VALUE:
...
@property
def parsed(self) -> _Any:
...
@property
def native(self) -> _Any:
...
class ObjectIdentifier(Asn1Value):
@property
def dotted(self) -> str:
...
class Null(Asn1Value):
...
class Sequence(Asn1Value):
def __getitem__(self, key: str) -> Asn1Value:
...
class UTF8String(Asn1Value):
...
class PrintableString(Asn1Value):
...
class Choice(Asn1Value):
@property
def chosen(self) -> Asn1Value:
...
class Integer(Asn1Value):
def __int__(self) -> int:
...
class OctetString(Asn1Value):
def __bytes__(self) -> bytes:
...
class SequenceOf(Asn1Value):
def __iter__(self) -> Iterator[Asn1Value]:
...
class Any(Asn1Value):
...
``` |
{
"source": "joernheissler/cryptokey",
"score": 3
} |
#### File: backend/hashlib/__init__.py
```python
from __future__ import annotations
import hashlib
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from typing import Any, Dict, Optional, Tuple, cast
from ... import hashes
from ...hashes import HashAlgorithmId
class _HashlibHash(metaclass=ABCMeta):
"""
    Helper class describing the interface of hashlib hash objects, used only for type annotations
"""
@abstractmethod
def update(self, data: bytes) -> None:
""""""
@abstractmethod
def digest(self, length: Optional[int] = None) -> bytes:
""""""
@property
@abstractmethod
def digest_size(self) -> int:
""""""
@abstractmethod
def copy(self) -> _HashlibHash:
""""""
@dataclass(eq=False)
class HashlibHash(hashes.HashFunction):
def __post_init__(self) -> None:
self._hash = self._create_hash()
def update(self, data: bytes) -> HashlibHash:
self._hash.update(data)
return self
def _finalize(self) -> bytes:
f = _algos[self.algorithm.algorithm_id][2]
params: Tuple[Any, ...]
if f is None:
params = ()
elif isinstance(f, int):
params = (f,)
else:
params = (f(self.algorithm.parameters),)
return self._hash.digest(*params)
def _create_hash(self) -> _HashlibHash:
try:
name, init, __, __ = _algos[self.algorithm.algorithm_id]
except KeyError:
raise NotImplementedError
init_params = init(self.algorithm.parameters) if init else {}
try:
f = getattr(hashlib, name)
except AttributeError:
try:
return cast(_HashlibHash, hashlib.new(name, **init_params))
except ValueError:
raise NotImplementedError
else:
return cast(_HashlibHash, f(**init_params))
def copy(self) -> HashlibHash:
new = HashlibHash(self.algorithm)
new._hash = self._hash.copy()
return new
def _blake2_len(params: hashes.HashParameters) -> Dict[str, int]:
return {"digest_size": cast(hashes.Blake2Params, params).length}
def _shake_len(params: hashes.HashParameters) -> int:
return cast(hashes.ShakeLenParams, params).length
_algos = {
HashAlgorithmId.BLAKE2B: ("blake2b", _blake2_len, None, None),
HashAlgorithmId.BLAKE2S: ("blake2s", _blake2_len, None, None),
HashAlgorithmId.MD5: ("md5", None, None, None),
HashAlgorithmId.RIPEMD_160: ("ripemd160", None, None, None),
HashAlgorithmId.SHA1: ("sha1", None, None, None),
HashAlgorithmId.SHA2_224: ("sha224", None, None, None),
HashAlgorithmId.SHA2_256: ("sha256", None, None, None),
HashAlgorithmId.SHA2_384: ("sha384", None, None, None),
HashAlgorithmId.SHA2_512: ("sha512", None, None, None),
HashAlgorithmId.SHA2_512_224: ("sha512-224", None, None, None),
HashAlgorithmId.SHA2_512_256: ("sha512-256", None, None, None),
HashAlgorithmId.SHA3_224: ("sha3_224", None, None, None),
HashAlgorithmId.SHA3_256: ("sha3_256", None, None, None),
HashAlgorithmId.SHA3_384: ("sha3_384", None, None, None),
HashAlgorithmId.SHA3_512: ("sha3_512", None, None, None),
HashAlgorithmId.SHAKE_128: ("shake_128", None, 16, None),
HashAlgorithmId.SHAKE_256: ("shake_256", None, 32, None),
HashAlgorithmId.SHAKE_128_LEN: ("shake_128", None, _shake_len, _shake_len),
HashAlgorithmId.SHAKE_256_LEN: ("shake_256", None, _shake_len, _shake_len),
HashAlgorithmId._TEST_DUMMY: ("test_dummy", None, None, None),
}
def blake2b(data: bytes, length: int) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.blake2b(length), data)
def blake2s(data: bytes, length: int) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.blake2s(length), data)
def md5(data: bytes) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.md5(), data)
def ripemd_160(data: bytes) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.ripemd_160(), data)
def sha1(data: bytes) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.sha1(), data)
def sha2_224(data: bytes) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.sha2_224(), data)
def sha2_256(data: bytes) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.sha2_256(), data)
def sha2_384(data: bytes) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.sha2_384(), data)
def sha2_512(data: bytes) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.sha2_512(), data)
def sha2_512_224(data: bytes) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.sha2_512_224(), data)
def sha2_512_256(data: bytes) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.sha2_512_256(), data)
def sha3_224(data: bytes) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.sha3_224(), data)
def sha3_256(data: bytes) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.sha3_256(), data)
def sha3_384(data: bytes) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.sha3_384(), data)
def sha3_512(data: bytes) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.sha3_512(), data)
def shake_128(data: bytes) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.shake_128(), data)
def shake_128_len(data: bytes, length: int) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.shake_128_len(length), data)
def shake_256(data: bytes) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.shake_256(), data)
def shake_256_len(data: bytes, length: int) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.shake_256_len(length), data)
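# Illustrative usage (not part of the original module; assumes MessageDigest exposes the
# raw digest as `.value`, as used elsewhere in this package). The digest of b"abc" is
# the well-known SHA-256 test vector:
#   sha2_256(b"abc").value.hex()
#   == "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"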
```
#### File: backend/textbook/ecc.py
```python
from __future__ import annotations
from dataclasses import InitVar, dataclass, field
from typing import Optional, Union
from ...public import ecc
from .math import invert
# Notation follows https://tools.ietf.org/html/rfc6979
@dataclass
class Curve:
"""
    y^2 = x^3 + ax + b (mod p)
"""
curve_id: ecc.CurveId
# modulus on which curve calculations are carried out
p: int
# first coefficient of curve polynomial
a: int
# second coefficient of curve polynomial
b: int
# curve order
q: int
# Generator x coordinate
x: InitVar[int]
# Generator y coordinate
y: InitVar[int]
# Generator point
gen: CurvePoint = field(init=False)
def __post_init__(self, x: int, y: int) -> None:
self.gen = CurvePoint(self.curve_id, x, y, self)
class NeutralPoint(ecc.NeutralPoint):
def __add__(self, other: Point) -> Point:
if not isinstance(other, (CurvePoint, NeutralPoint)):
return NotImplemented
return other
def __iadd__(self, other: Point) -> Point:
if not isinstance(other, (CurvePoint, NeutralPoint)):
return NotImplemented
return other
def __mul__(self, other: int) -> NeutralPoint:
if not isinstance(other, int):
return NotImplemented
return self
def __imul__(self, other: int) -> NeutralPoint:
if not isinstance(other, int):
return NotImplemented
return self
def __rmul__(self, other: int) -> NeutralPoint:
if not isinstance(other, int):
return NotImplemented
return self
def __neg__(self) -> NeutralPoint:
return self
def __pos__(self) -> NeutralPoint:
return self
def __bool__(self) -> bool:
return False
neutral_point = NeutralPoint()
@dataclass
class CurvePoint(ecc.CurvePoint):
curve: Curve = field(init=False)
_curve: InitVar[Optional[Curve]] = None
def __post_init__(self, _curve: Optional[Curve]) -> None:
self.curve = _curve or curve_map[self.curve_id]
if self.y ** 2 % self.curve.p != (self.x ** 3 + self.curve.a * self.x + self.curve.b) % self.curve.p:
raise ValueError("point not on curve")
def __add__(self, other: Point) -> Point:
if isinstance(other, NeutralPoint):
return self
if not isinstance(other, CurvePoint) or self.curve != other.curve:
return NotImplemented
p = self.curve.p
if self.x == other.x and (self.y + other.y) % p == 0:
return neutral_point
if self == other:
m = (3 * self.x ** 2 + self.curve.a) * invert(2 * self.y, p) % p
else:
m = (self.y - other.y) * invert(self.x - other.x, p) % p
x = (m ** 2 - self.x - other.x) % p
y = (m * (self.x - x) - self.y) % p
return CurvePoint(self.curve_id, x, y, self.curve)
def __mul__(self, other: int) -> Point:
if not isinstance(other, int):
return NotImplemented
result = neutral_point
tmp = self
while other:
if other % 2:
result += tmp
other >>= 1
tmp += tmp
return result
def __rmul__(self, other: int) -> Point:
if not isinstance(other, int):
return NotImplemented
return self * other
def __neg__(self) -> CurvePoint:
return CurvePoint(self.curve_id, self.x, -self.y % self.curve.p, self.curve)
def __pos__(self) -> CurvePoint:
return self
def __bool__(self) -> bool:
return True
Point = Union[NeutralPoint, CurvePoint]
NIST_P_256 = Curve(
ecc.CurveId.NIST_P_256,
2 ** 256 - 2 ** 224 + 2 ** 192 + 2 ** 96 - 1,
-3,
0x5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B,
0xFFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551,
0x6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C296,
0x4FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5,
)
NIST_P_384 = Curve(
ecc.CurveId.NIST_P_384,
2 ** 384 - 2 ** 128 - 2 ** 96 + 2 ** 32 - 1,
-3,
0xB3312FA7E23EE7E4988E056BE3F82D19181D9C6EFE8141120314088F5013875AC656398D8A2ED19D2A85C8EDD3EC2AEF,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC7634D81F4372DDF581A0DB248B0A77AECEC196ACCC52973,
0xAA87CA22BE8B05378EB1C71EF320AD746E1D3B628BA79B9859F741E082542A385502F25DBF55296C3A545E3872760AB7,
0x3617DE4A96262C6F5D9E98BF9292DC29F8F41DBD289A147CE9DA3113B5F0B8C00A60B1CE1D7E819D7A431D7C90EA0E5F,
)
curve_map = {ecc.CurveId.NIST_P_256: NIST_P_256, ecc.CurveId.NIST_P_384: NIST_P_384}
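# Sanity check sketch (not part of the original module): the generator satisfies the
# curve equation, which CurvePoint.__post_init__ verifies on construction:
#   g = NIST_P_256.gen
#   (g.y ** 2 - g.x ** 3 - NIST_P_256.a * g.x - NIST_P_256.b) % NIST_P_256.p == 0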
# XXX compute NIST parameters from the magic seed.
# https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf
# D.1.2.3 Curve P-256
# p = 115792089210356248762697446949407573530086143415290314195533631308867097853951
# n = 115792089210356248762697446949407573529996955224135760342422259061068512044369
# SEED = c49d360886e704936a6678e1139d26b7819f7e90
# c = 7efba1662985be9403cb055c75d4f7e0ce8d84a9c5114abcaf3177680104fa0d
# b = 5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b
# G x = 6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296
# G y = 4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5
# https://github.com/andreacorbellini/ecc/tree/master/scripts
# XXX compute group order n using Schoof or better
```
#### File: backend/textbook/math.py
```python
from math import gcd
from typing import Tuple
def gcdext(a: int, b: int) -> Tuple[int, int, int]:
"""
Extended Euclidean algorithm to compute the Greatest Common Divisor.
Return integers g, x, y such that g = ax + by = gcd(a, b)
"""
x0, x1, y0, y1 = 1, 0, 0, 1
while b:
q, a, b = a // b, b, a % b
x0, x1 = x1, x0 - q * x1
y0, y1 = y1, y0 - q * y1
return a, x0, y0
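# Example (not part of the original module):
#   gcdext(240, 46) == (2, -9, 47), since 240 * -9 + 46 * 47 == 2 == gcd(240, 46)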
def lcm(a: int, b: int) -> int:
"""
Least Common Multiple.
"""
return a * b // gcd(a, b)
def invert(a: int, n: int) -> int:
"""
Compute multiplicative inverse of `a` modulo `n`.
invert(a, n) * a % n == 1
"""
g, x, __ = gcdext(a, n)
if g != 1:
raise ValueError("Arguments are not coprime")
return x % n
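# Example (not part of the original module): invert(3, 7) == 5, since 3 * 5 % 7 == 1;
# invert(2, 4) raises ValueError because gcd(2, 4) != 1.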
```
#### File: backend/textbook/rsa.py
```python
from __future__ import annotations
from dataclasses import InitVar, dataclass, field
from functools import reduce
from operator import mul
from typing import Optional, Sequence, Set, Tuple, Union
from asn1crypto import keys as asn1keys
from asn1crypto.pem import unarmor
from ...public.key import AsymmetricAlgorithm, PrivateKey, PublicKey
from ...public.rsa import RsaPrivateKey, RsaPublicKey, RsaScheme, RsaSignature, RsaSignatureMetadata
from ..partial.rsa import PartialRsaPrivateKey, PartialRsaPublicKey
from .math import invert, lcm
@dataclass
class TextbookRsaPublicKey(PartialRsaPublicKey):
exp: int
mod: int
@classmethod
def from_key(cls, key: PublicKey) -> TextbookRsaPublicKey:
if not isinstance(key, RsaPublicKey):
raise TypeError()
return cls(key.public_exponent, key.modulus)
@property
def public_exponent(self) -> int:
"""
Public RSA exponent (e).
"""
return self.exp
@property
def modulus(self) -> int:
"""
RSA modulus (n).
"""
return self.mod
# def validate(self) -> None:
# pass
# def verify(self, signature: Signature, message: bytes) -> None:
# if not isinstance(signature, RsaSignature):
# raise TypeError()
# def verify_digest(self, signature: Signature, digest: MessageDigest) -> None:
# if not isinstance(signature, RsaSignature):
# raise TypeError()
# XXX rework function signature
async def encrypt_int(self, msg: int) -> int:
return pow(msg, self.exp, self.mod)
async def encrypt_ascii_string(self, message: str) -> str:
"""
Stupid encryption scheme often used for crypto riddles.
Each char is encrypted individually using textbook RSA. The resulting
integers are represented as decimal numbers and concatenated.
"""
return "".join([str(await self.encrypt_int(ord(c))) for c in message])
# encrypt individual chars using textbook rsa and small moduli, concatenate the result (e.g. in decimal) to a string.
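# Toy example (not from the original source; tiny primes for illustration only):
# with n = 61 * 53 = 3233 and e = 17, the letter "H" (72) encrypts to
# pow(72, 17, 3233) == 3000, so encrypt_ascii_string("H") yields "3000".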
@dataclass
class TextbookRsaPrivateKey(PartialRsaPrivateKey):
public_exponent: InitVar[int]
prime_factors: InitVar[Sequence[int]]
_primes: Tuple[int, ...] = field(init=False)
_modulus: int = field(init=False)
_private_exponent: int = field(init=False)
_public: TextbookRsaPublicKey = field(init=False)
@classmethod
def from_key(cls, key: PrivateKey) -> TextbookRsaPrivateKey:
if not isinstance(key, RsaPrivateKey):
raise TypeError()
return cls(public_exponent=key.public.public_exponent, prime_factors=key.primes)
@classmethod
def from_asn1crypto(cls, key: asn1keys.PrivateKeyInfo) -> TextbookRsaPrivateKey:
if key.algorithm != "rsa":
raise ValueError()
priv = key["private_key"].parsed
# XXX load all other parts, implement CRT.
return cls(
public_exponent=priv["public_exponent"].native,
prime_factors=(priv["prime1"].native, priv["prime2"].native),
)
@classmethod
def load(
cls, content: Union[str, bytes, asn1keys.PrivateKeyInfo], password: Optional[Union[str, bytes]] = None
) -> TextbookRsaPrivateKey:
"""
Load a private key from one of:
* PKCS#8, PEM (str/bytes) or DER (bytes)
* Traditional RSA (str/bytes), PEM or DER (bytes)
* OpenSSH (XXX)
* asn1crypto PrivateKeyInfo
If key is encrypted, pass the decryption password.
"""
if isinstance(content, asn1keys.PrivateKeyInfo):
return cls.from_asn1crypto(content)
if isinstance(content, str):
content = content.encode()
assert isinstance(content, bytes)
if content.strip().startswith(b"-----BEGIN"):
unarmor_result = unarmor(content)
assert isinstance(unarmor_result, tuple)
pem_type, headers, content = unarmor_result
if content[0] == 0x30:
key = asn1keys.PrivateKeyInfo.load(content)
if key.dump(True) != content:
raise ValueError()
return cls.from_asn1crypto(key)
raise TypeError()
def __post_init__(self, public_exponent: int, prime_factors: Sequence[int]) -> None:
if len(prime_factors) < 2:
raise ValueError("Need at least 2 primes!")
self._primes = tuple(prime_factors)
self._modulus = reduce(mul, prime_factors, 1)
# output of Carmichael function
lambda_n = reduce(lcm, (r - 1 for r in prime_factors), 1)
self._private_exponent = invert(public_exponent, lambda_n)
# XXX implement CRT
self._public = TextbookRsaPublicKey(public_exponent, self._modulus)
@property
def public(self) -> TextbookRsaPublicKey:
"""
Get public key for this private key.
"""
return self._public
@property
def primes(self) -> Sequence[int]:
"""
Get list of primes. XXX describe order!
"""
return self._primes
@property
def private_exponent(self) -> int:
"""
Get private exponent.
"""
return self._private_exponent
async def sign_int(self, msg: int, meta: Optional[RsaSignatureMetadata] = None) -> RsaSignature:
"""
Sign an integer value.
"""
return RsaSignature.from_int(
key=self._public,
meta=meta or RsaSignatureMetadata(AsymmetricAlgorithm.RSA, RsaScheme.RAW),
value=pow(msg, self._private_exponent, self._modulus),
)
# async def decrypt(self, ciphertext: bytes) -> bytes:
# raise NotImplementedError
# XXX rework function signature
async def decrypt_int(self, ciphertext: int) -> int:
return pow(ciphertext, self._private_exponent, self._modulus)
async def _decrypt_ascii_next(self, ciphertext: str, valid_chars: Set[int]) -> Set[Tuple[int, str]]:
results: Set[Tuple[int, str]] = set()
value = 0
if ciphertext[0] == '0':
# Leading zeros are not valid encodings in this scheme.
# Exception is "0" itself, if it's valid.
if 0 in valid_chars:
results.add((1, chr(0)))
return results
for length, digit in enumerate(ciphertext, start=1):
value = value * 10 + int(digit)
if value >= self._modulus:
break
decrypted = await self.decrypt_int(value)
if decrypted in valid_chars:
results.add((length, chr(decrypted)))
return results
    async def decrypt_ascii_string(self, ciphertext: str, pos: int = 0) -> Set[str]:
"""
Stupid encryption scheme often used for crypto riddles.
Each char is encrypted individually using textbook RSA. The resulting
integers are represented as decimal numbers and concatenated.
Multiple decryptions are possible, so return a set.
If decryption fails, raise ValueError
"""
valid_chars = {9, 10, 13, *range(32, 127)}
result = ""
while pos < len(ciphertext):
cur = await self._decrypt_ascii_next(ciphertext[pos:], valid_chars)
if not cur:
raise ValueError(f"Cannot decrypt ciphertext at pos {pos}")
if len(cur) == 1:
((cur_pos, cur_char),) = cur
pos += cur_pos
result += cur_char
else:
results = set()
for cur_pos, cur_char in cur:
try:
for suffix in await self.decrypt_ascii_string(ciphertext, pos + cur_pos):
results.add(result + cur_char + suffix)
except ValueError:
pass
if not results:
raise ValueError(f"Cannot decrypt ciphertext at pos {pos}")
return results
return {result}
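# Toy example (not from the original source): with TextbookRsaPrivateKey(17, (61, 53)),
# whose private exponent works out to 413, decrypt_ascii_string("3000") recovers {"H"},
# the counterpart of the encryption sketch above.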
```
#### File: cryptokey/cryptokey/oid.py
```python
from __future__ import annotations
from functools import total_ordering
from itertools import chain
from typing import Sequence, Tuple, Union
from asn1crypto.core import ObjectIdentifier as Asn1ObjId
def to_int_tuple(value: OidValue) -> Tuple[int, ...]:
"""
Convert several types to a tuple of ints.
"""
if isinstance(value, ObjectIdentifier):
return value.value
if isinstance(value, bytes):
value = Asn1ObjId.load(value)
if isinstance(value, Asn1ObjId):
value = value.dotted
if isinstance(value, str):
value = value.split(".")
result = tuple(map(int, value))
# X.660 6.2.1
for i in result:
if i < 0:
raise ValueError("OID arcs cannot be negative")
# X.660 6.2.1 a)
if len(result) > 0 and result[0] > 2:
raise ValueError("Root arc must be 0, 1 or 2")
# X.660 6.2.1 b)
if len(result) > 1 and result[0] < 2 and result[1] > 39:
raise ValueError("Second arc must be in [0, 39] for roots 0 and 1")
return result
@total_ordering
class ObjectIdentifier:
"""
An X.660 Object Identifier.
"""
def __init__(self, value: OidValue):
"""
Construct a new ObjectIdentifier
"""
self.value = to_int_tuple(value)
@property
def asn1(self) -> Asn1ObjId:
"""
Return the asn1crypto version of the ObjectIdentifier.
"""
return Asn1ObjId(self.dotted)
@property
def der(self) -> bytes:
"""
Return the ASN.1 DER encoding of the ObjectIdentifier.
"""
return self.asn1.dump()
@property
def dotted(self) -> str:
"""
Return the ObjectIdentifier in dotted form, e.g. "1.3.6.1.4.1".
"""
return ".".join(str(v) for v in self.value)
def __bytes__(self) -> bytes:
"""
See der.
"""
return self.der
def __contains__(self, item: OidValue) -> bool:
"""
Check for prefixes. E.g. `OID-1-3-6-1-4-1 in OID-1-3-6` is True.
"""
item = to_int_tuple(item)
return self.value == item[0 : len(self.value)]
def __eq__(self, other: object) -> bool:
if not isinstance(other, (tuple, list, str, bytes, Asn1ObjId, ObjectIdentifier)):
return NotImplemented
return self.value == to_int_tuple(other)
def __getitem__(self, key: Union[int, slice]) -> Union[int, ObjectIdentifier]:
"""
Return single arc as an int, or a prefix as another ObjectIdentifier.
"""
if isinstance(key, int):
return self.value[key]
if isinstance(key, slice):
if key.start not in (0, None):
raise IndexError("Slices need to start at 0")
if key.step is not None:
raise IndexError("No step supported")
return ObjectIdentifier(self.value[key])
raise TypeError
def __len__(self) -> int:
"""
Number of arcs in the OID.
"""
return len(self.value)
def __lt__(self, other: OidValue) -> bool:
"""
Compare two OIDs arc-by-arc. E.g. 1.2.3 < 1.4
"""
return self.value < to_int_tuple(other)
def __hash__(self) -> int:
"""
Compute a hash value of the OID.
"""
return hash(self.value)
def __repr__(self) -> str:
"""
Python expression that results in the same ObjectIdentifier.
"""
return "-".join(chain(("OID",), map(str, self.value)))
def __str__(self) -> str:
"""
See dotted.
"""
return self.dotted
def __sub__(self, other: int) -> ObjectIdentifier:
"""
Extend the ObjectIdentifier by a new arc.
Example: foo = OID-1-20-300; foo-4000-50000
"""
return ObjectIdentifier(self.value + (int(other),))
# Root object ID, to be used like OID-1-3-6-1-4-1
OID = ObjectIdentifier(())
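# Illustrative usage (not part of the original module):
#   arc = OID-1-3-6-1-4-1          # ObjectIdentifier("1.3.6.1.4.1"), built via __sub__
#   arc.dotted == "1.3.6.1.4.1"
#   arc in OID-1-3-6               # True: prefix check via __contains__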
# fmt: off
# Type hint for the various OID formats/types supported by the above functions.
OidValue = Union[
Sequence[Union[int, str]], # sequence of ints or strings, e.g. [10, 20, "30", "40", 50]
str, # dotted string like "10.20.30.40.50"
bytes, # DER encoded OID, e.g. b"\x06\x05\x83\x24\x1e\x28\x32"
Asn1ObjId, # asn1crypto ObjectIdentifier, e.g. Asn1ObjId("10.20.30.40.50")
ObjectIdentifier, # ObjectIdentifier which uses a tuple internally
]
# fmt: on
```
#### File: cryptokey/padding/v15.py
```python
from asn1crypto.algos import DigestInfo
from asn1crypto.core import Null
from ..hashes import MessageDigest
def enc_digestinfo(dgst: MessageDigest) -> bytes:
return DigestInfo(
{
"digest_algorithm": {
"algorithm": dgst.oid.dotted,
"parameters": Null(), # XXX dgst.parameters, add parameters to outer or inner or both?
},
"digest": dgst.value,
}
).dump()
def pad_pkcs1_v15(msg: bytes, em_len: int) -> bytes:
msg_len = len(msg)
if msg_len > em_len - 11:
raise ValueError("msg has {} bytes, must be at most {}.".format(msg_len, em_len - 11))
return b"\x00\x01" + (b"\xff" * (em_len - msg_len - 3)) + b"\x00" + msg
```
#### File: cryptokey/public/key.py
```python
from __future__ import annotations
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass, field
from enum import Enum, auto
from ..hashes import MessageDigest
class AsymmetricAlgorithm(Enum):
DSA = auto()
ECDSA = auto()
ELGAMAL = auto()
RSA = auto()
class PublicKey(metaclass=ABCMeta):
# @property
# @abstractmethod
# def encrypt_options(self) -> Any:
# """
# Get default options for encrypting with encrypt()
# """
# # XXX More specific than Any?
@classmethod
@abstractmethod
def from_key(cls, key: PublicKey) -> PublicKey:
"""
Create a backend key instance from another key.
"""
@property
@abstractmethod
def algorithm(self) -> AsymmetricAlgorithm:
"""
Algorithm
"""
@abstractmethod
def export_public_der(self) -> bytes:
"""
"""
@abstractmethod
def export_public_pem(self) -> str:
"""
"""
@abstractmethod
def export_public_openssh(self) -> str:
"""
"""
# @abstractmethod
# def validate(self) -> None:
# """
# Run some checks on the key to determine if it's valid. E.g. for an RSA private
# key this could mean that the modulus is the product of the primes. An EC public
# key could check if its point is on the curve.
# """
# @abstractmethod
# def verify(self, signature: Signature, message: bytes) -> None:
# """
# Validate if a signature is valid for a message.
# """
# @abstractmethod
# def verify_digest(self, signature: Signature, digest: MessageDigest) -> None:
# """
# Validate if a signature is valid for a message.
# """
# @abstractmethod
# def encrypt(self, message: bytes) -> bytes:
# """
# Encrypt a message to a ciphertext.
# """
# # XXX should there be a class for the return value? See what e.g. CMS needs.
class PrivateKey(metaclass=ABCMeta):
@property
@abstractmethod
def sig_meta(self) -> SignatureMetadata:
"""
Get default options for signing with sign()
"""
@classmethod
@abstractmethod
def from_key(cls, key: PrivateKey) -> PrivateKey:
"""
Create a backend key instance from another key.
"""
@property
@abstractmethod
def algorithm(self) -> AsymmetricAlgorithm:
"""
Algorithm
"""
@property
@abstractmethod
def public(self) -> PublicKey:
"""
Get an object that only holds the public portions of the key.
"""
@abstractmethod
def export_private_der(self) -> bytes:
"""
"""
@abstractmethod
def export_private_pem(self) -> str:
"""
"""
@abstractmethod
def export_private_openssh(self) -> str:
"""
"""
@abstractmethod
async def sign_digest(self, digest: MessageDigest) -> Signature:
"""
Sign a message that was already hashed.
"""
@abstractmethod
async def sign(self, msg: bytes) -> Signature:
"""
Sign a message.
"""
# @abstractmethod
# async def decrypt(self, ciphertext: bytes) -> bytes:
# """
# Decrypt a ciphertext to a message.
# """
@dataclass(frozen=True)
class SignatureMetadata:
"""
Meta data for signatures. Extended by algorithm specific sub classes.
"""
algorithm: AsymmetricAlgorithm
@dataclass
class Signature:
"""
Result of a sign operation.
"""
key: PublicKey = field(repr=False)
meta: SignatureMetadata
@property
def algorithm(self) -> AsymmetricAlgorithm:
"""
Algorithm
"""
return self.meta.algorithm
# def verify(self, message: bytes) -> None:
# self.key.verify(self, message)
# def verify_digest(self, digest: MessageDigest) -> None:
# self.key.verify_digest(self, digest)
```
#### File: stubs/asn1crypto/core.py
```python
from __future__ import annotations
from typing import ByteString, Union, Dict, Any, Optional, TypeVar, Type
ASN1_VALUE = TypeVar("ASN1_VALUE", bound="Asn1Value")
class Asn1Value:
def __init__(self, value: Optional[Any] = None) -> None:
...
def dump(self, force: bool = False) -> bytes:
...
@classmethod
def load(cls: Type[ASN1_VALUE], encoded_data: ByteString, strict: bool = False) -> ASN1_VALUE:
...
@property
def parsed(self) -> Any:
...
@property
def native(self) -> Any:
...
class ObjectIdentifier(Asn1Value):
@property
def dotted(self) -> str:
...
class Null(Asn1Value):
...
class Sequence(Asn1Value):
def __getitem__(self, key: str) -> Asn1Value:
...
class UTF8String(Asn1Value):
...
class PrintableString(Asn1Value):
...
```
#### File: stubs/asn1crypto/pem.py
```python
from typing import Tuple, Dict, Union, Generator, Optional, Mapping
Unarmor = Tuple[str, Dict[str, str], bytes]
def unarmor(pem_bytes: bytes, multiple: bool = False) -> Union[Unarmor, Generator[Unarmor, None, None]]:
...
def armor(type_name: str, der_bytes: bytes, headers: Optional[Mapping[str, str]] = None) -> bytes:
...
```
#### File: cryptokey/tests/test_backend_cryptography_rsa.py
```python
from asyncio import run
from base64 import b64decode
import pytest
from cryptography.hazmat.primitives import serialization
from cryptokey import hashes
from cryptokey.backend.cryptography import backend
from cryptokey.backend.cryptography.hashes import md5, sha2_256
from cryptokey.backend.cryptography.rsa import RsaPrivateKey, RsaPublicKey
from cryptokey.backend.textbook import ecc, ecdsa
from cryptokey.backend.textbook.rsa import TextbookRsaPrivateKey
from cryptokey.public import rsa
from cryptokey.public.key import AsymmetricAlgorithm
key_pem = """
-----<KEY>
"""
key = RsaPrivateKey(serialization.load_pem_private_key(key_pem.encode(), password=None, backend=backend))
public = key.public
pub64 = (
"<KEY>"
"<KEY>"
"<KEY>ICQa+3ZGyH1zOkeJ568wT6+RX861ivdJtQpZ0skf+OovDgMO+ZITycP"
"ezDt32KRvI39RyLGrSoc87trWsSCsouYcckufZZIg0CJNtfENZQRufsyoA4SEOiBfk5usxcyqBA2YWOggow6eq+p3kHQIDAQAB"
)
def b64int(val: str) -> int:
return int.from_bytes(b64decode(val), "big")
def test_public_exponent() -> None:
assert public.public_exponent == 65537
def test_public_modulus() -> None:
assert public.modulus % 1234567 == 218930
def test_public_export_der() -> None:
assert public.export_public_der() == b64decode(pub64)
def test_public_export_pem() -> None:
lines = public.export_public_pem().splitlines()
assert lines[0] == "-----BEGIN PUBLIC KEY-----"
assert lines[-1] == "-----END PUBLIC KEY-----"
assert "".join(lines[1:-1]) == pub64
def test_public_export_ssh() -> None:
assert public.export_public_openssh() == (
"ssh-rsa "
"AAAAB3NzaC1yc2EAAAADAQ<KEY>"
"4uCnbK5yeq9IErCYDwX4Omu7G7lsQMx44HPKG4j+iwVSOgXlhWG276W7iosjUuVLO8uQfTjk9tXv3CgzdQ4sEu64ltDOI"
"apRXQiOZBJgs7Cc2RygVpb8mWJeWT8cPDmVNoTUkgJBr7dkbIfXM6R4nnrzBPr5FfzrWK90m1ClnSyR/46i8OAw75khPJ"
"w97MO3fYpG8jf1HIsatKhzzu2taxIKyi5hxyS59lkiDQIk218Q1lBG5+zKgDhIQ6IF+Tm6zFzKoEDZhY6CCjDp6r6neQd"
)
def test_public_from_key() -> None:
with pytest.raises(TypeError):
RsaPublicKey.from_key(b"foo") # type: ignore
pub = RsaPublicKey.from_key(public)
assert pub is not public
assert pub.public_exponent == public.public_exponent
assert pub.modulus == public.modulus
def test_private_from_key() -> None:
priv = RsaPrivateKey.from_key(key)
assert priv is not key
assert sorted(priv.primes) == sorted(key.primes)
assert priv.private_exponent == key.private_exponent
with pytest.raises(TypeError):
RsaPrivateKey.from_key("test") # type: ignore
ecckey = ecdsa.TextbookEccPrivateKey(ecc.NIST_P_256, 12345)
with pytest.raises(TypeError):
RsaPrivateKey.from_key(ecckey)
mprime_key = TextbookRsaPrivateKey(65537, (5284193, 941859169, 259867))
with pytest.raises(NotImplementedError, match="multi-prime RSA"):
RsaPrivateKey.from_key(mprime_key)
mprime_key._primes = (mprime_key._primes[0],)
with pytest.raises(ValueError, match="Need at least 2 primes"):
RsaPrivateKey.from_key(mprime_key)
def test_private_primes() -> None:
assert sorted(key.primes) == [
b64int(
"zNOvX2S//PnmYs3m2erfdSLUmMHTO4bHyml4MCYr2Z0zOPHT19tMEXdfKkQALTry8ph1y7FmYTeiF3ss9wmyLb"
"lDo0lrhHuJ9pvTuAQYk/OZLGpFIHzgwsWyeIWEeB9UErOK/KQUSbWRYGyZAa25dc98qPAU/GzPQGZ4gaLlrDE="
),
b64int(
"<KEY>6Hh8JztQJQrxWA/L/5HR7qoy6FoenKQQJOnLJIaZ5tFvlBx/yDgx29XLPgXG1FdT9jXu2+dn2f2u9w"
"Me24HiGshnlFWKYspyIrQKxxXlsSVRbise3jpgpran4SjDvBZiWI1scG0L9v5gCe6/SHrBunBwpKURzauKN60="
),
]
def test_private_exponent() -> None:
assert key.private_exponent == b64int(
"<KEY>"
"<KEY>"
"<KEY>"
"5+tTQeMHqZEH0/TfIAnlJvA51gsHaq4SCoJMeakCwRKj09M7a84GiXJK9LoBkbI3m3HOKq8wdTbe1Hf5dIwQ=="
)
def test_sign_v15() -> None:
key.default_hash_algorithm = hashes.sha2_256()
sig0 = run(key.sign_v15(b"Hello"))
sig1 = run(key.sign_v15_digest(sha2_256(b"Hello")))
assert sig0 == sig1
assert sig0.algorithm == AsymmetricAlgorithm.RSA
assert sig0.meta == rsa.RsaV15Metadata(AsymmetricAlgorithm.RSA, rsa.RsaScheme.PKCS1v1_5, hashes.sha2_256())
assert sig0.meta == sig1.meta
key.default_hash_algorithm = hashes.md5()
sig2 = run(key.sign_v15(b"Hello"))
sig3 = run(key.sign_v15_digest(md5(b"Hello")))
assert sig2 == sig3
assert sig2.algorithm == AsymmetricAlgorithm.RSA
assert sig2.meta == rsa.RsaV15Metadata(AsymmetricAlgorithm.RSA, rsa.RsaScheme.PKCS1v1_5, hashes.md5())
assert sig2.meta == sig3.meta
sig4 = run(key.sign_v15(b"Hello", hashes.sha2_256()))
assert sig0 == sig4
# XXX validate signature
def test_sign_pss() -> None:
key.default_hash_algorithm = hashes.sha2_256()
key.default_pss_options = None
opt = rsa.PssOptions(salt_length=123)
sig0 = run(key.sign_pss(b"Hello", opt))
sig1 = run(key.sign_pss_digest(sha2_256(b"Hello"), opt))
assert sig0.algorithm == AsymmetricAlgorithm.RSA
assert sig1.algorithm == AsymmetricAlgorithm.RSA
assert sig0.meta == rsa.RsaPssMetadata(
AsymmetricAlgorithm.RSA,
rsa.RsaScheme.PSS,
hashes.sha2_256(),
rsa.Mgf1Metadata(rsa.MgfAlgorithmId.MGF1, hashes.sha2_256()),
123,
b"\xbc",
)
assert sig0.meta == sig1.meta
with pytest.raises(NotImplementedError, match="Unsupported algorithm"):
run(key.sign_pss(b"Hello", rsa.PssOptions(mgf_alg=rsa.MgfAlgorithm(rsa.MgfAlgorithmId.OTHER))))
with pytest.raises(NotImplementedError, match="Only BC trailer supported"):
run(key.sign_pss(b"Hello", rsa.PssOptions(trailer_field=b"foo")))
with pytest.raises(NotImplementedError, match="Custom salt not supported"):
run(key.sign_pss(b"Hello", rsa.PssOptions(salt=b"foo")))
def test_sign() -> None:
key.default_scheme = rsa.RsaScheme.PKCS1v1_5
key.default_hash_algorithm = hashes.sha2_256()
meta = key.sig_meta
sig0 = run(key.sign(b"Hello"))
assert sig0.algorithm == AsymmetricAlgorithm.RSA
assert sig0.meta == meta
sig1 = run(key.sign_digest(sha2_256(b"Hello")))
assert sig1.algorithm == AsymmetricAlgorithm.RSA
assert sig1.meta == meta
key.default_hash_algorithm = hashes.md5()
sig2 = run(key.sign_digest(sha2_256(b"Hello")))
assert sig2.meta == rsa.RsaV15Metadata(AsymmetricAlgorithm.RSA, rsa.RsaScheme.PKCS1v1_5, hashes.sha2_256())
assert sig2.meta == meta
key.default_scheme = rsa.RsaScheme.PSS
key.default_pss_options = rsa.PssOptions(trailer_field=b"\xbc", salt_length=17)
meta_pss = key.sig_meta
sig3 = run(key.sign(b"Hello"))
assert sig3.meta == meta_pss
assert meta_pss == rsa.RsaPssMetadata(
AsymmetricAlgorithm.RSA,
rsa.RsaScheme.PSS,
hashes.md5(),
rsa.Mgf1Metadata(rsa.MgfAlgorithmId.MGF1, hashes.md5()),
17,
b"\xbc",
)
sig4 = run(key.sign_digest(md5(b"Hello")))
assert sig4.meta == meta_pss
key.default_scheme = rsa.RsaScheme.RAW
with pytest.raises(Exception, match="Bad default scheme"):
run(key.sign(b"foo"))
with pytest.raises(Exception, match="Bad default scheme"):
run(key.sign_digest(md5(b"foo")))
with pytest.raises(Exception, match="Unsupported scheme"):
key.sig_meta
# XXX validate signatures. Maybe sign with a key that allows for no salt.
def test_private_export() -> None:
assert isinstance(key.export_private_der(), bytes)
assert isinstance(key.export_private_pem(), str)
with pytest.raises(NotImplementedError):
key.export_private_openssh()
```
#### File: cryptokey/tests/test_backend_hashlib.py
```python
import pytest
from cryptokey.backend import hashlib
from hashvectors import check_vector, hash_vectors
@pytest.mark.parametrize("vector", hash_vectors)
def test_hashlib(vector) -> None:
try:
check_vector(vector, hashlib, hashlib.HashlibHash)
except NotImplementedError:
pass
```
#### File: cryptokey/tests/test_public_ecdsa.py
```python
import pytest
from cryptokey.hashes import sha2_256
from cryptokey.public.ecdsa import EcdsaSignature, EcdsaSignatureMetadata
from cryptokey.public.key import AsymmetricAlgorithm
def test_signature() -> None:
key = None
meta = EcdsaSignatureMetadata(algorithm=AsymmetricAlgorithm.ECDSA, hash_alg=sha2_256())
val_10_20 = b"\x30\x06\x02\x01\x0a\x02\x01\x14"
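    # DER breakdown of val_10_20 (for reference): 0x30 0x06 opens a SEQUENCE of
    # six bytes; 0x02 0x01 0x0a is INTEGER 10 (r) and 0x02 0x01 0x14 is INTEGER 20 (s).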
sig0 = EcdsaSignature(key=key, meta=meta, der=val_10_20)
assert sig0.r == 10
assert sig0.s == 20
sig1 = EcdsaSignature(key=key, meta=meta, r=10, s=20)
assert sig0 == sig1
with pytest.raises(ValueError, match="Bad parameters"):
EcdsaSignature(key=key, meta=meta, der=val_10_20, r=10, s=20)
``` |
{
"source": "joernheissler/hacmec",
"score": 2
} |
#### File: hacmec/hacmec/client.py
```python
from __future__ import annotations
from dataclasses import dataclass
from .http import HttpException, HttpResponse, HttpClient
from . import problem
from typing import Optional, Any, Mapping, Sequence, List, Deque, Dict
from .jws import Jwk, b64_url_enc, sha2_256
from collections import deque
from cryptokey.public.key import PrivateKey
from .util import force
import json
import logging
MAX_NONCES = 10
class Hacmec:
"""
"""
http: HttpClient
_nonces: Deque[str]
_directory: AcmeDirectory
def __init__(self, http: HttpClient) -> None:
"""
"""
self.http = http
self._nonces = deque()
async def load_directory(self, url: str) -> None:
"""
"""
directory = AcmeDirectory(self, url)
await directory.load()
self._directory = directory
@property
def directory(self) -> AcmeDirectory:
try:
return self._directory
except AttributeError:
raise Exception('load_directory was not called')
async def get(self, url: str) -> HttpResponse:
"""
"""
return self._process_resp(await self.http.get(url))
async def head(self, url: str) -> HttpResponse:
"""
"""
return self._process_resp(await self.http.head(url))
async def post(self, key: Jwk, url: str, payload: Optional[Mapping[str, Any]] = None,
kid: Optional[str] = None) -> HttpResponse:
"""
"""
try:
nonce = await self._get_nonce()
body = await key.sign(url, nonce, payload, kid)
return self._process_resp(await self.http.post(url, 'application/jose+json', body))
except problem.AcmeProblemBadNonce as err:
assert err.resp is not None
# Try again with the nonce that was sent with the problem response; ACME § 6.5
if not err.resp.headers('Replay-Nonce'):
raise Exception('Got no nonce')
nonce = self._nonces.pop()
body = await key.sign(url, nonce, payload, kid)
return self._process_resp(await self.http.post(url, 'application/jose+json', body))
async def new_nonce(self) -> str:
"""
Fetch a new nonce and return it.
"""
resp = await self.head(self.directory.new_nonce)
if not resp.headers('Replay-Nonce'):
raise Exception('Got no nonce')
return self._nonces.pop()
def get_account(self, key: PrivateKey, kid: str) -> AcmeAccount:
"""
Return AcmeAccount object for key and kid.
"""
return AcmeAccount(self, Jwk.load(key), kid, {})
async def find_account(self, key: PrivateKey) -> AcmeAccount:
"""
Recover account URL.
"""
return await AcmeAccount.find(self, Jwk.load(key))
async def register_account(self, key: PrivateKey, contact: Optional[Sequence[str]] = None, tos_agreed: bool = False,
ext_kid: Optional[str] = None, ext_key: Optional[str] = None) -> AcmeAccount:
"""
Register a new account.
"""
return await AcmeAccount.register(self, Jwk.load(key), contact or [], tos_agreed, ext_kid, ext_key)
def _process_resp(self, resp: HttpResponse) -> HttpResponse:
"""
Process HTTP response. Check for errors, extract nonces.
"""
for nonce in resp.headers('Replay-Nonce'):
self._nonces.append(nonce)
if len(self._nonces) > MAX_NONCES:
self._nonces.popleft()
if resp.type == problem.HttpProblem.MIME_TYPE:
raise problem.HttpProblem.create(resp.json, resp)
if resp.status // 100 != 2:
raise HttpException(resp)
return resp
async def _get_nonce(self) -> str:
"""
Retrieve a nonce, either from the cache or a fresh one.
"""
try:
return self._nonces.popleft()
except IndexError:
return await self.new_nonce()
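# Illustrative usage sketch (not part of the library).  The directory URL,
# contact address and user-agent string are placeholders; AioHttpClient is the
# aiohttp-based HttpClient implementation from hacmec.http.aiohttp.
async def _example_issue_order(account_key: PrivateKey) -> None:
    from .http.aiohttp import AioHttpClient
    async with AioHttpClient('example-client/0.1') as http:
        client = Hacmec(http)
        await client.load_directory('https://acme.example.test/directory')
        account = await client.register_account(
            account_key, contact=['mailto:admin@example.test'], tos_agreed=True)
        order = await account.new_order([AcmeIdentifier.dns('www.example.test')])
        logging.info('created order %s with status %s', order.url, order.status)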
class AcmeDirectory:
"""
ACME Directory
"""
_hacmec: Hacmec
url: str
_data: Mapping[str, Any]
def __init__(self, hacmec: Hacmec, url: str) -> None:
"""
hacmec: Parent object
url: ACME directory URL
"""
self._hacmec = hacmec
self.url = url
async def load(self) -> None:
"""
Retrieve the directory.
"""
self._data = force((await self._hacmec.get(self.url)).json, dict)
def get_str(self, key: str) -> str:
"""
Get a value and ensure that it's a str.
"""
return force(self._data[key], str)
@property
def new_nonce(self) -> str:
"""
New nonce
"""
return self.get_str('newNonce')
@property
def new_account(self) -> str:
"""
New account
"""
return self.get_str('newAccount')
@property
def new_order(self) -> str:
"""
New order
"""
return self.get_str('newOrder')
@property
def new_authz(self) -> str:
"""
New authorization
"""
return self.get_str('newAuthz')
@property
def revoke_cert(self) -> str:
"""
Revoke certificate
"""
return self.get_str('revokeCert')
@property
def key_change(self) -> str:
"""
Key change
"""
return self.get_str('keyChange')
@property
def meta(self) -> Mapping[str, Any]:
"""
Metadata object
"""
return force(self._data.get('meta', {}), dict)
@property
def terms_of_service(self) -> str:
"""
A URL identifying the current terms of service.
"""
return force(self.meta['termsOfService'], str)
@property
def website(self) -> str:
"""
An HTTP or HTTPS URL locating a website providing more information about the ACME server.
"""
return force(self.meta['website'], str)
@property
def caa_identities(self) -> List[str]:
"""
The hostnames that the ACME server recognizes as referring to itself for the purposes of CAA record
validation.
"""
value = force(self.meta.get('caaIdentities', []), list)
for val in value:
force(val, str)
return value
@property
def external_account_required(self) -> bool:
"""
If True, then the CA requires that all new-account requests include an "externalAccountBinding" field
associating the new account with an external account.
"""
return force(self.meta.get('externalAccountRequired', False), bool)
class AcmeAccount:
"""
ACME account object.
"""
_hacmec: Hacmec
key: Jwk
kid: str
data: Mapping[str, Any]
@classmethod
async def find(cls, hacmec: Hacmec, key: Jwk) -> AcmeAccount:
"""
Find account by key.
hacmec: Parent object
key: private key
"""
logging.info('Retrieving account by key')
resp = await hacmec.post(key, hacmec.directory.new_account, {
'onlyReturnExisting': True,
})
return AcmeAccount(hacmec, key, resp.location, resp.json)
@classmethod
async def register(cls, hacmec: Hacmec, key: Jwk, contact: Sequence[str], tos_agreed: bool,
ext_kid: Optional[str], ext_key: Optional[str]) -> AcmeAccount:
"""
Register new account.
"""
params: Dict[str, Any] = {}
if contact:
params['contact'] = contact
if tos_agreed:
params['termsOfServiceAgreed'] = tos_agreed
logging.info('Registering new account')
resp = await hacmec.post(key, hacmec.directory.new_account, params)
return AcmeAccount(hacmec, key, resp.location, resp.json)
def __init__(self, hacmec: Hacmec, key: Jwk, kid: str, data: Mapping[str, Any]) -> None:
"""
hacmec: Parent object
"""
self._hacmec = hacmec
self.key = key
self.kid = kid
self.data = data
def __str__(self) -> str:
return f'AcmeAccount(kid={self.kid}, status={self.status})'
@property
def contact(self) -> Sequence[str]:
contacts = force(self.data.get('contact', []), list)
for cont in contacts:
force(cont, str)
return contacts
@property
def status(self) -> str:
return force(self.data['status'], str)
@property
def orders(self) -> str:
return force(self.data['orders'], str)
@property
def tos_agreed(self) -> bool:
return force(self.data['termsOfServiceAgreed'], bool)
async def post(self, url: str, payload: Optional[Mapping[str, Any]] = None) -> HttpResponse:
return await self._hacmec.post(self.key, url, payload, self.kid)
async def update(self, updates: Optional[Mapping[str, Any]] = None) -> None:
self.data = (await self.post(self.kid, updates or {})).json
async def set_contacts(self, contacts: Sequence[str]) -> None:
"""
Update account contacts.
"""
await self.update({'contact': contacts})
async def deactivate(self) -> None:
"""
Deactivate account.
"""
await self.update({'status': 'deactivated'})
async def change_key(self, new_key: PrivateKey) -> None:
"""
Account key rollover.
"""
new_jwk = Jwk.load(new_key)
url = self._hacmec.directory.key_change
sig_new = await new_jwk.sign(url, None, {
'account': self.kid,
'oldKey': self.key.jwk,
})
resp = await self.post(url, json.loads(sig_new))
self.key = new_jwk
async def new_order(self, ids: Sequence[AcmeIdentifier], not_before: Optional[str] = None, not_after: Optional[str] = None) -> AcmeOrder:
"""
Create new ACME order.
"""
        payload: Dict[str, Any] = {
'identifiers': [identifier.obj for identifier in ids],
}
if not_before:
payload['notBefore'] = not_before
if not_after:
payload['notAfter'] = not_after
resp = await self.post(self._hacmec.directory.new_order, payload)
return AcmeOrder(self, resp.location, resp.json)
async def load_order(self, url: str) -> AcmeOrder:
return await AcmeOrder.load(self, url)
class AcmeOrder:
"""
ACME Order object.
"""
_acc: AcmeAccount
_hacmec: Hacmec
url: str
data: Mapping[str, Any]
_identifiers: List[AcmeIdentifier]
_authorizations: List[AcmeAuthorization]
@classmethod
async def load(cls, account: AcmeAccount, url: str) -> AcmeOrder:
"""
Load order by URL
"""
resp = await account.post(url)
return cls(account, url, resp.json)
def __init__(self, account: AcmeAccount, url: str, data: Mapping[str, Any]) -> None:
"""
"""
self._acc = account
self._hacmec = account._hacmec
self.url = url
self.data = data
@property
def status(self) -> str:
return force(self.data['status'], str)
@property
def expires(self) -> str:
return force(self.data['expires'], str)
@property
def identifiers(self) -> List[AcmeIdentifier]:
try:
return self._identifiers
except AttributeError:
self._identifiers = [
                AcmeIdentifier(force(identifier['type'], str), force(identifier['value'], str))
                for identifier in force(self.data['identifiers'], list)
]
return self._identifiers
@property
def not_before(self) -> str:
return force(self.data['notBefore'], str)
@property
def not_after(self) -> str:
return force(self.data['notAfter'], str)
@property
def error(self) -> problem.HttpError:
raise NotImplementedError()
@property
def authorizations(self) -> List[AcmeAuthorization]:
try:
return self._authorizations
except AttributeError:
self._authorizations = [
AcmeAuthorization(self, force(url, str), {})
for url in force(self.data['authorizations'], list)
]
return self._authorizations
@property
def finalize(self) -> str:
return force(self.data['finalize'], str)
async def send_csr(self, req: bytes) -> None:
self.data = (await self._acc.post(self.finalize, {'csr': b64_url_enc(req)})).json
@property
def certificate(self) -> str:
return force(self.data['certificate'], str)
async def update(self) -> None:
self.data = (await self._acc.post(self.url)).json
async def download(self) -> bytes:
return (await self._acc.post(self.certificate)).body
class AcmeAuthorization:
_acc: AcmeAccount
_hacmec: Hacmec
order: AcmeOrder
url: str
data: Mapping[str, Any]
def __init__(self, order: AcmeOrder, url: str, data: Mapping[str, Any]) -> None:
self._acc = order._acc
self._hacmec = self._acc._hacmec
self.order = order
self.url = url
self.data = data
async def update(self) -> None:
self.data = (await self._acc.post(self.url)).json
@property
def challenges(self) -> Sequence[AcmeChallenge]:
return [
AcmeChallenge.create(self, force(chall, dict))
for chall in force(self.data['challenges'], list)
]
@property
def status(self) -> str:
return force(self.data['status'], str)
@property
def expires(self) -> str:
return force(self.data['expires'], str)
@property
def identifier(self) -> AcmeIdentifier:
iden = force(self.data['identifier'], dict)
return AcmeIdentifier(force(iden['type'], str), force(iden['value'], str))
@property
def wildcard(self) -> bool:
return force(self.data.get('wildcard', False), bool)
class AcmeChallenge:
_acc: AcmeAccount
_hacmec: Hacmec
auth: AcmeAuthorization
data: Mapping[str, Any]
def __init__(self, auth: AcmeAuthorization, data: Mapping[str, Any]) -> None:
self.auth = auth
self.data = data
self._acc = auth._acc
self._hacmec = self._acc._hacmec
@property
def status(self) -> str:
return force(self.data['status'], str)
@property
def url(self) -> str:
return force(self.data['url'], str)
@property
def typ(self) -> str:
return force(self.data['type'], str)
@property
def token(self) -> str:
return force(self.data['token'], str)
@property
def error(self) -> problem.HttpError:
raise NotImplementedError()
@property
def key_authorization(self) -> str:
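        """
        Key authorization string (RFC 8555 §8.1): the challenge token and the
        account key's JWK thumbprint, joined by a '.'.
        """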
return self.token + '.' + self._acc.key.thumb
async def respond(self) -> None:
resp = await self._acc.post(self.url, {})
@classmethod
def create(cls, auth: AcmeAuthorization, data: Mapping[str, Any]) -> AcmeChallenge:
typ = force(data['type'], str)
for sub in cls.__subclasses__():
if sub.TYPE == typ:
return sub(auth, data)
return cls(auth, data)
class AcmeChallengeHttp01(AcmeChallenge):
TYPE = 'http-01'
class AcmeChallengeTlsAlpn01(AcmeChallenge):
TYPE = 'tls-alpn-01'
class AcmeChallengeDns01(AcmeChallenge):
TYPE = 'dns-01'
@property
def fqdn(self) -> str:
return '_acme-challenge.' + self.auth.identifier.value.rstrip('.')
@property
def txt_record(self) -> str:
return b64_url_enc(sha2_256(self.key_authorization.encode()).value)
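    # Illustrative example (not part of the library): for the identifier
    # "example.org" the validation record is published as
    #   _acme-challenge.example.org  IN TXT  "<txt_record>"
    # where <txt_record> is the base64url-encoded SHA-256 digest of the
    # key authorization (RFC 8555 §8.4).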
@dataclass(frozen=True)
class AcmeIdentifier:
typ: str
value: str
@property
def obj(self) -> Mapping[str, str]:
return {
'type': self.typ,
'value': self.value,
}
@classmethod
    def dns(cls, value: str) -> AcmeIdentifier:
return cls('dns', value)
```
#### File: hacmec/http/aiohttp.py
```python
from __future__ import annotations
from types import TracebackType
from typing import Any, Dict, Mapping, Optional, Sequence, Type
import async_timeout
from multidict import CIMultiDictProxy
from aiohttp import ClientSession
from aiohttp.http import SERVER_SOFTWARE
from . import HttpClient, HttpResponse
class AioHttpResponse(HttpResponse):
"""
Response from AioHttpClient
"""
_status: int
_headers: CIMultiDictProxy[str]
_body: bytes
def __init__(self, status: int, headers: CIMultiDictProxy[str], body: bytes) -> None:
self._status = status
self._headers = headers
self._body = body
@property
def status(self) -> int:
"""
HTTP status code, e.g. 200 or 418.
"""
return self._status
@property
def body(self) -> bytes:
"""
The raw HTTP body, with any transport encoding (Chunked, Compression, etc.) already removed.
"""
return self._body
def headers(self, key: str) -> Sequence[str]:
"""
Get response headers for a case insensitive key.
If key isn't found, return [].
        E.g. resp.headers('ConTENt-TyPe') should return ['application/json'].
"""
return self._headers.getall(key, [])
class AioSession:
"""
Wrapper around aiohttp.ClientSession to make the context manager easier to use.
"""
_session: Optional[ClientSession] = None
_usage = 0
async def __aenter__(self) -> ClientSession:
"""
Enter
"""
assert self._usage >= 0
assert (self._usage == 0) == (not self._session)
if self._usage == 0:
session = ClientSession()
self._session = await session.__aenter__()
self._usage += 1
assert self._session
return self._session
async def __aexit__(self, exc_type: Optional[Type[BaseException]], exc: Optional[BaseException],
trace: Optional[TracebackType]) -> Any:
"""
Exit
"""
assert self._usage > 0 and self._session
self._usage -= 1
if self._usage == 0:
session = self._session
self._session = None
return await session.__aexit__(exc_type, exc, trace)
class AioHttpClient(HttpClient):
"""
aiohttp based implementation of HttpClient.
"""
_user_agent: str
_language: Optional[str]
_timeout = 30
_session: AioSession
def __init__(self, user_agent: str, language: Optional[str] = None) -> None:
"""
user_agent: Name/version of the ACME client software.
language: Value of Accept-Language http header. One SHOULD be given. See
https://tools.ietf.org/html/rfc7231#section-5.3.5
"""
self._user_agent = f'{SERVER_SOFTWARE} {self.hacmec_user_agent} {user_agent}'
self._language = language
self._session = AioSession()
async def get(self, url: str) -> AioHttpResponse:
"""
Make a GET request
"""
return await self.request('GET', url)
async def head(self, url: str) -> AioHttpResponse:
"""
Make a HEAD request
"""
return await self.request('HEAD', url)
async def post(self, url: str, content_type: str, body: bytes) -> AioHttpResponse:
"""
Make a POST request. A "Content-Type" header needs to be added. The body is already encoded,
only chunked or similar may be applied.
"""
return await self.request('POST', url, {'Content-Type': content_type}, data=body)
async def request(self, method: str, url: str, headers: Optional[Mapping[str, str]] = None,
**kwargs: Any) -> AioHttpResponse:
"""
Make HTTP request.
"""
# print(f'{method:4} {url:60} head={headers} args={kwargs}')
head: Dict[str, Any] = {
'User-Agent': self._user_agent,
**(headers or {}),
}
if self._language:
head['Accept-Language'] = self._language
with async_timeout.timeout(self._timeout):
async with self._session as session:
async with session.request(method, url, headers=head, allow_redirects=False, **kwargs) as response:
return AioHttpResponse(response.status, response.headers, await response.read())
async def __aenter__(self) -> AioHttpClient:
await self._session.__aenter__()
return self
async def __aexit__(self, exc_type: Optional[Type[BaseException]], exc: Optional[BaseException],
trace: Optional[TracebackType]) -> Any:
await self._session.__aexit__(exc_type, exc, trace)
```
#### File: hacmec/http/__init__.py
```python
from __future__ import annotations
import json
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from typing import Any, Mapping, Sequence, Union
from httplink import ParsedLinks, parse_link_header
from .. import VERSION
from ..util import force
class HttpResponse(metaclass=ABCMeta):
"""
This is the ABC which the HttpClient class must return for all requests.
"""
_json: Mapping[str, Any]
@property
def json(self) -> Mapping[str, Any]:
"""
Assuming the body is a json object/dict, decode it as such.
"""
try:
return self._json
except AttributeError:
self._json = force(json.loads(self.body.decode()), dict)
return self._json
def header(self, key: str) -> str:
"""
Get value of first header.
"""
return self.headers(key)[0]
@property
def type(self) -> str:
"""
Content-Type: header.
"""
try:
return self.header('Content-Type').split(';')[0].strip()
except IndexError:
return 'application/octet-stream'
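        # e.g. 'application/json; charset=utf-8' yields 'application/json';
        # without a Content-Type header the generic 'application/octet-stream'
        # is assumed.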
@property
def location(self) -> str:
"""
Location: header.
"""
return self.header('Location')
@property
def links(self) -> ParsedLinks:
"""
Parsed Link: http header.
"""
        return parse_link_header(','.join(self.headers('Link')))
@property
@abstractmethod
def status(self) -> int:
"""
HTTP status code, e.g. 200 or 418.
"""
@property
@abstractmethod
def body(self) -> Union[bytes, bytearray]:
"""
The raw HTTP body, with any transport encoding (Chunked, Compression, etc.) already removed.
"""
@abstractmethod
def headers(self, key: str) -> Sequence[str]:
"""
Get response headers for a case insensitive key.
If key isn't found, return [].
        E.g. resp.headers('ConTENt-TyPe') should return ['application/json'].
"""
class HttpClient(metaclass=ABCMeta):
"""
Implementation of an HTTP client.
"""
@property
def hacmec_user_agent(self) -> str:
"""
This string SHOULD be incorporated into the User-Agent request header.
See ACME §6.1
"""
return 'hacmec/' + VERSION
@abstractmethod
async def get(self, url: str) -> HttpResponse:
"""
Make a GET request
"""
@abstractmethod
async def head(self, url: str) -> HttpResponse:
"""
Make a HEAD request
"""
@abstractmethod
async def post(self, url: str, content_type: str, body: bytes) -> HttpResponse:
"""
Make a POST request. A "Content-Type" header needs to be added. The body is already encoded,
only chunked or similar may be applied.
"""
@dataclass
class HttpException(Exception):
"""
Raised when an HTTP server reported an error.
"""
resp: HttpResponse
``` |
{
"source": "joernheissler/pvss",
"score": 3
} |
#### File: pvss/pvss/zq.py
```python
from __future__ import annotations
from dataclasses import dataclass
from os import environ
from secrets import randbelow
from typing import TYPE_CHECKING, Any, Union, cast
from asn1crypto.core import Asn1Value
try:
from gmpy2 import invert, is_prime, mpz
except ImportError: # pragma: no cover
# Work around the fact that gmpy2 is not installed in the readthedocs build image
if "READTHEDOCS" not in environ:
raise
from . import asn1
from .groups import PgvOrInt, PreGroup, PreGroupValue
if TYPE_CHECKING: # pragma: no cover
lazy = property
else:
from lazy import lazy
@dataclass(frozen=True)
class ZqGroup(PreGroup):
"""
Additive group of integers modulo Sophie Germain prime q
"""
q: mpz
def __post_init__(self) -> None:
"""
Ensure that q and 2q+1 are prime.
"""
if self.q < 0:
raise ValueError("q is negative")
if not is_prime(self.q):
raise ValueError("q not prime")
if not is_prime(self.q * 2 + 1):
raise ValueError("2q + 1 not prime")
def __call__(self, value: Union[int, Asn1Value]) -> ZqValue:
"""
Convert an integer into a group element
Returns:
Group element
"""
if isinstance(value, int):
return ZqValue(self, value % self.q)
if isinstance(value, asn1.PreGroupValue):
mpz_value = mpz(int(value))
if not 0 <= mpz_value < self.q:
raise ValueError("Not a valid group element")
return ZqValue(self, mpz_value)
raise TypeError(f"Type not supported: {type(value)}")
@property
def len(self) -> int:
"""
Get number of elements in this group
Returns:
group size
"""
return int(self.q)
@property
def rand(self) -> ZqValue:
"""
Create random element of this group
Returns:
Random group element
"""
return self(randbelow(int(self.q)))
@property
def rand_nonzero(self) -> ZqValue:
"""
Create random element of this group, but never the neutral element.
Returns:
Random group element
"""
return self(1 + randbelow(int(self.q - 1)))
def __repr__(self) -> str:
"""
Outputs a representation of this group.
Returns:
Representation of this group
"""
return f"ZqGroup({self.q})"
@dataclass(frozen=True, eq=False)
class ZqValue(PreGroupValue):
"""
Elements of ZqGroup
"""
group: ZqGroup
_value: mpz
def __int__(self) -> int:
"""
Implement int(a)
Returns:
value
"""
return int(self._value)
def __neg__(self) -> ZqValue:
"""
Implement -a
Returns:
inverse value
"""
return ZqValue(self.group, -self._value % self.group.q)
def __add__(self, other: PgvOrInt) -> ZqValue:
"""
Implement a + b
Args:
other: Second operand
Returns:
Sum of `self` and `other`
"""
if isinstance(other, int):
return ZqValue(self.group, (self._value + other) % self.group.q)
if isinstance(other, ZqValue):
if self.group is not other.group:
raise TypeError("Group mismatch")
return ZqValue(self.group, (self._value + other._value) % self.group.q)
return NotImplemented
def __mul__(self, other: PgvOrInt) -> ZqValue:
"""
Implement a * b
Args:
other: Second operand
Returns:
Product of `self` and `other`
"""
if isinstance(other, int):
return ZqValue(self.group, self._value * other % self.group.q)
if isinstance(other, ZqValue):
if self.group is not other.group:
raise TypeError("Group mismatch")
return ZqValue(self.group, self._value * other._value % self.group.q)
return NotImplemented
@property
def inv(self) -> ZqValue:
"""
Implement multiplicative inverse such that inv(x) * x == 1
Returns:
Multiplicative inverse of `self`
"""
return ZqValue(self.group, invert(self._value, self.group.q))
def __repr__(self) -> str:
"""
Outputs a representation of this value.
Returns:
Representation of this value
"""
return f"{self.group}({self._value})"
@lazy
def asn1(self) -> asn1.PreGroupValue:
"""
Convert value to an ASN.1 type so it can be serialized to DER.
Returns:
Value converted to an ASN.1 value
"""
return asn1.PreGroupValue(int(self._value))
def __eq__(self, other: Any) -> bool:
"""
"""
if isinstance(other, ZqValue):
# "is not" is by intention to force usage of the identical group.
if self.group is not other.group:
return False
return self._value == other._value
return self._value == cast(int, other)
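# Illustrative usage sketch (not part of the library): q = 5 is a Sophie
# Germain prime because 2*5 + 1 = 11 is also prime.  Requires gmpy2.
if __name__ == "__main__":  # pragma: no cover
    demo_group = ZqGroup(mpz(5))
    a, b = demo_group(3), demo_group(4)
    print(a + b)       # ZqGroup(5)(2): addition is carried out modulo q
    print(a * a.inv)   # ZqGroup(5)(1): inv is the multiplicative inverse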
```
#### File: stubs/gmpy2/__init__.py
```python
from __future__ import annotations
from typing import Union, Any
class mpz(object):
def __init__(self, x: _mpz) -> None:
...
def __add__(self, other: _mpz) -> mpz:
...
def __sub__(self, other: _mpz) -> mpz:
...
def __mul__(self, other: _mpz) -> mpz:
...
def __floordiv__(self, other: _mpz) -> mpz:
...
def __mod__(self, other: _mpz) -> mpz:
...
def __rmod__(self, other: _mpz) -> mpz:
...
def __neg__(self) -> mpz:
...
def __int__(self) -> int:
...
def __ge__(self, other: Any) -> bool:
...
def __lt__(self, other: Any) -> bool:
...
def bit_length(self) -> int:
...
_mpz = Union[mpz, int]
def invert(x: _mpz, m: _mpz) -> mpz:
...
def is_prime(x: _mpz, n: int = 25) -> bool:
...
def legendre(x: _mpz, y: _mpz) -> mpz:
...
def powmod(x: _mpz, y: _mpz, m: _mpz) -> mpz:
...
``` |
{
"source": "joernio/joern2sarif",
"score": 3
} |
#### File: joern2sarif/joern2sarif/cli.py
```python
import argparse
import os
import sys
import joern2sarif.lib.convert as convertLib
from joern2sarif.lib.logger import LOG
def build_args():
"""
    Constructs command line arguments for the joern2sarif tool
"""
parser = argparse.ArgumentParser(
description="Utility script to convert joern/ocular json output to sarif."
)
parser.add_argument(
"-i", "--src", dest="src_file", help="Source file", required=True
)
parser.add_argument(
"-o",
"--report_file",
dest="report_file",
default="joern-report.sarif",
help="Report filename with directory",
)
parser.add_argument(
"-t",
"--tool",
dest="tool_name",
choices=["joern", "ocular", "ng-sast", "core"],
default="core",
help="Tool name",
)
return parser.parse_args()
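# Example invocation (illustrative; file names are placeholders):
#   python cli.py -i joern-output.json -o reports/joern-report.sarif -t joern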
def main():
args = build_args()
src_file = args.src_file
if not os.path.exists(src_file):
print(f"{src_file} doesn't exist")
sys.exit(1)
report_file = args.report_file
reports_dir = os.path.dirname(report_file)
# Create reports directory
    if reports_dir and not os.path.exists(reports_dir):
os.makedirs(reports_dir)
work_dir = os.getcwd()
for e in ["GITHUB_WORKSPACE", "WORKSPACE"]:
if os.getenv(e):
work_dir = os.getenv(e)
break
LOG.debug(f"About to convert {src_file}")
sarif_data = convertLib.convert_file(
args.tool_name,
os.getenv("TOOL_ARGS", ""),
work_dir,
src_file,
report_file,
None,
)
if sarif_data:
LOG.info(f"SARIF file created successfully at {report_file}")
if __name__ == "__main__":
main()
``` |
{
"source": "joernleu/Pythorient",
"score": 3
} |
#### File: Pythorient/src/crystallographic_calculations.py
```python
import numpy as np
from numba import jit
# Euler angles in matrix representation
@jit(nopython=True)
def euler2rotmat(ang):
g = np.zeros((3, 3))
sa = np.sin(ang[0])
ca = np.cos(ang[0])
sb = np.sin(ang[1])
cb = np.cos(ang[1])
sc = np.sin(ang[2])
cc = np.cos(ang[2])
g[0, 0] = ca*cc - sa*sc*cb
g[0, 1] = sa*cc + ca*sc*cb
g[0, 2] = sc*sb
g[1, 0] = -ca*sc - sa*cc*cb
g[1, 1] = -sa*sc + ca*cc*cb
g[1, 2] = cc*sb
g[2, 0] = sa*sb
g[2, 1] = -ca*sb
g[2, 2] = cb
#np.array([[ca*cc - sa*sc*cb, sa*cc + ca*sc*cb, sc*sb],
# [-ca*sc - sa*cc*cb, -sa*sc + ca*cc*cb, cc*sb],
# [sa*sb, -ca*sb, cb]])
#g = np.array([[ca*cc - sa*sc*cb, sa*cc + ca*sc*cb, sc*sb],
# [-ca*sc - sa*cc*cb, -sa*sc + ca*cc*cb, cc*sb],
# [sa*sb, -ca*sb, cb]])
#g = np.matrix([g1, g2, g3])
return g
# axis/angle in Euler angles representation
def axisangle2euler(axisangle):
c = np.cos(axisangle[0]*np.pi/180)
s = np.sin(axisangle[0]*np.pi/180)
    t = 1 - c
u = axisangle[1]/(axisangle[1]**2+axisangle[2]**2+axisangle[3]**2)**0.5
v = axisangle[2]/(axisangle[1]**2+axisangle[2]**2+axisangle[3]**2)**0.5
w = axisangle[3]/(axisangle[1]**2+axisangle[2]**2+axisangle[3]**2)**0.5
g1 = [t*u*u + c, t*u*v - w*s, t*u*w + v*s]
g2 = [t*u*v + w*s, t*v*v + c, t*v*w - u*s]
g3 = [t*u*w - v*s, t*v*w + u*s, t*w*w + c]
phi1 = np.arctan2(-(t*u*w - v*s), (t*v*w + u*s))
Phi = np.arccos(t*w*w + c)
phi2 = np.arctan2((t*u*w + v*s), (t*v*w - u*s))
return phi1, Phi, phi2
# axis/angle in rotation matrix representation
def axisangle2rotmat(angle,axis):
c = np.cos(angle*np.pi/180)
s = np.sin(angle*np.pi/180)
    t = 1 - c
u = axis[0]/(axis[0]**2+axis[1]**2+axis[2]**2)**0.5
v = axis[1]/(axis[0]**2+axis[1]**2+axis[2]**2)**0.5
w = axis[2]/(axis[0]**2+axis[1]**2+axis[2]**2)**0.5
g1 = [t*u*u + c, t*u*v - w*s, t*u*w + v*s]
g2 = [t*u*v + w*s, t*v*v + c, t*v*w - u*s]
g3 = [t*u*w - v*s, t*v*w + u*s, t*w*w + c]
g = np.matrix([g1, g2, g3])
return g
def ggt(x, y):
while y != 0:
x, y = y, x%y
return x
def round_miller(omega):
min_idx = 1
p_min = 1
# The highest uvw value is chosen to be 20
for i in range(1, 20, 1):
omega_2 = [r*i for r in omega]
        p = (abs(omega_2[0] - round(omega_2[0]))
             + abs(omega_2[1] - round(omega_2[1]))
             + abs(omega_2[2] - round(omega_2[2])))
if p < p_min:
p_min = p
min_idx = i
omega = [int(round(i*min_idx)) for i in omega]
if ggt(abs(omega[0]), abs(omega[1])) == ggt(abs(omega[0]), abs(omega[2])) == ggt(abs(omega[1]), abs(omega[2])):
omega = [x/abs(ggt(omega[0], omega[1])) for x in omega]
return omega
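# Illustrative example: round_miller([0.5, 0.5, 1.0]) scales the direction to
# the closest low-integer triple and returns [1, 1, 2].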
# Calculate ideal orientations in [hkl]<uvw> representation
def euler2miller(ang):
sa = np.sin(ang[0])
ca = np.cos(ang[0])
sb = np.sin(ang[1])
cb = np.cos(ang[1])
sc = np.sin(ang[2])
cc = np.cos(ang[2])
g1 = [ca*cc - sa*sc*cb, sa*cc + ca*sc*cb, sc*sb]
g2 = [-ca*sc - sa*cc*cb, -sa*sc + ca*cc*cb, cc*sb]
g3 = [sa*sb, -ca*sb, cb]
uvw = [g1[0], g2[0], g3[0]]
hkl = [g1[2], g2[2], g3[2]]
uvw = round_miller(uvw)
hkl = round_miller(hkl)
return hkl, uvw
# Calculate misorientation axis/angle and misorientation angle
def rotmat2misor_axisangle(rotmat, Symmetry_group):
rotmat_sym = [rotmat*x for x in Symmetry_group]
x_trace = [x.trace() for x in rotmat_sym]
min_idx = 0
for i in range(len(x_trace)):
if x_trace[min_idx] < x_trace[i]:
min_idx = i
    Theta = np.arccos((float(x_trace[min_idx]) - 1) / 2)
    # Rotation axis from the skew-symmetric part of the rotation matrix,
    # scaled by 1 / (2 sin(theta)).
    scale = 1 / (2 * np.sin(Theta))
    omega = [scale * float(rotmat_sym[min_idx][2, 1] - rotmat_sym[min_idx][1, 2]),
             scale * float(rotmat_sym[min_idx][0, 2] - rotmat_sym[min_idx][2, 0]),
             scale * float(rotmat_sym[min_idx][1, 0] - rotmat_sym[min_idx][0, 1])]
    omega = round_miller(omega)
    return omega, Theta*180/np.pi
@jit(nopython=True)
def rotmat2misor_angle(rotmat, Symmetry_group):
x_trace = 0
for i in range(len(Symmetry_group)):
x = np.dot(rotmat, Symmetry_group[i])
x_tr = x[0, 0] + x[1, 1] + x[2, 2]
if x_trace < x_tr:
x_trace = x_tr
Theta = np.arccos(((x_trace) - 1)/2)
return Theta*180/np.pi
def get_IPF_color_vals(ang):
h = np.sin(ang[1])*np.sin(ang[2])
k = np.sin(ang[1])*np.cos(ang[2])
l = np.cos(ang[1])
n = (h**2 + k**2 + l**2)**0.5
h= h * n
k= k * n
l= l * n
hkl_max = min([abs(h), abs(k), abs(l)])
if hkl_max != 0:
h = abs(h)/hkl_max
k = abs(k)/hkl_max
l = abs(l)/hkl_max
if h < k:
h, k = k, h
if k > l:
k, l = l, k
if h > l:
h, l = l, h
c_max = max([abs(l - h), abs(h - k), abs(k)])
r = (l - h)/c_max
g = (h - k)/c_max
b = k/c_max
return abs(r), abs(g), abs(b)
if __name__ == '__main__':
Euler_angles1 = [0, 0, 0]
Euler_angles2 = [90, 45, 0]
Euler_angles3 = [149, 54, 45]
Euler_angles_sigma_3 = [63.43, 48.18, 333.4]
#Euler_angles = [x*np.pi/180 for x in Euler_angles]
#r,g,b = get_IPF_color_vals(Euler_angles)
#print(Euler_angles)
g1 = euler2rotmat(Euler_angles2)
g2 = euler2rotmat(Euler_angles3)
print(g1)
print(g2)
print(np.dot(g1,g2))
#ideal_or = euler2miller(Euler_angles)
#print(ideal_or)
``` |
{
"source": "joernnilsson/py3status",
"score": 3
} |
#### File: py3status/modules/backlight.py
```python
from __future__ import division
import os
STRING_NOT_AVAILABLE = "no available device"
def get_device():
for (path, devices, files) in os.walk("/sys/class/backlight/"):
for device in devices:
if "brightness" in os.listdir(
path + device
) and "max_brightness" in os.listdir(path + device):
return path + device
commands = {
"xbacklight": {
"get": lambda: ["xbacklight", "-get"],
"set": lambda level: ["xbacklight", "-time", "0", "-set", str(level)],
},
"light": {
"get": lambda: ["light", "-G"],
"set": lambda level: ["light", "-S", str(level)],
},
}
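# e.g. commands["light"]["set"](40) builds ["light", "-S", "40"], while
# commands["xbacklight"]["get"]() builds ["xbacklight", "-get"].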
class Py3status:
"""
"""
# available configuration parameters
brightness_delta = 8
brightness_initial = None
brightness_minimal = 1
button_down = 5
button_up = 4
cache_timeout = 10
command = "xbacklight"
device = None
format = u"☼: {level}%"
hide_when_unavailable = False
low_tune_threshold = 0
class Meta:
deprecated = {
"rename": [
{
"param": "device_path",
"new": "device",
"msg": "obsolete parameter use `device`",
}
]
}
def post_config_hook(self):
if not self.device:
self.device = get_device()
elif "/" not in self.device:
self.device = "/sys/class/backlight/%s" % self.device
if self.device is None:
if self.hide_when_unavailable:
return
else:
raise Exception(STRING_NOT_AVAILABLE)
self.format = self.py3.update_placeholder_formats(self.format, {"level": ":d"})
# check for an error code and an output
self.command_available = False
try:
output = self.py3.command_output(self._command_get())
try:
float(output)
self.command_available = True
except ValueError:
pass
except self.py3.CommandError:
pass
if self.command_available and self.brightness_initial:
self._set_backlight_level(self.brightness_initial)
def on_click(self, event):
if not self.command_available:
return None
level = self._get_backlight_level()
button = event["button"]
if button == self.button_up:
delta = self.brightness_delta if level >= self.low_tune_threshold else 1
level += delta
if level > 100:
level = 100
self._set_backlight_level(level)
elif button == self.button_down:
delta = self.brightness_delta if level > self.low_tune_threshold else 1
level -= delta
if level < self.brightness_minimal:
level = self.brightness_minimal
self._set_backlight_level(level)
def _set_backlight_level(self, level):
self.py3.command_run(self._command_set(level))
def _get_backlight_level(self):
if self.command_available:
return float(self.py3.command_output(self._command_get()))
for brightness_line in open("%s/brightness" % self.device, "rb"):
brightness = int(brightness_line)
for brightness_max_line in open("%s/max_brightness" % self.device, "rb"):
brightness_max = int(brightness_max_line)
return brightness * 100 / brightness_max
# Returns the string array for the command to get the current backlight level
def _command_get(self):
return commands[self.command]["get"]()
# Returns the string array for the command to set the current backlight level
def _command_set(self, level):
return commands[self.command]["set"](level)
def backlight(self):
full_text = ""
if self.device is not None:
level = self._get_backlight_level()
full_text = self.py3.safe_format(self.format, {"level": level})
response = {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": full_text,
}
return response
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
```
#### File: py3status/modules/usbguard.py
```python
from threading import Thread
from gi.repository import GLib, Gio
import re
STRING_USBGUARD_DBUS = "start usbguard-dbus.service"
class Py3status:
"""
"""
# available configuration parameters
format = "{format_device}"
format_button_allow = "\[Allow\]"
format_button_reject = "\[Reject\]"
format_device = "{format_button_reject} [{name}|{usb_id}] {format_button_allow}"
format_device_separator = " "
def post_config_hook(self):
self.init = {
"format_button": self.py3.get_placeholders_list(
self.format_device, "format_button_*"
),
"target": {"allow": 0, "reject": 2},
}
self.keys = [
("serial", re.compile(r"\S*serial \"(\S+)\"\S*")),
("policy", re.compile(r"^(\S+)")),
("usb_id", re.compile(r"id (\S+)")),
("name", re.compile(r"name \"(.*)\" hash")),
("hash", re.compile(r"hash \"(.*)\" parent-hash")),
("parent_hash", re.compile(r"parent-hash \"(.*)\" via-port")),
("port", re.compile(r"via-port \"(.*)\" with-interface")),
("interface", re.compile(r"with-interface \{ (.*) \}$")),
]
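        # Illustrative (made-up) example of a usbguard rule string parsed by
        # the regexes above:
        #   block id 090c:1000 serial "AA01" name "Flash Drive" hash "ab12"
        #   parent-hash "cd34" via-port "1-2" with-interface { 08:06:50 }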
self._init_dbus()
def _init_dbus(self):
self.bus = Gio.bus_get_sync(Gio.BusType.SYSTEM, None)
self.proxy = Gio.DBusProxy.new_sync(
self.bus,
Gio.DBusProxyFlags.NONE,
None,
"org.usbguard",
"/org/usbguard/Devices",
"org.usbguard.Devices",
None,
)
for signal in ["DevicePolicyChanged", "DevicePresenceChanged"]:
self.bus.signal_subscribe(
None,
"org.usbguard.Devices",
signal,
None,
None,
0,
lambda *args: self.py3.update(),
)
thread = Thread(target=lambda: GLib.MainLoop().run())
thread.daemon = True
thread.start()
def _get_devices(self):
try:
raw_devices = self.proxy.listDevices("(s)", "block")
except Exception:
raise Exception(STRING_USBGUARD_DBUS)
devices = []
for device_id, string in raw_devices:
device = {"id": device_id}
string = string.encode("latin-1").decode("unicode_escape")
string = string.encode("latin-1").decode("utf-8")
for name, regex in self.keys:
value = regex.findall(string) or None
if value:
value = value[0]
device[name] = value
devices.append(device)
return devices
def _format_device(self, devices):
device_info = []
for device in devices:
for btn in self.init["format_button"]:
composite = self.py3.safe_format(getattr(self, btn), device)
device[btn] = self.py3.composite_update(
composite,
{"index": "{}/{}".format(device["id"], btn.split("_")[-1])},
)
device_info.append(self.py3.safe_format(self.format_device, device))
format_device_separator = self.py3.safe_format(self.format_device_separator)
format_device = self.py3.composite_join(format_device_separator, device_info)
return format_device
def usbguard(self):
devices = self._get_devices()
usbguard_data = {
"device": len(devices),
"format_device": self._format_device(devices),
}
return {
"cached_until": self.py3.CACHE_FOREVER,
"full_text": self.py3.safe_format(self.format, usbguard_data),
"urgent": True,
}
def on_click(self, event):
if isinstance(event["index"], int):
return
device_id, policy_name = event["index"].split("/")
policy = self.init["target"][policy_name]
self.proxy.applyDevicePolicy("(uub)", int(device_id), policy, False)
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
``` |
{
"source": "Joern-Noeller/ratatoskr",
"score": 3
} |
#### File: bin/PARSEC/main.py
```python
import csv
import os
import pandas as pd
import configparser
def setupDirectory(arch, bd, vc, bench):
print("Generating dirs")
print(os.path)
dirName = arch+'_vc_'+str(vc)+'_bd_'+str(bd)+'_'+bench
os.system('mkdir ' + dirName)
os.system('cp -r ../origin/sim ' + dirName)
os.system('mkdir ' + dirName +'/config')
os.system('cp -r ../origin/config/ntConfig.xml ' + dirName+'/config')
os.system('cp -r ../origin/config/'+arch+'.xml ' + dirName+'/config/network.xml')
def execute(arch, bd, vc, bench):
dirName = arch + '_vc_' + str(vc) + '_bd_' + str(bd) + '_' + bench
os.chdir(dirName)
os.system('./sim')
os.chdir('..')
def modifyFiles(arch, bd, vc, bench):
dirName = arch + '_vc_' + str(vc) + '_bd_' + str(bd) + '_' + bench
os.chdir(dirName+'/config')
#set benchmark
filedata = None
with open('ntConfig.xml', 'r') as file:
filedata = file.read()
file.close()
filedata = filedata.replace('BENCHMARK', '../../benchmarks/'+bench)
with open('ntConfig.xml', 'w') as file:
file.write(filedata)
file.close()
#set vc
# set bd
filedata = None
with open('network.xml', 'r') as file:
filedata = file.read()
file.close()
if (arch == 'heteroSynch'):
filedata = filedata.replace('<bufferDepth value=\"4\" />', '<bufferDepth value=\"'+str(bd)+'\"/>')
filedata = filedata.replace('<vcCount value=\"4\" />', '<vcCount value=\"' + str(vc) + '\"/>')
else:
filedata = filedata.replace('<bufferDepth value=\"4\"/>', '<bufferDepth value=\"'+str(bd)+'\"/>')
filedata = filedata.replace('<vcCount value=\"4\"/>', '<vcCount value=\"' + str(vc) + '\"/>')
with open('network.xml', 'w') as file:
file.write(filedata)
file.close()
#set clock speed
os.chdir('../..')
def gather_results(arch, bd, vc, bench):
dirName = arch + '_vc_' + str(vc) + '_bd_' + str(bd) + '_' + bench
latencies = get_latencies(dirName + '/report_Performance.csv')
flit_latency = latencies[0]
return flit_latency
def get_latencies(latencies_results_file):
"""
Read the resulting latencies from the csv file.
Parameters:
- results_file: the path to the result file.
Return:
    - A list of the flit, packet and network latencies.
"""
latencies = []
try:
with open(latencies_results_file, newline='') as f:
spamreader = csv.reader(f, delimiter=' ', quotechar='|')
for row in spamreader:
latencies.append(float(row[1]))
except Exception:
# Add dummy values to latencies, -1.
latencies.append(-1)
latencies.append(-1)
latencies.append(-1)
return (latencies)
architectures = ['homoSynch', 'homoAsynch', 'pseudo', 'heteroSynch']
bds = [4, 8]
vcs = [4, 8]
benchmarks = ['blackscholes_64c_simmedium', 'bodytrack_64c_simlarge', 'canneal_64c_simmedium',
              'dedup_64c_simmedium', 'ferret_64c_simmedium', 'fluidanimate_64c_simmedium',
              'vips_64c_simmedium', 'x264_64c_simmedium']
resultsIndex = benchmarks
resultsColumns = []
for arch in architectures:
for bd in bds:
for vc in vcs:
resultsColumns.append(arch + '_vc_' + str(vc) + '_bd_' + str(bd))
results = pd.DataFrame(index=resultsIndex, columns=resultsColumns)
results = results.fillna(0.0) # with 0s rather than NaNs
print(results)
for arch in architectures:
for bd in bds:
for vc in vcs:
for bench in benchmarks:
setupDirectory(arch, bd, vc, bench)
modifyFiles(arch, bd, vc, bench)
execute(arch, bd, vc, bench)
flitLatency = gather_results(arch, bd, vc, bench)
setting = arch + '_vc_' + str(vc) + '_bd_' + str(bd)
if arch == 'homoAsynch':
flitLatency = flitLatency / 2
results.at[bench, setting] = flitLatency
print(results)
results.to_csv("parsec.csv")
```
#### File: setup-experiments/origin/generate_plots.py
```python
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
###############################################################################
import numpy as np
import pickle
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from PyPDF2 import PdfFileMerger
import glob as glob
import os
###############################################################################
def plot_latencies(results):
"""
Read the raw results from a dictionary of objects, then plot the latencies.
Parameters:
- results: a dictionary of raw data from the pickle file.
Return:
- None.
"""
latenciesFlit = results['latenciesFlit']
latenciesNetwork = results['latenciesNetwork']
latenciesPacket = results['latenciesPacket']
injectionRates = results['injectionRates']
meanLatenciesFlit = np.mean(latenciesFlit, axis=1)
meanLatenciesPacket = np.mean(latenciesPacket, axis=1)
meanLatenciesNetwork = np.mean(latenciesNetwork, axis=1)
stdLatenciesFlit = np.std(latenciesFlit, axis=1)
stdLatenciesPacket = np.std(latenciesPacket, axis=1)
stdLatenciesNetwork = np.std(latenciesNetwork, axis=1)
fig = plt.figure()
plt.ylabel('Latencies in ns', fontsize=11)
plt.xlabel('Injection Rate', fontsize=11)
plt.xlim([0, .03])
linestyle = {'linestyle': '--', 'linewidth': 1, 'markeredgewidth': 1,
'elinewidth': 1, 'capsize': 10}
plt.errorbar(injectionRates, meanLatenciesFlit, yerr=stdLatenciesFlit,
color='r', **linestyle, marker='*')
plt.errorbar(injectionRates, meanLatenciesNetwork,
yerr=stdLatenciesNetwork, color='b', **linestyle,
marker='s')
plt.errorbar(injectionRates, meanLatenciesPacket, yerr=stdLatenciesPacket,
color='g', **linestyle, marker='^')
plt.legend(['Flit', 'Network', 'Packet'])
fig.suptitle('Latencies', fontsize=16)
# plt.show()
fig.savefig('latencies.pdf')
###############################################################################
def plot_VCUsage_stats(inj_dfs, inj_rates):
"""
Plot the VC usage statistics.
Parameteres:
- inj_dfs: the data frames of an injection rate.
- inj_rates: the number of injection rates.
Return:
- None.
"""
for inj_df, inj_rate in zip(inj_dfs, inj_rates):
for layer_id, df in enumerate(inj_df):
fig = plt.figure() # plot a figure for each inj_rate and layer
plt.title('Layer ' + str(layer_id) +
', Injection Rate = ' + str(inj_rate))
plt.ylabel('Count', fontsize=11)
plt.xlabel('VC Usage', fontsize=11)
for col in df.columns.levels[0].values:
plt.errorbar(df.index.values, df[col, 'mean'].values,
yerr=df[col, 'std'].values)
plt.legend(df.columns.levels[0].values)
# plt.show()
fig.savefig('VC_' + str(layer_id) + '_' + str(inj_rate) + '.pdf')
###############################################################################
def plot_BuffUsage_stats(inj_dicts, inj_rates):
"""
Plot the buffer usage statistics.
Parameters:
- inj_dicts: the data dictionaries of an injection rate.
- inj_rates: the number of injection rates.
Return:
- None.
"""
for inj_dict, inj_rate in zip(inj_dicts, inj_rates):
for layer_id, layer_name in enumerate(inj_dict):
layer_dict = inj_dict[layer_name]
fig = plt.figure()
for it, d in enumerate(layer_dict):
df = layer_dict[d]
if not df.empty:
ax = fig.add_subplot(3, 2, it+1, projection='3d')
lx = df.shape[0]
ly = df.shape[1]
xpos = np.arange(0, lx, 1)
ypos = np.arange(0, ly, 1)
xpos, ypos = np.meshgrid(xpos, ypos, indexing='ij')
xpos = xpos.flatten()
ypos = ypos.flatten()
zpos = np.zeros(lx*ly)
dx = 1 * np.ones_like(zpos)
dy = dx.copy()
dz = df.values.flatten()
ax.bar3d(xpos, ypos, zpos, dx, dy, dz, color='b')
ax.set_yticks(ypos)
ax.set_xlabel('Buffer Size')
ax.set_ylabel('VC Index')
ax.set_zlabel('Count')
ax.set_title('Direction:'+str(d))
fig.suptitle('Layer: '+str(layer_name)+', Injection Rate = '
+ str(inj_rate), fontsize=16)
# plt.show()
fig.savefig('Buff_' + str(layer_id) + '_' + str(inj_rate) + '.pdf')
###############################################################################
def read_raw_results(results_file):
"""
Read the raw results from the pickle file.
Parameters:
- results_file: the path to the pickle file.
Return:
- results: a dictionary of objects.
"""
results = None
with open(results_file, 'rb') as f:
results = pickle.load(f)
return results
###############################################################################
def merge_pdfs(output_path):
"""Merge the generated reports in one pdf."""
try:
os.remove(output_path)
except FileNotFoundError:
pass
input_paths = glob.glob('*.pdf')
input_paths.sort()
pdf_merger = PdfFileMerger()
for path in input_paths:
pdf_merger.append(path)
with open(output_path, 'wb') as fileobj:
pdf_merger.write(fileobj)
for path in input_paths:
os.remove(path)
###############################################################################
def main():
"""Main Point of Execution."""
results = read_raw_results('rawResults.pkl')
plot_latencies(results)
#plot_VCUsage_stats(results['VCUsage'], results['injectionRates'])
#plot_BuffUsage_stats(results['BuffUsage'], results['injectionRates'])
merge_pdfs('performance_buffer_VCUsage_report.pdf')
###############################################################################
if __name__ == '__main__':
main()
```
#### File: traffic_generator_receiver/python/data2file.py
```python
import math
import os
# *********************************** Define functions
# Define bit conversion
def bit_length(n):
if n == 1:
return 1
elif n > 1:
return math.ceil(math.log(n) / math.log(2))
# Define bit conversion
def int2binary(w, z):
return bin(w)[2:].zfill(z)
# Define bit conversion
def binary_str2int(str_input):
a = 0
for x in range(0, len(str_input)):
b = int(str_input[len(str_input) - x - 1]) * int(2 ** x)
a += b
return a
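# Example: binary_str2int("1010") returns 10 (bit 3 and bit 1 set).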
# ****** Define bit to str conversion *******
def bits2a(b):
return ''.join(chr(int(''.join(x), 2)) for x in zip(*[iter(b)]*8))
# *********************************** Generic variables
output_file = "lena_converted.jpg" # Must be the same file type in both files
output_file_binary = "data_flits_noc.txt"
output_file_header = "report.txt" # Header parsing report
output_file_mismatch_report = "mismatch_report.txt" # Flit mismatch report
input_data_file_noc = "data_header_noc.txt" # Received data+header from NoC
input_data_file_python = "data_header.txt" # Initial converted data+header from Python
input_packet_length_file = "packet_header_length.txt"
header_included = True # header_included in the packet structure
max_x_dim = 4 # starting from 1 // Must be the same in both files
max_y_dim = 4 # starting from 1 // Must be the same in both files
max_z_dim = 3 # starting from 1 // Must be the same in both files
flit_width = 32 # Must be the same in both files
max_packet_len = 31 # (number of flits + header_included) in a packet // Must be the same in both files
# *********************************** Internal variables
flit_padding_width = flit_width / 16 # Should not be changed
packet_length_line_counter = 0 # Should not be changed
f1_line_counter = 0 # Should not be changed
f2_line_counter = 0 # Should not be changed
line_counter = 0 # Should not be changed
data_line_counter = 1 # Should not be changed
total_packet_length = 0 # Should not be changed
data2save = ""
header2save = ""
header = ""
mismatch_report = ""
data_reconversion = []
# *******************************************************************************
# ********************************************************************* Main body
# ******************************************************************
# *********************************** Header parsing and make report
with open(input_data_file_noc) as f:
input_data = f.readlines()
with open(input_packet_length_file) as f:
packet_length = f.readlines()
line_num = sum(1 for line in open(input_packet_length_file))
x_width = bit_length(max_x_dim)
y_width = bit_length(max_y_dim)
z_width = bit_length(max_z_dim)
packet_length_width = bit_length(max_packet_len)
packet_id_width = math.ceil(math.log(line_num) / math.log(2))
header_total = int(flit_padding_width + packet_id_width + (2 * z_width) + (2 * y_width) + (2 * x_width) + packet_length_width)
header_num = math.ceil(header_total / flit_width)
flit_padding_lsb = (flit_width * header_num) - (packet_id_width + (2 * z_width) + (2 * y_width) + (2 * x_width) + packet_length_width)
flit_padding_msb = (flit_width * header_num) - (int(flit_padding_width) + packet_id_width + (2 * z_width) + (2 * y_width) + (2 * x_width) + packet_length_width)
packet_id_lsb = (flit_width * header_num) - ((2 * z_width) + (2 * y_width) + (2 * x_width) + packet_length_width)
packet_id_msb = (flit_width * header_num) - (packet_id_width + (2 * z_width) + (2 * y_width) + (2 * x_width) + packet_length_width)
source_add_lsb = (flit_width * header_num) - (z_width + y_width + x_width + packet_length_width)
source_add_msb = (flit_width * header_num) - ((2 * z_width) + (2 * y_width) + (2 * x_width) + packet_length_width)
dest_add_lsb = (flit_width * header_num) - packet_length_width
dest_add_msb = (flit_width * header_num) - (z_width + y_width + x_width + packet_length_width)
packet_length_lsb = (flit_width * header_num)
packet_length_msb = (flit_width * header_num) - packet_length_width
counter = 0
for y in range(0, line_num):
packet_length_temp = packet_length[y]
for x in range(total_packet_length, (total_packet_length + int(packet_length_temp))):
if x == total_packet_length:
for z in range(header_num, 0, -1):
header += input_data[x+z-1]
header = header.replace("\n", "")
counter += 1
flit_padding = binary_str2int(header[flit_padding_msb: flit_padding_lsb])
packet_id = binary_str2int(header[packet_id_msb: packet_id_lsb])
source_add = header[source_add_msb: source_add_lsb]
dest_add = header[dest_add_msb: dest_add_lsb]
packet_length_header = binary_str2int(header[packet_length_msb: packet_length_lsb]) - (header_num + int(header_included) - 1)
header2save += "Flit Padding (Byte): " + str(flit_padding).ljust(10) + "| " + "Packet ID: " + \
str(packet_id).ljust(10) + "| " + "Source add (ZYX): " + str(source_add).ljust(15) + "| " + \
"Dest add (ZYX): " + str(dest_add).ljust(15) + "| " + "Packet length: " + \
str(packet_length_header).ljust(10) + "| " + "Header: " + header + "\n"
header = ""
elif (x > total_packet_length + header_num - 1) and (x < ((total_packet_length + int(packet_length_temp)) - 1)):
data2save += str(input_data[x])
elif x == (total_packet_length + int(packet_length_temp)) - 1:
last_line = input_data[x]
data2save += str(last_line[(flit_padding * 8): len(last_line) - 1]) + "\n"
total_packet_length += int(packet_length_temp)
if os.path.exists(output_file_header):
os.remove(output_file_header)
f = open(output_file_header, 'w')
f.write(header2save)
f.close()
if os.path.exists(output_file_binary):
os.remove(output_file_binary)
f = open(output_file_binary, 'w')
f.write(data2save)
f.close()
# ******************************************************************
# ************************************************ Data Reconversion
input_file_2 = open(output_file_binary, 'r')
lines = input_file_2.read()
myList = [item for item in lines.split('\n')]
newString = ''.join(myList)
for i in range(0, len(newString), 8):
a = binary_str2int(newString[i:i+8])
data_reconversion.extend([a])
data_reconversion_bytes = bytes(data_reconversion)
if os.path.exists(output_file):
os.remove(output_file)
f = open(output_file, 'wb')
f.write(data_reconversion_bytes)
f.close()
# ******************************************************************
# *************************** Compare line by line - Mismatch report
f1 = open(input_data_file_noc, "r")
f2 = open(input_data_file_python, "r")
for line1 in f1:
f1_line_counter += 1
for line2 in f2:
f2_line_counter += 1
if line1 != line2:
mismatch_report += "Data mismatch at flit: " + str(f1_line_counter) + "\n"
mismatch_report += "Input data flit from NoC: " + str(line1)
mismatch_report += "Input data flit from Python: " + str(line2)
mismatch_report += "----------------------------------------------------------------------" + "\n"
break
if mismatch_report == "":
mismatch_report = "No mismatch in data!"
if f1_line_counter != f2_line_counter:
print("Data line counter mismatch!")
f1.close()
f2.close()
if os.path.exists(output_file_mismatch_report):
os.remove(output_file_mismatch_report)
f = open(output_file_mismatch_report, 'w')
f.write(mismatch_report)
f.close()
```
#### File: src/interconnect/interconnect.py
```python
import interconnect.phy_struct as phs
import interconnect.data as data
import interconnect.metrics as met
import math
from scipy.optimize import fsolve
import numpy as np
from interconnect.driver import Driver
import warnings
class Interconnect():
'''
Top level class interconnect, which combines all modules.
    For the metal wires, no buffer insertion is applied by default
    (segments=1); if segments is not equal to 1, an equidistant buffer
    insertion is assumed.
    If TSVs is set, a 3D interconnect structure is defined via the metal
    wires in the source layer (the layer where the TSVs cross the
    substrate) and the TSVs. Possible wires in the destination layer have
    to be modeled as a separate Interconnect instance. By default, a
    buffer is assumed at all wire-->TSV and TSV-->wire boundaries for the
    delay calculation. For no buffers at the boundaries, use the method
    'prop_delay_3D_no_buff'.
'''
def __init__(self, B, wire_spacing, wire_width, wire_length=1e-3,
metal_layer=5, segments=1, Driver=Driver.predefined(),
TSVs=False, TSV_radius=1e-6, TSV_pitch=4e-6, TSV_length=50e-6,
KOZ=8e-6, n=None, m=None, ground_ring=False):
if TSVs:
self.is_3D = True
if n is None:
n = round(math.sqrt(B))
                m = math.ceil(B/n)  # assume as square as possible
if n*m > B:
print("----------------------INFO------------------------")
print("Virtuell increase of link width from %d to %d bit"
% (B, n*m))
print("to make it fit into a quadratic array")
print("--------------------------------------------------")
B = n*m
elif n*m != B:
raise ValueError("Metal wire and TSV bit width do not match")
self.C_3D = phs.TSV_cap_matrix(TSV_length, n, m, TSV_radius,
TSV_pitch, ground_ring=ground_ring)
# store C_3D_g as private property to avoid recalculations
self._C_3D_g = phs.TSV_cap_matrix_prob(TSV_length, n, m,
TSV_radius, TSV_pitch,
np.zeros(B), # worst case
ground_ring=ground_ring)
self.R_3D = phs.TSV_resistance(TSV_length, TSV_radius)
self.TSV_array_n, self.TSV_array_m = n, m
self.TSV_pitch, self.TSV_radius = TSV_pitch, TSV_radius
self.TSV_length = TSV_length
self.KOZ = KOZ
self.ground_ring = ground_ring
else:
self.is_3D = False
self.B = B
self.segments = segments
self._Nb = segments+1 # number of buffers
        # self.C_2D is the overall accumulated capacitance over all segments
self.wire_length, self.wire_spacing = wire_length, wire_spacing
self.wire_width = wire_width
self.C_2D = phs.metal_wire_cap_matrix(wire_length, B, wire_spacing,
wire_width, layer=metal_layer)
self.R_2D = phs.metal_wire_resistance(wire_length, wire_spacing,
wire_width, layer=metal_layer)
self.Driver = Driver
self.metal_layer = metal_layer
@property
def area_3D(self):
'''
        Returns the required silicon area
        (0 if no TSVs are used).
'''
if self.is_3D is False:
return 0
else:
return met.TSV_array_area(self.TSV_pitch, self.TSV_array_m,
self.TSV_array_n, self.KOZ)
@property
def power_ratio_3D_over_2D(self):
'''
roughly estimates how much power is consumed by the TSVs
compared to the metal wires by comparing the capacitance matrices
'''
return sum(sum(self.C_3D))/sum(sum(self.C_2D))
def power(self, data_inst=None, mux=None, f=1e9):
'''
returns power consumption of the interconnect for the specified
clock frequency "f". Also the transmitted data can be defined via
the variable "data_inst", by either:
#1: assigning instance of class "DataStream" or "DataStreamProbs"
#2: assigning array of samples (numpy or list/tuple)
        #3: assigning multiple instances of the class "DataStream" or
            "DataStreamProbs" via a list/tuple. In this scenario, the
            probabilities for the interleaving of the single DataStreams
            have to be defined via the variable "mux"
If "data_inst" is 'None', random data is assumed
'''
        # # VAR1: data unspecified (random data, no hold assumed)
if data_inst is None:
C = self.C_2D + self.C_3D if self.is_3D is True else np.copy(
self.C_2D)
return met.mean_power2(C, self.Driver.V_dd, f, self.Driver.C_in) \
+ self._Nb*self.B*self.Driver.mean_power(f=f) # C load dep neg
# # VAR2: data specified
if isinstance(data_inst, (list, tuple, np.ndarray)): # samples
data_inst = data.DataStream(data_inst, self.B)
data_inst = data.DataStreamProb(data_inst, mux)
C = np.copy(self.C_2D)
if self.is_3D is True:
C = C + phs.TSV_cap_matrix_prob(self.TSV_length, self.TSV_array_n,
self.TSV_array_m, self.TSV_radius,
self.TSV_pitch,
data_inst.bit_prob_vec,
C_r=self.C_3D, C_g=self._C_3D_g,
ground_ring=self.ground_ring)
return met.mean_power(data_inst.toggle_prob_vec,
data_inst.corr_switching_mat, C,
self.Driver.V_dd, f, self.Driver.C_in) \
+ self._Nb*sum(
self.Driver.mean_power(data_inst.toggle_prob_vec, f))
def E(self, data_inst=None, mux=None):
'''
        (mean) energy consumption per clock cycle for the transmission
of "data_inst" (for more details on "data_inst" and "mux" see
description of class function "power")
'''
return self.power(data_inst=data_inst, mux=mux, f=1)
def prop_delay(self, wc_switching=None, verbose=False):
'''
-- delay of the interconnect structure --
if CAC coding is applied: "wc_switching" should be defined
'''
t_2d_seg = met.delay(self.C_2D/self.segments, self.R_2D/self.segments,
self.Driver.t_0(), self.Driver.C_in,
self.Driver.R_on(2*max(sum(self.C_2D)
- self.C_2D[0, 0])),
wc_switching=wc_switching)
if self.is_3D is True:
t_3d = met.delay(self._C_3D_g, self.R_3D, self.Driver.t_0(),
self.Driver.C_in, self.Driver.R_on(
2*max(sum(self._C_3D_g) - self._C_3D_g[0, 0])),
wc_switching=wc_switching)
else:
t_3d = 0
if verbose is True:
print("Metal wire delay: %s in [s]" % self.segments*t_2d_seg)
print(" TSV delay: %s in [s]" % t_3d)
return self.segments*t_2d_seg+t_3d
def prop_delay_3D_no_buff(self, wc_switching=None):
'''
-- delay of the interconnect structure for no buffers
between metal wires and TSVs--
        (in practice not recommended due to signal integrity issues!)
if CAC coding is applied: "wc_switching" should be defined.
If you want the delay of the metal wires for no buffers,
        just set segments to '1' and run the function 'prop_delay'
'''
        if not (self.is_3D and self.segments == 1):
raise ValueError("Segment count has to be one (no buff 2D),"
" and the interconnect has to be 3D")
C = [self.C_2D, self._C_3D_g]
R = [self.R_2D, self.R_2D]
return met.delay_mult_no_buf(C, R, self.Driver.t_0(),
self.Driver.C_in, self.Driver.R_on(
2*max(sum(self._C_3D_g) - self._C_3D_g[0, 0])),
wc_switching=wc_switching)
def max_metal_wire_length(self, f, factor=0.8, max_segments=10,
wc_switching=None):
'''
        Function that estimates how long the metal wires
        of a 2D or 3D interconnect can be if it should run at
        a clock frequency of "f". A "factor" smaller than one is
        used to leave some time for the remaining elements (e.g.
        flip-flops). For example, for "factor"=0.8: 20% of the clock
        cycle is left for the remaining circuit.
(N_max: maximum buffers inserted)
'''
warnings.filterwarnings(
'ignore', 'The iteration is not making good progress')
t_left = factor/f
c = self.C_2D/self.wire_length # cap per length
r = self.R_2D/self.wire_length # res per length
if self.is_3D is True:
t_3D = met.delay(self._C_3D_g, self.R_3D, self.Driver.t_0(),
self.Driver.C_in, self.Driver.R_on(
2*max(sum(self._C_3D_g) - self._C_3D_g[0, 0])),
wc_switching=wc_switching)
t_left -= t_3D
if t_left <= 0:
                raise ValueError('TSV array delay %f [ns] too big'
                                 % (t_3D*1e9))
else:
t_3D = 0
guess_l = self.wire_length*((t_left-self.Driver.t_0())/(
self.prop_delay()-t_3D-self.Driver.t_0())) # makes a guess
def fu(i):
def f_int(x):
t_seg = met.delay(c*x/i, r*x/i, self.Driver.t_0(),
self.Driver.C_in,
self.Driver.R_on(2*max(sum(c*x)
- c[0, 0]*x)),
wc_switching=wc_switching)
return (t_left-i*t_seg)
return f_int
l_max = segs = 0
for i in range(1, max_segments+1):
            if (i+1)*self.Driver.t_0() <= t_left:  # check if generally possible
var = fsolve(fu(i), guess_l)
[segs, l_max] = [i, var] if var > l_max else [segs, l_max]
return {'Max Wire Length': float(l_max), 'Segments': segs}
def metal_wire_length(self, length, segments=1):
'''
        Creates a copy of the instance of the class "Interconnect",
with the exception that the metal-wire-length is set to
"length", and the segments are equal to "segments"
'''
if self.is_3D is True:
return Interconnect(self.B, self.wire_spacing, self.wire_width,
self.wire_length, self.metal_layer,
segments, self.Driver, True,
self.TSV_radius, self.TSV_pitch,
self.TSV_length, self.KOZ,
self.TSV_array_n, self.TSV_array_m,
self.ground_ring)
else:
return Interconnect(self.B, self.wire_spacing, self.wire_width,
self.wire_length, self.metal_layer,
segments, self.Driver, False)
```
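A minimal usage sketch for the `Interconnect` class above. The wire geometry, TSV parameters and clock frequency are illustrative assumptions, and the import path simply mirrors the package layout used by the module's own imports (`interconnect.*`).
```python
# Illustrative sketch only: every numeric parameter below is an assumption.
from interconnect.interconnect import Interconnect

# 32-bit 3D link: 1 mm of metal wire in the source layer plus a TSV array.
link = Interconnect(B=32,
                    wire_spacing=0.3e-6, wire_width=0.3e-6, wire_length=1e-3,
                    metal_layer=5,
                    TSVs=True, TSV_radius=1e-6, TSV_pitch=4e-6, TSV_length=50e-6)

print("TSV array area [m^2]:", link.area_3D)
print("Power @ 1 GHz, random data [W]:", link.power(f=1e9))
print("Energy per cycle [J]:", link.E())
print("Propagation delay [s]:", link.prop_delay())
print("Max wire length @ 1 GHz:", link.max_metal_wire_length(f=1e9))
```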
#### File: src/test/test_data.py
```python
import unittest
import numpy as np
import random
from interconnect import data
class test_data_module(unittest.TestCase):
def test_functions(self):
print('\n'+"Test Data module ")
self.assertEqual(data.de2bis([10], 4), ['1010'])
self.assertEqual(data.de2bis([-6], 4), ['1010'])
def test_DataStream_object(self):
a = data.DataStream([0, 0], 3)
b = data.DataStream([7, 7], 3)
c = data.DataStream([0, 7], 3)
d = data.DataStream([1, 3], 3)
self.assertTrue(
np.array_equal(a.binary_samples_mat, [[0, 0, 0], [0, 0, 0]]))
self.assertTrue(np.array_equal(
b.binary_samples_mat, [[1, 1, 1], [1, 1, 1]]))
self.assertTrue(np.array_equal(c.bit_prob_mat, 0.5*np.ones((3, 3))))
self.assertTrue(np.array_equal(
d.bit_prob_mat, [[0, 0, 0], [0, .5, .5], [0, .5, 1]]))
with self.assertRaises(ValueError):
data.DataStream(4, 2)
def test_DataStreamProb_object(self):
a = data.DataStream.from_stoch(
1000, 8, uniform=1, ro=0.4)
b = data.DataStream.from_stoch(
1000, 8, uniform=0, ro=0.3, mu=0, log2_std=6)
c = data.DataStream.from_stoch(1000, 8) # random data
a_prob = data.DataStreamProb(a)
b_prob = data.DataStreamProb(b)
c_prob = data.DataStreamProb(c)
self.assertEqual(a_prob.B, a.B)
self.assertEqual(b_prob.B, 8)
self.assertTrue(
np.array_equal(a_prob.toggle_prob_vec, a.toggle_prob_vec))
self.assertFalse(
np.array_equal(c_prob.toggle_prob_vec, b.toggle_prob_vec))
self.assertTrue(np.array_equal(
c_prob.corr_switching_mat, c.corr_switching_mat))
self.assertFalse(np.array_equal(
a_prob.corr_switching_mat, b_prob.corr_switching_mat))
self.assertTrue(np.array_equal(
b_prob.bit_prob_mat, b.bit_prob_mat))
self.assertFalse(np.array_equal(
a_prob.bit_prob_mat, c.bit_prob_mat))
# testing of the multiplexing?
# testing of the class method "from_prob"
Ts, Tc, pr = [1, 2, 3]
Ts_v, Tc_m, pr_m = np.ones(4), 2*np.ones((4, 4)), 3*np.ones((4, 4))
a_prob = data.DataStreamProb.from_probs(Ts, Tc, pr, 4)
b_prob = data.DataStreamProb.from_probs(Ts_v, Tc_m, pr_m, 4)
with self.assertRaises(IndexError):
data.DataStreamProb.from_probs(Tc_m, Tc_m, pr_m, 4)
data.DataStreamProb.from_probs(Ts, Tc_m, pr_m, 4)
def test_DataStream_interleaving(self):
a = data.DataStream.from_stoch(
10000, 8, uniform=0, ro=0.4, mu=0, log2_std=6)
b = data.DataStream.from_stoch(
10000, 8, uniform=0, ro=-0.3, mu=0, log2_std=6)
c = data.DataStream.from_stoch(
10000, 10, uniform=0, ro=-0.3, mu=0, log2_std=6)
d1 = a.samples
d2 = b.samples
tr_pr_d1 = [.5, .3, .2, 0] # trans probabilities from d1 to (d1, d2, h1, h2)
tr_pr_d2 = [.5, .4, 0, .1] # trans probabilities from d2 to (d1, d2, h1, h2)
tr_pr_h1 = [.6, .3, .1, 0] # trans probabilities from h1 to (d1, d2, h1, h2)
        tr_pr_h2 = [.1, .7, 0, .2]  # trans probabilities from h2 to (d1, d2, h1, h2)
state = 0 # start in state d1
tr_pr = tr_pr_d1
values = np.array([d1[0]])
d1 = np.delete(d1, 0)
mux = np.zeros((4, 4))
for i in range(10000-1):
x = random.random()
if x <= tr_pr[0]:
values = np.append(values, d1[0])
d1 = np.delete(d1, 0)
next_state = 0
next_tr_pr = tr_pr_d1
elif x <= (tr_pr[0] + tr_pr[1]):
values = np.append(values, d2[0])
d2 = np.delete(d2, 0)
next_state = 1
next_tr_pr = tr_pr_d2
elif x <= (tr_pr[0] + tr_pr[1]+tr_pr[2]):
values = np.append(values, values[len(values)-1])
next_state = 2
next_tr_pr = tr_pr_h1
else:
values = np.append(values, values[len(values)-1])
next_state = 3
next_tr_pr = tr_pr_h2
mux[state, next_state] += 1
tr_pr = next_tr_pr
state = next_state
mux = mux / (10000-1)
# self.assertEqual(sum(sum(mux)), 1)
ab_muxed = data.DataStream(values, B=8, is_signed=a.is_signed)
ab_muxed_theo = data.DataStreamProb([a, b], mux)
with self.assertRaises(ValueError):
data.DataStreamProb([a, c], mux)
with self.assertRaises(IndexError):
data.DataStreamProb([a, b], [0, 0])
# the following lines will sometimes cause faulty test results due to rounding!!
np.testing.assert_almost_equal(ab_muxed.toggle_prob_vec, ab_muxed_theo.toggle_prob_vec, decimal=1,
err_msg="\nMux Toggle Misprediction")
np.testing.assert_almost_equal(ab_muxed.corr_switching_mat, ab_muxed_theo.corr_switching_mat, decimal=1,
err_msg="\nMux Toggle Misprediction")
np.testing.assert_almost_equal(ab_muxed.bit_prob_vec, ab_muxed_theo.bit_prob_vec, decimal=1,
err_msg="\nMux Toggle Misprediction")
if __name__ == '__main__':
unittest.main()
```
#### File: experiments/base1/configure.py
```python
import configparser
import xml_writers as writers
import plot_network
import os
import multiprocessing
###############################################################################
class Configuration:
""" The main configuration """
def __init__(self, path):
self.path = path
config = configparser.ConfigParser()
try:
config.read(self.path)
except Exception:
raise
self.simulationTime = int(config['Config']['simulationTime'])
self.flitsPerPacket = int(config['Config']['flitsPerPacket'])
self.bitWidth = int(config['Config']['bitWidth'])
self.benchmark = config['Config']['benchmark']
self.libDir = config['Config']['libDir']
self.simDir = config['Synthetic']['simDir']
self.basedir = os.getcwd()
self.restarts = int(config['Synthetic']['restarts'])
self.warmupStart = int(config['Synthetic']['warmupStart'])
self.warmupDuration = int(config['Synthetic']['warmupDuration'])
self.warmupRate = float(config['Synthetic']['warmupRate'])
self.runRateMin = float(config['Synthetic']['runRateMin'])
self.runRateMax = float(config['Synthetic']['runRateMax'])
self.runRateStep = float(config['Synthetic']['runRateStep'])
self.runStartAfterWarmup = int(config['Synthetic']['runStartAfterWarmup'])
self.runStart = self.warmupStart + self.warmupDuration + self.runStartAfterWarmup
self.runDuration = int(config['Synthetic']['runDuration'])
self.numCores = int(config['Synthetic']['numCores'])
if (self.numCores == -1):
self.numCores = multiprocessing.cpu_count()
self.bufferReportRouters = config['Report']['bufferReportRouters']
try:
self.bufferReportRouters = self.bufferReportRouters[1:len(self.bufferReportRouters)-1]
except Exception:
raise
self.x = config['Hardware']['x']
self.y = config['Hardware']['y']
try:
self.x = self.x[1:len(self.x)-1]
self.x = self.x.split(',')
self.x = [ int(x) for x in self.x ]
self.y = self.y[1:len(self.y)-1]
self.y = self.y.split(',')
self.y = [ int(y) for y in self.y ]
except Exception:
raise
self.z = int(config['Hardware']['z'])
self.routing = config['Hardware']['routing']
self.clockDelay = config['Hardware']['clockDelay']
try:
self.clockDelay = self.clockDelay[1:len(self.clockDelay)-1]
self.clockDelay = self.clockDelay.split(',')
self.clockDelay = [ int(cd) for cd in self.clockDelay ]
except Exception:
raise
self.bufferDepthType = config['Hardware']['bufferDepthType']
#self.bufferDepth = int(config['Hardware']['bufferDepth'])
self.bufferDepth = config['Hardware']['bufferDepth']
self.bufferDepth = self.bufferDepth[1:len(self.bufferDepth)-1]
self.bufferDepth = self.bufferDepth.split(',')
self.buffersDepths = config['Hardware']['buffersDepths']
self.buffersDepths = self.buffersDepths[1:len(self.buffersDepths)-1]
self.vcCount = int(config['Hardware']['vcCount'])
self.topologyFile = config['Hardware']['topologyFile']
self.flitSize = int(config['Hardware']['flitSize'])
self.portNum = int(config['Hardware']['portNum'])
###############################################################################
def main():
config = Configuration('config.ini')
writer = writers.ConfigWriter(config)
writer.write_config('config.xml')
writer = writers.NetworkWriter(config)
writer.write_network('network.xml')
#plot_network.main()
###############################################################################
if __name__ == '__main__':
main()
```
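The `Configuration` class above expects a `config.ini` with `[Config]`, `[Synthetic]`, `[Report]` and `[Hardware]` sections containing exactly the keys read in `__init__`. The sketch below writes such a file with placeholder values; only the key names are taken from the code above, every value is an illustrative assumption.
```python
# Sketch: write a minimal config.ini with the keys that Configuration reads.
import configparser

cfg = configparser.ConfigParser()
cfg['Config'] = {
    'simulationTime': '100000', 'flitsPerPacket': '4', 'bitWidth': '32',
    'benchmark': 'synthetic', 'libDir': 'lib',
}
cfg['Synthetic'] = {
    'simDir': 'sim', 'restarts': '4',
    'warmupStart': '10', 'warmupDuration': '1000', 'warmupRate': '0.01',
    'runRateMin': '0.005', 'runRateMax': '0.05', 'runRateStep': '0.005',
    'runStartAfterWarmup': '100', 'runDuration': '10000', 'numCores': '-1',
}
cfg['Report'] = {'bufferReportRouters': '[0,1,2,3]'}
cfg['Hardware'] = {
    'x': '[4,4]', 'y': '[4,4]', 'z': '2',
    'routing': 'XYZ', 'clockDelay': '[1,1]',
    'bufferDepthType': 'single', 'bufferDepth': '[4,4]',
    'buffersDepths': '[4,4,4,4,4]', 'vcCount': '4',
    'topologyFile': 'mesh', 'flitSize': '32', 'portNum': '7',
}

with open('config.ini', 'w') as f:
    cfg.write(f)
```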
#### File: ratatoskr/scripts/generateAsymmetricMesh.py
```python
filename = "mesh.xml"
z = 2
x = [5, 3]
y = [5, 3]
bufferDepth = [16,16]
vcs = [4,4]
verticalNode1 = [0, 2, 4, 10, 12, 14, 20, 22, 24]
verticalNode2 = range(25,34)
verbose = False
def format(value):
return "%.3f" % value
def makeConnection(file, id, srcNode, dstNode, bufferDepth, vcs):
file.write("\t\t<con id=\""+str(id)+"\">\n")
#file.write("\t\t\t<length value = \"900\"/>\n")
#file.write("\t\t\t<width value = \"3\"/>\n")
#file.write("\t\t\t<depth value = \"1\"/>\n")
#file.write("\t\t\t<effectiveCapacityCl value = \"1.0\"/>\n")
#file.write("\t\t\t<wireCouplingCapacitanceCc value = \"1.0\"/>\n")
#file.write("\t\t\t<wireSelfCapacitanceCg value = \"1.0\"/>\n")
#file.write("\t\t\t<wireSelfCapacitancePerUnitLengthCg value = \"1.0\"/>\n")
#file.write("\t\t\t<tsvarraySelfCapacitanceC0 value = \"1.0\"/>\n")
#file.write("\t\t\t<tsvarrayNeighbourCapacitanceCd value = \"1.0\"/>\n")
#file.write("\t\t\t<tsvarrayDiagonalCapacitanceCn value = \"1.0\"/>\n")
#file.write("\t\t\t<tsvarrayEdgeCapacitanceCe value = \"1.0\"/>\n")
file.write("\t\t\t<ports>\n")
file.write("\t\t\t\t<port id =\"0\">\n")
file.write("\t\t\t\t\t<node value=\""+str(srcNode)+"\"/>\n")
file.write("\t\t\t\t\t<bufferDepth value=\""+str(bufferDepth)+"\"/>\n")
file.write("\t\t\t\t\t<vcCount value=\""+str(vcs)+"\"/>\n")
file.write("\t\t\t\t</port>\n")
file.write("\t\t\t\t<port id =\"1\">\n")
file.write("\t\t\t\t\t<node value=\""+str(dstNode)+"\"/>\n")
file.write("\t\t\t\t\t<bufferDepth value=\""+str(bufferDepth)+"\"/>\n")
file.write("\t\t\t\t\t<vcCount value=\""+str(vcs)+"\"/>\n")
file.write("\t\t\t\t</port>\n")
file.write("\t\t\t</ports>\n")
file.write("\t\t</con>\n")
assert(len(x) == z and len(y) == z and len(bufferDepth) == z and len(vcs) == z), "Dimensions do not agree."
assert(len(verticalNode1) == len(verticalNode2)), "Dimensions of vertical connections do not agree."
file = open(filename, "w")
# write header
file.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
file.write("<network-on-chip xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:noNamespaceSchemaLocation=\"layer.xsd\">\n")
file.write("\t<nodeTypes>\n")
file.write("\t\t<nodeType id=\"0\">\n")
file.write("\t\t\t<routerModel value=\"XYZ\"/>\n")
file.write("\t\t\t<clockSpeed value=\"2\"/>\n")
file.write("\t\t</nodeType>\n")
file.write("\t</nodeTypes>\n\n\n")
# generate layer list
print("Generating layers...")
file.write("\t<layerTypes>\n")
for layer in range(z):
if verbose:
print ("Generating layer "+str(layer))
file.write("\t\t<layerType id=\""+str(layer)+"\">\n")
file.write("\t\t\t<technology value=\"130\"/>\n")
file.write("\t\t</layerType>\n")
file.write("\t</layerTypes>\n\n\n")
# generate node list
print("Generating nodes...")
file.write("\t<nodes>\n")
num = 0
for layer in range(z):
    for xval in range(x[layer]):  # sven: x/y swapped
for yval in range(y[layer]):
if verbose:
print("Generating node in layer "+str(layer)+" at x: "+str(xval)+" y:"+str(yval))
node = sum([a*b for a,b in zip(x[:layer],y[:layer])]) + (xval*(x[layer])+yval)
file.write("\t\t<node id=\""+str(node)+"\">\n")
file.write("\t\t\t<xPos value=\""+str(format(float(xval)/(float(x[layer]-1))))+"\"/>\n")
file.write("\t\t\t<yPos value=\""+str(format(float(yval)/(float(y[layer]-1))))+"\"/>\n")
file.write("\t\t\t<zPos value=\""+str(format(float(layer)/(float(z-1))))+"\"/>\n")
file.write("\t\t\t<nodeType value=\"0\"/>\n")
file.write("\t\t\t<layerType value=\""+str(layer)+"\"/>\n")
file.write("\t\t</node>\n")
num = num + 1
file.write("\t</nodes>\n\n\n")
# generate connections
print("Generating connections...")
file.write("\t<connections>\n")
num = 0
for layer in range(z):
for xval in range(x[layer]):
for yval in range(y[layer]):
# connections at local ports:
if verbose:
print("Generating connections at node in layer "+str(layer)+" at x: "+str(xval)+" y:"+str(yval))
node1 = sum([a*b for a,b in zip(x[:layer],y[:layer])]) + (xval*(x[layer])+yval) #index of routers at position [xval, yval, layer]
if verbose:
print(str(num)+": Connecting "+str(node1)+" locally")
file.write("\t\t<con id=\""+str(num)+"\">\n")
#file.write("\t\t\t<length value = \"0\"/>\n")
#file.write("\t\t\t<width value = \"3\"/>\n")
#file.write("\t\t\t<depth value = \"1\"/>\n")
#file.write("\t\t\t<effectiveCapacityCl value = \"1.0\"/>\n")
#file.write("\t\t\t<wireCouplingCapacitanceCc value = \"1.0\"/>\n")
#file.write("\t\t\t<wireSelfCapacitanceCg value = \"1.0\"/>\n")
#file.write("\t\t\t<wireSelfCapacitancePerUnitLengthCg value = \"1.0\"/>\n")
#file.write("\t\t\t<tsvarraySelfCapacitanceC0 value = \"1.0\"/>\n")
#file.write("\t\t\t<tsvarrayNeighbourCapacitanceCd value = \"1.0\"/>\n")
#file.write("\t\t\t<tsvarrayDiagonalCapacitanceCn value = \"1.0\"/>\n")
#file.write("\t\t\t<tsvarrayEdgeCapacitanceCe value = \"1.0\"/>\n")
file.write("\t\t\t<ports>\n")
file.write("\t\t\t\t<port id =\"0\">\n")
file.write("\t\t\t\t\t<node value=\""+str(node1)+"\"/>\n")
file.write("\t\t\t\t\t<bufferDepth value=\""+str(bufferDepth[layer])+"\"/>\n")
file.write("\t\t\t\t\t<vcCount value=\""+str(vcs[layer])+"\"/>\n")
file.write("\t\t\t\t</port>\n")
file.write("\t\t\t</ports>\n")
file.write("\t\t</con>\n")
num = num + 1
            # connections within layer following mesh topology
# only east and north required since links are bidirectional
for neigbour in range(2):
writeCon = False
if neigbour == 0 and yval != (y[layer]-1):
# east: node2 = node1+1
node2 = node1 + 1
writeCon = True
elif neigbour == 1 and xval != (x[layer]-1):
# north: node 2 = node1+x
node2 = node1 + (y[layer])
writeCon = True
if writeCon:
if verbose:
print(str(num)+": Connecting "+str(node1)+" with "+str(node2))
makeConnection(file, num, node1, node2, bufferDepth[layer], vcs[layer])
num = num + 1
# vertical links
for node1, node2 in zip(verticalNode1, verticalNode2):
if verbose:
print(str(num)+": Connecting "+str(node1)+" with "+str(node2))
makeConnection(file, num, node1, node2, bufferDepth[layer], vcs[layer])
num = num + 1
file.write("\t</connections>\n")
file.write("</network-on-chip>\n")
file.close()
```
#### File: simulations/General/run_simulation.py
```python
import os
import sys
import shutil
import subprocess
import xml.etree.ElementTree as ET
import csv
import numpy as np
from joblib import Parallel, delayed
import multiprocessing
import pickle
import configparser
import pandas as pd
from combine_hists import combine_VC_hists, combine_Buff_hists,\
init_data_structure
###############################################################################
class Configuration:
"""
The main configuration for all of the individual simulations.
"""
def __init__(self, root_sim_folder):
self.root_sim_folder = os.path.abspath(root_sim_folder)
self.path = os.path.join(root_sim_folder, 'config.ini')
self.config = configparser.ConfigParser()
try:
self.config.read(self.path)
except Exception:
raise
self.topologyFile = self.config['DEFAULT']['topologyFile']
self.libdir = os.path.join(self.root_sim_folder,
self.config['DEFAULT']['libdir'])
self.simdir = os.path.join(self.root_sim_folder,
self.config['DEFAULT']['simdir'])
self.basedir = os.getcwd()
self.simulation_time = int(self.config['DEFAULT']['simulation_time'])
self.restarts = int(self.config['DEFAULT']['restarts'])
self.warmup_start = int(self.config['DEFAULT']['warmup_start'])
self.warmup_duration = int(self.config['DEFAULT']['warmup_duration'])
self.warmup_rate = float(self.config['DEFAULT']['warmup_rate'])
self.run_rate_min = float(self.config['DEFAULT']['run_rate_min'])
self.run_rate_max = float(self.config['DEFAULT']['run_rate_max'])
self.run_rate_step = float(self.config['DEFAULT']['run_rate_step'])
self.run_start_after_warmup = int(self.config['DEFAULT']['run_start_after_warmup'])
self.run_start = self.warmup_start + self.warmup_duration + self.run_start_after_warmup
self.run_duration = int(self.config['DEFAULT']['run_duration'])
self.num_cores = int(self.config['DEFAULT']['num_cores'])
if (self.num_cores == -1):
self.num_cores = multiprocessing.cpu_count()
###############################################################################
def write_config_file(config, configFileSrc, configFileDst, injectionRate):
"""
Write the configuration file for the urand simulation.
Parameters:
- config: configuration object.
- configFileSrc: the source of the configuration file.
- configFileDst: the destination of the config file.
- injectionRate: the injection rate.
Return:
- None.
"""
try:
configTree = ET.parse(configFileSrc)
except Exception:
raise
configTree.find('noc/nocFile').text = 'config/' + config.topologyFile + '.xml'
configTree.find('general/simulationTime').set('value', str(config.simulation_time))
configTree.find('general/outputToFile').set('value', 'true')
configTree.find('general/outputToFile').text = 'report'
for elem in list(configTree.find('application/synthetic').iter()):
if elem.get('name') == 'warmup':
elem.find('start').set('min', str(config.warmup_start))
elem.find('start').set('max', str(config.warmup_start))
elem.find('duration').set('min',
str(config.warmup_start + config.warmup_duration))
elem.find('duration').set('max',
str(config.warmup_start + config.warmup_duration))
elem.find('injectionRate').set('value', str(injectionRate))
if elem.get('name') == 'run':
elem.find('start').set('min', str(config.run_start))
elem.find('start').set('max', str(config.run_start))
elem.find('duration').set('min', str(config.run_start + config.run_duration))
elem.find('duration').set('max', str(config.run_start + config.run_duration))
elem.find('injectionRate').set('value', str(injectionRate))
configTree.write(configFileDst)
###############################################################################
def write_sim_files(config, simdir):
"""
Write the files that are associated with each run of the simulation
(the executable sim + the configuration file).
Parameters:
- config: configuration object.
- simdir: the path of the simulation directory.
Return:
- None.
"""
confdir = simdir + '/config'
shutil.rmtree(simdir, ignore_errors=True)
try:
os.makedirs(simdir)
os.makedirs(confdir)
except OSError:
raise
else:
shutil.copy(os.path.join(config.basedir, 'sim'), simdir)
shutil.copy(config.libdir + '/' + config.topologyFile + '.xml', confdir)
###############################################################################
def run_indivisual_sim(simdir, basedir):
"""
Run an individual simulation.
Parameters:
    - simdir: the path to the simulation directory.
    - basedir: the path to the directory containing all simulations.
Return:
- None.
"""
os.chdir(simdir)
args = ('./sim')
outfile = open('log', 'w')
try:
subprocess.run(args, stdout=outfile, check=True)
except subprocess.CalledProcessError:
raise
finally:
outfile.flush()
outfile.close()
os.chdir(basedir)
###############################################################################
def get_latencies(latencies_results_file):
"""
Read the resulting latencies from the csv file.
Parameters:
- results_file: the path to the result file.
Return:
    - A list of the flit, packet and network latencies.
"""
latencies = []
try:
with open(latencies_results_file, newline='') as f:
spamreader = csv.reader(f, delimiter=' ', quotechar='|')
for row in spamreader:
latencies.append(row[1])
except Exception:
# Add dummy values to latencies, -1.
latencies.append(-1)
latencies.append(-1)
latencies.append(-1)
return(latencies)
###############################################################################
def begin_individual_sim(config, restart, injectionRates, injIter):
"""
    Begin a simulation with a specific injection rate.
Parameters:
- config: configuration object.
- restart: the index of restarts.
    - injectionRates: the list of injection rates.
- injIter: the index of the injection rate to be run.
Return:
- None.
"""
print('Simulation with injection rate: ' + str(injectionRates[injIter])
+ ' restart ' + str(restart))
currentSimDir = config.simdir + str(restart)
currentConfDir = currentSimDir + '/config'
write_sim_files(config, currentSimDir)
write_config_file(config,
os.path.join(config.root_sim_folder, 'config/config.xml'),
currentConfDir + '/config.xml',
injectionRates[injIter])
run_indivisual_sim(currentSimDir, config.root_sim_folder)
###############################################################################
def begin_all_sims(config):
"""
Begin all simulations.
Parameters:
- config: configuration object.
    Return:
- results: a dictionary of the results.
"""
print('Generating urand simulation with injection rate from ' +
str(config.run_rate_min) + ' to ' + str(config.run_rate_max) + ' steps ' +
str(config.run_rate_step))
    # Initialize the latencies.
injectionRates = np.arange(config.run_rate_min, config.run_rate_max, config.run_rate_step)
injectionRates = [round(elem, 4) for elem in injectionRates]
latenciesFlit = -np.ones((len(injectionRates), config.restarts))
latenciesPacket = -np.ones((len(injectionRates), config.restarts))
latenciesNetwork = -np.ones((len(injectionRates), config.restarts))
# Run the full simulation (for all injection rates).
injIter = 0
VCUsage = []
BuffUsage = []
for inj in injectionRates:
print('Starting Sims with ' + str(config.num_cores) + ' processes')
Parallel(n_jobs=config.num_cores)(delayed(begin_individual_sim)
(config, restart, injectionRates, injIter) for restart in range(config.restarts))
VCUsage_inj = [pd.DataFrame() for i in range(3)]
BuffUsage_inj = init_data_structure() # a dict of dicts
# Run the simulation several times for each injection rate.
for restart in range(config.restarts):
currentSimdir = os.path.join(config.root_sim_folder, 'sim' + str(restart))
lat = get_latencies(currentSimdir + '/report_Performance.csv')
latenciesFlit[injIter, restart] = lat[0]
latenciesPacket[injIter, restart] = lat[1]
latenciesNetwork[injIter, restart] = lat[2]
VCUsage_run = combine_VC_hists(currentSimdir + '/VCUsage')
if VCUsage_run is not None:
for ix, layer_df in enumerate(VCUsage_run):
VCUsage_inj[ix] = pd.concat([VCUsage_inj[ix], layer_df])
BuffUsage_run = combine_Buff_hists(currentSimdir + '/BuffUsage')
if BuffUsage_run is not None:
for l in BuffUsage_inj:
for d in BuffUsage_inj[l]:
BuffUsage_inj[l][d] = BuffUsage_inj[l][d].add(
BuffUsage_run[l][d], fill_value=0)
# input('press any key')
shutil.rmtree(currentSimdir)
# Calculate the average and std for VC usage.
VCUsage_temp = []
for df in VCUsage_inj:
if not df.empty:
VCUsage_temp.append(df.groupby(df.index).agg(['mean', 'std']))
VCUsage.append(VCUsage_temp)
# Average the buffer usage over restarts.
BuffUsage_temp = init_data_structure() # a dict of dicts
for l in BuffUsage_inj:
for d in BuffUsage_inj[l]:
BuffUsage_temp[l][d] = np.ceil(BuffUsage_inj[l][d] / config.restarts)
BuffUsage.append(BuffUsage_temp)
injIter += 1
print('Executed all sims of all injection rates.')
results = {'latenciesFlit': latenciesFlit,
'latenciesNetwork': latenciesNetwork,
'latenciesPacket': latenciesPacket,
'injectionRates': injectionRates,
'VCUsage': VCUsage,
'BuffUsage': BuffUsage}
return results
###############################################################################
def save_results(results, results_file):
"""
Save the results to a pickle file.
Parameters:
- results: a dictionary of the results.
- result_file: the path to the pickle file.
Return:
- None.
"""
with open(results_file, 'wb') as f:
pickle.dump(results, f)
###############################################################################
def main():
""" Main point of execution."""
try:
root_sim_folder = sys.argv[1]
except Exception:
raise
else:
config = Configuration(root_sim_folder)
results = begin_all_sims(config)
        save_results(results, os.path.join(root_sim_folder, 'rawResults.pkl'))
###############################################################################
if __name__ == '__main__':
main()
``` |
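After `run_simulation.py` finishes, `save_results` pickles the dictionary assembled in `begin_all_sims` to `rawResults.pkl` inside the simulation root folder. A sketch of reading it back and averaging the flit latency over restarts (the dictionary keys come from the code above; the printout is just an illustration):
```python
# Sketch: load the pickled results written by save_results() above.
import pickle
import numpy as np

with open('rawResults.pkl', 'rb') as f:
    results = pickle.load(f)

rates = np.asarray(results['injectionRates'])
flit_latencies = results['latenciesFlit']     # shape: (nb rates, nb restarts)
mean_latency = flit_latencies.mean(axis=1)

for rate, latency in zip(rates, mean_latency):
    print(f"injection rate {rate:.4f}: mean flit latency {latency:.2f}")
```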
{
"source": "joernschellhaas/kivybooth",
"score": 2
} |
#### File: joernschellhaas/kivybooth/emulation.py
```python
import os
def active():
    return bool(os.getenv("KBOOTH_EMULATE"))
``` |
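`active()` above only reports whether the `KBOOTH_EMULATE` environment variable is set, so emulation mode can be toggled without code changes; a quick check:
```python
# Quick check of emulation.active(): any non-empty value enables emulation.
import os
import emulation

os.environ["KBOOTH_EMULATE"] = "1"
print(emulation.active())   # True

del os.environ["KBOOTH_EMULATE"]
print(emulation.active())   # False
```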
{
"source": "joernu76/python-skyfield",
"score": 2
} |
#### File: python-skyfield/skyfield/iokit.py
```python
from __future__ import print_function
import itertools
import os
import numpy as np
import sys
from datetime import date, datetime, timedelta
from fnmatch import fnmatch
from time import time
from .jpllib import SpiceKernel
from .sgp4lib import EarthSatellite
from .timelib import Timescale, julian_date
today = date.today
try:
from fcntl import LOCK_EX, LOCK_UN, lockf
except:
lockf = None
try:
from urllib.parse import urlparse
from urllib.request import urlopen
except:
from urlparse import urlparse
from urllib2 import urlopen
# If we are running under the built-in IDLE development environment, we
# cannot use '\r' to keep repainting the current line as a progress bar:
_running_IDLE = (sys.stderr.__class__.__name__ == 'PseudoOutputFile')
def _filename_of(url):
"""Return the last path component of a url."""
return urlparse(url).path.split('/')[-1]
_IERS = 'https://hpiers.obspm.fr/iers/bul/bulc/'
_JPL = 'ftp://ssd.jpl.nasa.gov/pub/eph/planets/bsp/'
_NAIF = 'https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/satellites/'
_USNO = 'http://maia.usno.navy.mil/ser7/'
class Loader(object):
"""A tool for downloading and opening astronomical data files.
A default `Loader` that saves data files to the current working
directory can be imported directly from the Skyfield API::
from skyfield.api import load
But users can also create a `Loader` of their own, if there is
another directory they want data files saved to, or if they want to
specify different options. The directory is created automatically
if it does not yet exist::
from skyfield.api import Loader
load = Loader('~/skyfield-data')
The options are:
``verbose``
If set to ``False``, then the loader will not print a progress bar
to the screen each time it downloads a file. (If the standard
output is not a TTY, then no progress bar is printed in any case.)
``expire``
If set to ``False``, then Skyfield will always use an existing
file on disk, instead of expiring files that are out of date and
replacing them with newly downloaded copies.
Once a `Loader` is created, it can be called like a function to
open, or else to download and open, a file whose name it recognizes::
planets = load('de405.bsp')
Each loader also supports an attribute and a few methods.
"""
def __init__(self, directory, verbose=True, expire=True):
self.directory = os.path.expanduser(directory)
self.verbose = verbose
self.expire = expire
self.events = []
if not os.path.exists(self.directory):
os.makedirs(self.directory)
# Each instance gets its own copy of these data structures,
# instead of sharing a single copy, so users can edit them
# without changing the behavior of other Loader objects:
self.urls = {
'deltat.data': _USNO,
'deltat.preds': _USNO,
'Leap_Second.dat': _IERS,
'.bsp': [
('jup*.bsp', _NAIF),
('*.bsp', _JPL),
],
}
self.parsers = {
'deltat.data': parse_deltat_data,
'deltat.preds': parse_deltat_preds,
'Leap_Second.dat': parse_leap_seconds,
}
self.openers = {
'.bsp': [
('*.bsp', SpiceKernel),
],
}
def path_to(self, filename):
"""Return the path to ``filename`` in this loader's directory."""
return os.path.join(self.directory, filename)
def __call__(self, filename):
"""Open the given file, downloading it first if necessary."""
if '://' in filename:
url = filename
filename = urlparse(url).path.split('/')[-1]
# Should this API accept full path names? It might look like:
# elif os.sep in filename:
# os.path.expanduser(directory)
# path = filename
# filename = os.path.basename(path)
# url = _search(self.urls, filename)
# directory =
else:
url = _search(self.urls, filename)
if url:
url += filename
path = self.path_to(filename)
parser = _search(self.parsers, filename)
opener = _search(self.openers, filename)
if (parser is None) and (opener is None):
raise ValueError('Skyfield does not know how to open a file'
' named {0!r}'.format(filename))
if os.path.exists(path):
self._log('Already exists: {0}', path)
if parser is not None:
self._log(' Parsing with: {0}()', parser.__name__)
with open(path, 'rb') as f:
expiration_date, data = parser(f)
if not self.expire:
self._log(' Ignoring expiration: {0}', expiration_date)
return data
if expiration_date is None:
self._log(' Does not specify an expiration date')
return data
if today() <= expiration_date:
self._log(' Does not expire til: {0}', expiration_date)
return data
self._log(' Expired on: {0}', expiration_date)
for n in itertools.count(1):
prefix, suffix = filename.rsplit('.', 1)
backup_name = '{0}.old{1}.{2}'.format(prefix, n, suffix)
if not os.path.exists(backup_name):
break
self._log(' Renaming to: {0}', backup_name)
os.rename(self.path_to(filename), self.path_to(backup_name))
else:
# Currently, openers have no concept of expiration.
self._log(' Opening with: {0}', opener.__name__)
return opener(path)
if url is None:
raise ValueError('Skyfield does not know where to download {!r}'
.format(filename))
self._log(' Downloading: {0}', url)
download(url, path, self.verbose)
if parser is not None:
self._log(' Parsing with: {0}()', parser.__name__)
with open(path, 'rb') as f:
expiration_date, data = parser(f)
return data
else:
self._log(' Opening with: {0}', opener.__name__)
return opener(path)
def _log(self, message, *args):
self.events.append(message.format(*args))
def tle(self, url, reload=False):
"""Load and parse a satellite TLE file.
Given a URL or a local path, this loads a file of three-line
records in the common Celestrak file format, where each first
line gives the name of a satellite and the following two lines
are the TLE orbital elements.
Returns a Python dictionary whose keys are satellite names and
values are :class:`~skyfield.sgp4lib.EarthSatellite` objects.
"""
with self.open(url, reload=reload) as f:
return dict(parse_celestrak_tle(f))
def open(self, url, mode='rb', reload=False):
"""Open a file, downloading it first if it does not yet exist.
Unlike when you call a loader directly like ``my_loader()``,
this ``my_loader.open()`` method does not attempt to parse or
interpret the resulting file. Instead, it simply returns an
open file object without trying to interpret the contents.
"""
filename = urlparse(url).path.split('/')[-1]
path = self.path_to(filename)
if reload or not os.path.exists(path):
download(url, path, self.verbose)
return open(path, mode)
def timescale(self, delta_t=None):
"""Open or download three time scale files, returning a `Timescale`.
This method is how most Skyfield users build a `Timescale`
object, which is necessary for building specific `Time` objects
that name specific moments.
This will open or download the three files that Skyfield needs
to measure time. UT1 is tabulated by the United States Naval
Observatory files ``deltat.data`` and ``deltat.preds``, while
UTC is defined by ``Leap_Second.dat`` from the International
Earth Rotation Service.
"""
if delta_t is not None:
delta_t_recent = np.array(((-1e99, 1e99), (delta_t, delta_t)))
else:
data = self('deltat.data')
preds = self('deltat.preds')
data_end_time = data[0, -1]
i = np.searchsorted(preds[0], data_end_time, side='right')
delta_t_recent = np.concatenate([data, preds[:,i:]], axis=1)
leap_dates, leap_offsets = self('Leap_Second.dat')
return Timescale(delta_t_recent, leap_dates, leap_offsets)
@property
def log(self):
return '\n'.join(self.events)
def _search(mapping, filename):
"""Search a Loader data structure for a filename."""
result = mapping.get(filename)
if result is not None:
return result
name, ext = os.path.splitext(filename)
result = mapping.get(ext)
if result is not None:
for pattern, result2 in result:
if fnmatch(filename, pattern):
return result2
return None
def load_file(path):
"""Open a file on your local drive, using its extension to guess its type.
This routine only works on ``.bsp`` ephemeris files right now, but
will gain support for additional file types in the future. ::
from skyfield.api import load_file
planets = load_file('~/Downloads/de421.bsp')
"""
path = os.path.expanduser(path)
base, ext = os.path.splitext(path)
if ext == '.bsp':
return SpiceKernel(path)
raise ValueError('unrecognized file extension: {}'.format(path))
def parse_deltat_data(fileobj):
"""Parse the United States Naval Observatory ``deltat.data`` file.
    Each line of the file gives the date and the value of Delta T::
2016 2 1 68.1577
This function returns a 2xN array of raw Julian dates and matching
Delta T values.
"""
array = np.loadtxt(fileobj)
year, month, day = array[-1,:3].astype(int)
expiration_date = date(year + 1, month, day)
year, month, day, delta_t = array.T
data = np.array((julian_date(year, month, day), delta_t))
return expiration_date, data
def parse_deltat_preds(fileobj):
"""Parse the United States Naval Observatory ``deltat.preds`` file.
Each line gives a floating point year, the value of Delta T, and one
or two other fields::
2015.75 67.97 0.210 0.02
This function returns a 2xN array of raw Julian dates and matching
Delta T values.
"""
year_float, delta_t = np.loadtxt(fileobj, skiprows=3, usecols=[0, 1]).T
year = year_float.astype(int)
month = 1 + (year_float * 12.0).astype(int) % 12
expiration_date = date(year[0] + 1, month[0], 1)
data = np.array((julian_date(year, month, 1), delta_t))
return expiration_date, data
def parse_leap_seconds(fileobj):
"""Parse the IERS file ``Leap_Second.dat``.
The leap dates array can be searched with::
index = np.searchsorted(leap_dates, jd, 'right')
The resulting index allows (TAI - UTC) to be fetched with::
offset = leap_offsets[index]
"""
lines = iter(fileobj)
for line in lines:
if line.startswith(b'# File expires on'):
break
else:
raise ValueError('Leap_Second.dat is missing its expiration date')
line = line.decode('ascii')
dt = datetime.strptime(line, '# File expires on %d %B %Y\n')
# The file went out of date at the beginning of July 2016, and kept
# downloading every time a user ran a Skyfield program. So we now
# build in a grace period:
grace_period = timedelta(days=30)
expiration_date = dt.date() + grace_period
mjd, day, month, year, offsets = np.loadtxt(lines).T
leap_dates = np.ndarray(len(mjd) + 2)
leap_dates[0] = '-inf'
leap_dates[1:-1] = mjd + 2400000.5
leap_dates[-1] = 'inf'
leap_offsets = np.ndarray(len(mjd) + 2)
leap_offsets[0] = leap_offsets[1] = offsets[0]
leap_offsets[2:] = offsets
return expiration_date, (leap_dates, leap_offsets)
def parse_celestrak_tle(fileobj):
lines = iter(fileobj)
for line in lines:
name = line.decode('ascii').strip()
line1 = next(lines).decode('ascii')
line2 = next(lines).decode('ascii')
sat = EarthSatellite(line1, line2, name)
yield name, sat
if ' (' in name:
# Given `ISS (ZARYA)` or `HTV-6 (KOUNOTORI 6)`, also support
# lookup by the name inside or outside the parentheses.
short_name, secondary_name = name.split(' (')
secondary_name = secondary_name.rstrip(')')
yield short_name, sat
yield secondary_name, sat
def download(url, path, verbose=None, blocksize=128*1024):
"""Download a file from a URL, possibly displaying a progress bar.
Saves the output to the file named by `path`. If the URL cannot be
downloaded or the file cannot be written, an IOError is raised.
Normally, if the standard error output is a terminal, then a
progress bar is displayed to keep the user entertained. Specify
`verbose=True` or `verbose=False` to control this behavior.
"""
tempname = path + '.download'
try:
connection = urlopen(url)
except Exception as e:
raise IOError('cannot get {0} because {1}'.format(url, e))
if verbose is None:
verbose = sys.stderr.isatty()
bar = None
if verbose:
if _running_IDLE:
print('Downloading {0} ...'.format(os.path.basename(path)),
file=sys.stderr)
else:
bar = ProgressBar(path)
content_length = int(connection.headers.get('content-length', -1))
# Python open() provides no way to achieve O_CREAT without also
# truncating the file, which would ruin the work of another process
# that is trying to download the same file at the same time. So:
flags = getattr(os, 'O_BINARY', 0) | os.O_CREAT | os.O_RDWR
fd = os.open(tempname, flags, 0o666)
with os.fdopen(fd, 'wb') as w:
try:
if lockf is not None:
fd = w.fileno()
lockf(fd, LOCK_EX) # only one download at a time
if os.path.exists(path): # did someone else finish first?
if os.path.exists(tempname):
os.unlink(tempname)
return
w.seek(0)
length = 0
while True:
data = connection.read(blocksize)
if not data:
break
w.write(data)
length += len(data)
if bar is not None:
bar.report(length, content_length)
w.flush()
if lockf is not None:
# On Unix, rename while still protected by the lock.
try:
os.rename(tempname, path)
except Exception as e:
raise IOError('error renaming {0} to {1} - {2}'.format(
tempname, path, e))
except Exception as e:
raise IOError('error getting {0} - {1}'.format(url, e))
finally:
if lockf is not None:
lockf(fd, LOCK_UN)
if lockf is None:
# On Windows, rename here because the file needs to be closed first.
try:
os.rename(tempname, path)
except Exception as e:
raise IOError('error renaming {0} to {1} - {2}'.format(
tempname, path, e))
class ProgressBar(object):
def __init__(self, path):
self.filename = os.path.basename(path)
self.t0 = 0
def report(self, bytes_so_far, bytes_total):
if bytes_total < 0:
return
percent = 100 * bytes_so_far // bytes_total
if (percent != 100) and (time() - self.t0 < 0.5):
return
self.t0 = time()
bar = '#' * (percent // 3)
print('\r[{0:33}] {1:3}% {2}'.format(bar, percent, self.filename),
end='\n' if (percent == 100) else '', file=sys.stderr)
sys.stderr.flush()
``` |
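A short sketch of the `Loader` workflow defined above. The cache directory is an arbitrary choice, and `de421.bsp` and the Celestrak URL are only common examples of files the loader knows how to fetch; the calls themselves match the methods shown in this module.
```python
# Sketch: typical Loader usage (directory, file name and URL are illustrative).
from skyfield.iokit import Loader

load = Loader('~/skyfield-data', verbose=True, expire=True)

ts = load.timescale()        # parses deltat.data, deltat.preds and Leap_Second.dat
planets = load('de421.bsp')  # matched by the '*.bsp' rule, opened as a SpiceKernel
stations = load.tle('http://celestrak.com/NORAD/elements/stations.txt')

print(load.log)              # record of what was opened, parsed or downloaded
```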
{
"source": "joernweissenborn/pyglotaran-examples",
"score": 2
} |
#### File: pyglotaran_examples/ex_spectral_guidance/ex_spectral_guidance.py
```python
from datetime import datetime
import matplotlib.pyplot as plt # 3.3 or higher
from glotaran.analysis.optimize import optimize
from glotaran.io import load_dataset
from glotaran.io import load_model
from glotaran.io import load_parameters
from glotaran.io import save_result
from glotaran.project.scheme import Scheme
from pyglotaran_extras.io.boilerplate import setup_case_study
from pyglotaran_extras.plotting.plot_overview import plot_overview
from pyglotaran_extras.plotting.style import PlotStyle
DATA_PATH1 = "data/Npq2_220219_800target3fasea.ascii"
DATA_PATH2 = "data/trNpq2_220219_800target3fase10SAS5.ascii"
MODEL_PATH = "models/model_guidance.yml"
PARAMETERS_FILE_PATH = "models/parameters_guidance.yml"
# %% Setup necessary (output) paths
results_folder, script_folder = setup_case_study(output_folder_name="pyglotaran_examples_results")
def main():
# Load in data, model and parameters
parameters = load_parameters(script_folder.joinpath(PARAMETERS_FILE_PATH))
dataset1 = load_dataset(script_folder.joinpath(DATA_PATH1))
dataset2 = load_dataset(script_folder.joinpath(DATA_PATH2))
model = load_model(script_folder.joinpath(MODEL_PATH))
# Validate model and parameters
print(model.validate(parameters=parameters))
# %% Construct the analysis scheme
scheme = Scheme(
model,
parameters,
{"dataset1": dataset1, "dataset2": dataset2},
# optimization_method="Levenberg-Marquardt", # LM needs more nfev!
maximum_number_function_evaluations=23, # TRF needs nfev=21-23
non_negative_least_squares=True,
)
# Optimize the analysis scheme (and estimate parameters)
result = optimize(scheme)
# Basic print of results
print(result.markdown(True))
return result
def load_and_plot_results():
# Plot and save as PDF
# This set subsequent plots to the glotaran style
plot_style = PlotStyle()
plt.rc("axes", prop_cycle=plot_style.cycler)
parameter_file = results_folder.joinpath("optimized_parameters.csv")
parameters = load_parameters(str(parameter_file))
print(f"Optimized parameters:\n {parameters}")
result1 = results_folder.joinpath("dataset1.nc")
fig1 = plot_overview(result1, linlog=True, show_data=True)
timestamp = datetime.today().strftime("%y%m%d_%H%M")
fig1.savefig(
results_folder.joinpath(f"plot_overview_1of2_{timestamp}.pdf"), bbox_inches="tight"
)
result2 = results_folder.joinpath("dataset2.nc")
fig2 = plot_overview(result2, linlog=True)
timestamp = datetime.today().strftime("%y%m%d_%H%M")
fig2.savefig(
results_folder.joinpath(f"plot_overview_2of2_{timestamp}.pdf"), bbox_inches="tight"
)
plt.show()
if __name__ == "__main__":
print(f"- Using folder {results_folder.name} to read/write files for this run")
result = main()
save_result(result, results_folder, format_name="legacy", allow_overwrite=True)
load_and_plot_results()
``` |
{
"source": "JoeRomeo/quote-a-lang-crawler",
"score": 3
} |
#### File: JoeRomeo/quote-a-lang-crawler/main.py
```python
from dev.fetch.fr import French
from dev.util.authors import fr_authors, general_authors
#import random
class Main():
lang = ('fr', 'it', 'pt', 'es', 'en')
# This should be implemented in the future, as more languages will be supported
ind = 0
def selectLanguage(self):
if self.lang[self.ind] == 'fr':
f = French()
print('***** going through authors in French *******')
for i in range(len(fr_authors)):
f.urlSetUp(fr_authors[i],self.lang[self.ind])
print('***** going through general_authors in French ****** ')
for j in range(len(general_authors)):
f.urlSetUp(general_authors[j],self.lang[self.ind])
``` |
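A minimal sketch of driving the crawler above; it assumes the `dev.fetch.fr` and `dev.util.authors` modules of this repository are importable and that network access is available.
```python
# Sketch: run the French crawler defined above over all configured authors.
from main import Main

crawler = Main()
crawler.selectLanguage()   # iterates fr_authors and general_authors with lang 'fr'
```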
{
"source": "joerosenberg/copt-irl",
"score": 4
} |
#### File: copt-irl/irlco/masking.py
```python
import torch
from torch import Tensor
DEVICE = torch.device('cuda')
def generate_square_subsequent_mask(size: int, device=DEVICE) -> Tensor:
"""
Generates a mask that prevents actions attending to subsequent actions in the transformer decoder.
(code taken from https://pytorch.org/tutorials/beginner/transformer_tutorial.html)
Args:
size: Size of the mask (i.e. length of the solution so far.)
device: torch.device to perform computations and store tensors on.
Returns:
Mask for transformer decoder.
"""
mask = torch.eq(torch.triu(torch.ones(size, size, device=device)), 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def generate_sorted_element_mask(previous_actions, input_length: int, device=DEVICE) -> Tensor:
"""
Generates a mask that prevents actions from attending to elements of the unordered set that have already been
placed into the ordered sequence.
Args:
previous_actions: List of previous actions (in order) that we need to mask
input_length: Number of elements in the unordered sequence
device: torch.device to perform computations and store tensors on.
Returns:
Memory mask of shape (nb of previous actions + 1, input sequence length) suitable for use in transformer
"""
# Generate lower triangular matrix (creates columns for masked input elements)
# i_th column of masked_cols is equal to the {a_i}'th column of the mask
masked_cols = torch.tril(
torch.ones(len(previous_actions) + 1, len(previous_actions), device=device) * float('-inf'), diagonal=-1)
# Create empty mask
mask = torch.zeros(len(previous_actions) + 1, input_length, device=device)
# For each previous action, prevent further actions from attending to its corresponding input element
mask[:, previous_actions] = masked_cols
return mask
def generate_batch_of_sorted_element_masks(prev_actions_batch: Tensor, input_sequence_length: int, nb_heads: int,
device=DEVICE) -> Tensor:
"""
Generates a batch of masks that prevents actions from attending to elements of the encoder set that have already
been placed into the ordered sequence. A mask is produced for each attention head and batch entry.
Args:
prev_actions_batch: Batch of previous actions as a tensor of shape (batch_size, nb actions taken so far)
input_sequence_length: Length of the input to the encoder.
device: torch.device to perform computations and store tensors on.
Returns: Tensor of shape (nb_heads * batch_size, decoder input length, encoder input length) that is meant to
be used as a memory mask for the transformer.
Note that decoder input length = nb. of previous actions + 1, due to the beginning-of-sequence token.
"""
nb_actions_taken = prev_actions_batch.shape[1]
batch_size = prev_actions_batch.shape[0]
# Get mask columns
mask_cols = torch.tril(
torch.ones(nb_actions_taken + 1, nb_actions_taken, device=device) * float('-inf'), diagonal=-1)
# Create empty mask
mask = torch.zeros((batch_size, nb_actions_taken + 1, input_sequence_length), device=device)
# For each previous action, prevent further actions from attending to its corresponding input element
# Unvectorised prototype:
# TODO: Replace this with faster vectorised version using torch tensor operations. Probably no significant gains.
for i in range(batch_size):
mask[i, :, prev_actions_batch[i, :]] = mask_cols
# Need to repeat each 2D mask nb_heads times - repeats like abcd -> aaabbbcccddd, since we want to use the same mask
# across all heads for each sequence
mask = torch.repeat_interleave(mask, nb_heads, dim=0)
return mask
```
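A small sketch of what the masking helpers above produce, run on the CPU for illustration: once elements 2 and 0 have been placed, later decoding steps can no longer attend to those encoder positions, and the square mask enforces causal decoding.
```python
# Sketch: inspect the masks produced by the helpers above (CPU for illustration).
import torch
from irlco.masking import (generate_square_subsequent_mask,
                           generate_sorted_element_mask,
                           generate_batch_of_sorted_element_masks)

cpu = torch.device('cpu')

# Causal mask for a partial solution of length 3: upper triangle is -inf.
print(generate_square_subsequent_mask(3, device=cpu))

# Elements 2 and 0 were already chosen, so subsequent steps may not attend
# to encoder positions 2 and 0.
print(generate_sorted_element_mask([2, 0], input_length=4, device=cpu))

# Batched version: one mask per attention head and batch entry, with shape
# (nb_heads * batch_size, nb actions + 1, input length).
prev_actions = torch.tensor([[2, 0], [1, 3]])
masks = generate_batch_of_sorted_element_masks(prev_actions, 4, nb_heads=2, device=cpu)
print(masks.shape)   # torch.Size([4, 3, 4])
```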
#### File: routing/data/__init__.py
```python
from torch.utils.data import Dataset
import yaml
import torch
from pathlib import Path
class CircuitSolutionDataset(Dataset):
def __init__(self, config_path: str):
# Load config
p = Path(config_path)
self.data_dir_path = p.parent
config_file = open(config_path, 'r')
        # Materialise the YAML documents so the config can be iterated more
        # than once (it is iterated again in __len__).
        self.config = list(yaml.load_all(config_file))
# Create empty lists to hold data from the config files
self.instances = []
self.solutions = []
self.measures = []
self.indices = {}
self.episode_indices = {}
self.nb_episodes_stored = {}
# Tracks index for each entry we add
index = 0
# Fill in empty lists with data from files specified in the config:
for i, config_entry in enumerate(self.config):
# Read data from corresponding data file
data_file = open(self.data_dir_path / Path(config_entry['output_file']), 'r')
data = yaml.load_all(data_file)
# Create empty tensors to hold data for this file:
nb_solutions = config_entry['nb_instances'] * config_entry['nb_top_solutions']
instance_size = config_entry['instance_size']
# Initialise so dimensions match (seq length, batch size, nb_features) for transformer
self.instances.append(torch.zeros(instance_size, nb_solutions, 4))
self.solutions.append(torch.zeros(instance_size, nb_solutions, dtype=torch.long))
self.measures.append(torch.zeros(nb_solutions, 1))
self.episode_indices[instance_size] = i
# Write data from this file into empty tensors & simultaneously create indices for each entry
for j, data_entry in enumerate(data):
self.instances[i][:, j, :] = torch.FloatTensor(data_entry['instance'])
self.solutions[i][:, j] = torch.LongTensor(data_entry['order'])
self.measures[i][j, 0] = float(data_entry['measure'])
self.indices[index] = (i, j)
index += 1
self.nb_episodes_stored[instance_size] = j + 1
def __len__(self):
# Read lengths of each data file from the config file and sum them to obtain total length
length = sum([config_entry['nb_instances'] * config_entry['nb_top_solutions'] for config_entry in self.config])
return length
def __getitem__(self, index):
"""
Args:
index:
Returns: Tuple of instance, solution (as connection order) and measure.
"""
assert 0 <= index < self.__len__()
i, j = self.indices[index]
return self.instances[i][:, j, :], self.solutions[i][:, j], self.measures[i][j, :]
def get_batch(self, episode_length, batch_size, device):
# Get data for episodes of requested length:
episode_index = self.episode_indices[episode_length]
dataset_size = self.nb_episodes_stored[episode_length]
instances = self.instances[episode_index]
solutions = self.solutions[episode_index]
measures = self.measures[episode_index]
# Sample indices
batch_indices = torch.randint(0, dataset_size, [batch_size])
return instances[:, batch_indices, :].to(device), solutions[:, batch_indices].to(device), \
measures[batch_indices, :].to(device)
```
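A sketch of how `CircuitSolutionDataset` above might be used. The config path, episode length and import path are assumptions; the YAML config has to list entries with `output_file`, `nb_instances`, `nb_top_solutions` and `instance_size`, matching what `__init__` reads.
```python
# Sketch only: the config path and episode length below are assumptions.
import torch
from irlco.routing.data import CircuitSolutionDataset

dataset = CircuitSolutionDataset('data/solutions/config.yaml')
print(len(dataset))            # total number of stored solutions

# Single entry: (instance, connection order, measure).
instance, solution, measure = dataset[0]

# Batch of stored episodes of one instance size, moved to the GPU.
instances, solutions, measures = dataset.get_batch(
    episode_length=6, batch_size=32, device=torch.device('cuda'))
print(instances.shape)         # torch.Size([6, 32, 4])
```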
#### File: irlco/routing/policy.py
```python
import copt
import torch
from irlco.masking import generate_square_subsequent_mask, generate_batch_of_sorted_element_masks
from irlco.routing.env import BatchCircuitRoutingEnv
from irlco.routing.mp_evaluation import mp_evaluate
def greedy_decode(env, initial_states, policy_net, device=torch.device('cuda'), return_all_probs=False):
episode_length = initial_states[0].shape[0]
batch_size = initial_states[0].shape[1]
nb_heads = policy_net.nhead
trajectory_probs = torch.zeros((batch_size, episode_length), device=device)
if return_all_probs:
policy_probs = torch.zeros((batch_size, episode_length, episode_length), device=device)
states = env.reset(instances=initial_states[0])
for t in range(episode_length):
base_pairs, prev_actions = states
decoder_input = states_to_action_decoder_input(states, device=device)
tgt_mask = generate_square_subsequent_mask(t + 1)
memory_masks = generate_batch_of_sorted_element_masks(prev_actions, episode_length, nb_heads)
action_probs = policy_net(base_pairs, decoder_input, tgt_mask=tgt_mask, memory_mask=memory_masks)[:, -1, :]
actions = torch.argmax(action_probs, dim=1).unsqueeze(1)
states, _ = env.step(actions)
trajectory_probs[:, t] = action_probs[torch.arange(0, batch_size), actions.squeeze()]
if return_all_probs:
policy_probs[:, t, :] = action_probs
measures, successes = evaluate_terminal_states(states, device=device)
_, actions = states
if return_all_probs:
return actions, trajectory_probs, measures, successes, policy_probs
else:
return actions, trajectory_probs, measures, successes
def beam_search_decode3(env, initial_states, policy_net, beam_width, device=torch.device('cuda')):
"""
Args:
env:
initial_states:
policy_net:
k: beam width
device:
Returns:
"""
episode_length = initial_states[0].shape[0]
batch_size = initial_states[0].shape[1]
nb_heads = policy_net.nhead
# Create tensor to store actions as we decode - we store each of the {beam_width} most probable action sequences
# at each step
action_sequences = torch.zeros((episode_length, batch_size, beam_width), device=device, dtype=torch.long)
base_pairs, _ = initial_states
rep_base_pairs = torch.repeat_interleave(base_pairs, beam_width, dim=1)
dummy_env = BatchCircuitRoutingEnv(batch_size * beam_width, 1, 100)
states = dummy_env.reset(instances=rep_base_pairs)
log_trajectory_probs = torch.zeros((batch_size, beam_width), device=device)
for t in range(episode_length):
base_pairs, prev_actions = states
decoder_input = states_to_action_decoder_input(states, device=device)
tgt_mask = generate_square_subsequent_mask(t + 1)
memory_masks = generate_batch_of_sorted_element_masks(prev_actions, episode_length, nb_heads)
# Calculate next-step action probabilities from current best action sequences for each instance
next_action_probs = policy_net(base_pairs, decoder_input, tgt_mask=tgt_mask, memory_mask=memory_masks)
# (Episode length - t) is equal to the number of remaining actions, so we shrink the number of actions to expand
# on to this if it is smaller than the beam_width argument
k = min(beam_width, episode_length - t)
# Create tensor to store log joint probabilities of trajectories after we expand:
# First index corresponds to problem instance, second index corresponds to trajectories we selected in the
# previous step, third index corresponds to the top k actions for this step
new_log_joint_trajectory_probs = log_trajectory_probs.unsqueeze(2).repeat(1, 1, k)
# Get top k most probable next actions for each
# next_action_probs has shape (episode_length, batch_size * beam_width, 1)
# topk_next_action_probs and topk_next_actions have shape (k, batch_size * beam_width, 1)
topk_next_action_probs, topk_next_actions = torch.topk(next_action_probs, k, dim=0)
# Reshape action probs to calculate log-probs of each of the trajectories we just expanded on
# new_log_joint_trajectory_probs has shape (batch_size, beam_width, k)
new_log_joint_trajectory_probs += torch.log(
topk_next_action_probs.squeeze(2).T.reshape(batch_size, beam_width, k))
# reshape again to find {beam_width} most probable trajectories for each input
log_trajectory_probs, best_trajectory_idx = torch.topk(
new_log_joint_trajectory_probs.reshape(batch_size, beam_width * k), beam_width, dim=0)
def beam_search_decode2(initial_states, policy_net, beam_width, device=torch.device('cuda')):
# Less efficient beam search decode - iterates over each entry and decodes separately.
episode_length = initial_states[0].shape[0]
batch_size = initial_states[0].shape[1]
nb_heads = policy_net.nhead
batch_action_sequences = torch.zeros((batch_size, episode_length), dtype=torch.long, device=device)
batch_trajectory_probs = torch.zeros_like(batch_action_sequences, dtype=torch.float)
for b in range(batch_size):
base_pair = initial_states[0][:, b, :]
encoder_input = base_pair.unsqueeze(1).repeat(1, beam_width, 1)
best_action_sequences = torch.zeros((beam_width, 0), dtype=torch.long, device=device)
log_joint_trajectory_probs = torch.zeros(beam_width, device=device)
for t in range(episode_length):
decoder_input = states_to_action_decoder_input((encoder_input, best_action_sequences), device=device)
tgt_mask = generate_square_subsequent_mask(t + 1)
memory_masks = generate_batch_of_sorted_element_masks(best_action_sequences, episode_length, nb_heads)
next_action_probs = policy_net(encoder_input, decoder_input,
tgt_mask=tgt_mask, memory_mask=memory_masks)[:, -1, :]
k = min(beam_width, episode_length - t)
# Get k next most probable actions + their probabilities
expanded_action_probs, expanded_actions = torch.topk(next_action_probs.unsqueeze(2), k, dim=0)
# Add the k most probable actions onto the existing trajectories to get beam_size * k possible trajectories
expanded_trajectories = torch.cat(
(torch.repeat_interleave(best_action_sequences, k, dim=0), expanded_actions.flatten()), dim=1)
# Calculate log-probabilities of the expanded trajectories
log_expanded_trajectory_probs = torch.repeat_interleave(log_joint_trajectory_probs, k, dim=0) \
+ torch.log(expanded_action_probs)
# Select beam_width most probable trajectories
log_joint_trajectory_probs, best_trajectory_idx = torch.topk(log_expanded_trajectory_probs, beam_width)
# Update chosen action sequences
            best_action_sequences = expanded_trajectories[best_trajectory_idx].clone()
# Choose action sequence with largest probability
        batch_action_sequences[b, :] = best_action_sequences[torch.argmax(log_joint_trajectory_probs), :].clone()
measures, successes = evaluate_terminal_states((initial_states[0], batch_action_sequences))
return batch_action_sequences, batch_trajectory_probs, measures, successes
def beam_search_decode(initial_states, policy_net, beam_width, device=torch.device('cuda')):
episode_length, batch_size, _ = initial_states[0].shape
nb_heads = policy_net.nhead
base_pairs, _ = initial_states
# Compute top {beam_width} initial actions
decoder_input = states_to_action_decoder_input(initial_states, device=device)
trajectories = torch.zeros((batch_size * beam_width, episode_length), device=device, dtype=torch.long)
tgt_mask = generate_square_subsequent_mask(1)
memory_masks = generate_batch_of_sorted_element_masks(torch.zeros((batch_size, 0), dtype=torch.long, device=device),
episode_length, nb_heads)
initial_action_probs = policy_net(base_pairs, decoder_input, tgt_mask=tgt_mask, memory_mask=memory_masks)[:, 0, :]
a, b = torch.topk(initial_action_probs, beam_width, dim=1)
# Shape of trajectory_probs is (batch_size * beam_width,)
trajectory_probs = a.flatten()
trajectories[:, 0] = b.flatten()
trajectory_log_probs = torch.log(trajectory_probs)
rep_base_pairs = base_pairs.repeat_interleave(beam_width, dim=1)
for t in range(1, episode_length):
decoder_input = states_to_action_decoder_input((rep_base_pairs, trajectories[:, :t]), device=device)
tgt_mask = generate_square_subsequent_mask(t + 1)
memory_masks = generate_batch_of_sorted_element_masks(trajectories[:, :t], episode_length, nb_heads)
# shape of next_action_probs is (batch_size * beam_width, episode_length)
next_action_probs = policy_net(rep_base_pairs, decoder_input, tgt_mask=tgt_mask, memory_mask=memory_masks)[:, -1, :]
for b in range(batch_size):
# Find the best expanded trajectories for instance b
# Calculate trajectory log-probs
# Shape of instance_next_action_probs is (beam_width, episode_length)
instance_next_action_probs = next_action_probs[b * beam_width:(b + 1) * beam_width, :]
# Shape of instance_trajs_so_far is (beam_width, t)
instance_trajs_so_far = trajectories[b * beam_width:(b + 1) * beam_width, :t]
# Shape of instance_expanded_trajs is (beam_width * episode_length, t+1)
instance_expanded_trajs = torch.cat((instance_trajs_so_far.repeat_interleave(episode_length, dim=0),
torch.arange(0, episode_length, device=device,
dtype=torch.long).repeat(beam_width).unsqueeze(1)), dim=1)
instance_trajs_so_far_log_probs = trajectory_log_probs[b * beam_width:(b + 1) * beam_width] # (beam_width,)
# Shape of instance_expanded_traj_log_probs is (beam_width * episode_length)
instance_expanded_traj_log_probs = instance_trajs_so_far_log_probs.repeat_interleave(episode_length, dim=0) \
+ torch.log(instance_next_action_probs.flatten() + 1e-8)
# Find best trajs
best_instance_expanded_traj_log_probs, best_instance_idx = torch.topk(instance_expanded_traj_log_probs, beam_width)
# Update stored trajectories and log probs
trajectory_log_probs[b*beam_width:(b+1)*beam_width] = best_instance_expanded_traj_log_probs
trajectories[b*beam_width:(b+1)*beam_width, :(t+1)] = instance_expanded_trajs[best_instance_idx, :]
# Evaluate all trajectories and return the one with the lowest cost for each instance
# Shape of measures and successes is (batch_size*beam_width, 1)
measures, successes = evaluate_terminal_states((rep_base_pairs, trajectories), device=device)
best_trajs = torch.zeros((batch_size, episode_length), dtype=torch.long, device=device)
best_measures = torch.zeros((batch_size, 1), device=device)
best_successes = torch.zeros((batch_size, 1), dtype=torch.bool, device=device)
for b in range(batch_size):
instance_measures = measures[b*beam_width:(b+1)*beam_width, 0]
instance_successes = successes[b*beam_width:(b+1)*beam_width, 0]
# If none are successful, return the trajectory with the highest log-probability
if torch.logical_not(instance_successes).all():
best_measures[b, 0] = instance_measures[0]
best_successes[b, 0] = False
best_trajs[b, :] = trajectories[b*beam_width, :]
else:
# Otherwise return the successful trajectory with the lowest measure
best_idx = torch.argmin(instance_measures.masked_fill(torch.logical_not(instance_successes), float('inf')))
best_measures[b, 0] = instance_measures[best_idx]
best_successes[b, 0] = True
best_trajs[b, :] = trajectories[b*beam_width + best_idx, :]
return best_trajs, best_measures, best_successes
def sample_best_of_n_trajectories(env, initial_states, policy_net, n_sample, device=torch.device('cuda'),
return_all_probs=False):
"""
    Given a set of initial states, samples n_sample trajectories per state and keeps the best one found for each.
Args:
env:
initial_states:
policy_net:
n_sample:
device:
return_all_probs:
Returns:
"""
assert initial_states[1].shape[1] == 0, "The provided states are not initial states!"
episode_length = initial_states[0].shape[0]
batch_size = initial_states[0].shape[1]
nb_heads = policy_net.nhead
trajectory_probs = torch.zeros((batch_size, episode_length), device=device)
best_trajectory_probs = torch.zeros_like(trajectory_probs)
best_actions = torch.zeros_like(trajectory_probs, dtype=torch.long)
    best_measures = torch.full((batch_size, 1), float('inf'), device=device)
best_successes = torch.zeros_like(best_measures, dtype=torch.bool)
if return_all_probs:
best_all_probs = torch.zeros((batch_size, episode_length, episode_length), device=device)
# Sample n_sample trajectories from each initial state according to the current policy, returning only the best
for n in range(n_sample):
states = env.reset(instances=initial_states[0])
for t in range(episode_length):
base_pairs, prev_actions = states
decoder_input = states_to_action_decoder_input(states, device=device)
# Create masks so that the decoder elements don't attend to future actions or base pairs that have
# already been connected
tgt_mask = generate_square_subsequent_mask(t + 1)
memory_masks = generate_batch_of_sorted_element_masks(prev_actions, episode_length, nb_heads)
if return_all_probs and t == episode_length - 1:
all_probs = policy_net(base_pairs, decoder_input, tgt_mask=tgt_mask, memory_mask=memory_masks)
action_probs = all_probs[:, -1, :]
else:
                action_probs = policy_net(base_pairs, decoder_input, tgt_mask=tgt_mask, memory_mask=memory_masks)[:, -1, :]
actions = torch.multinomial(action_probs, 1)
states, _ = env.step(actions)
# Store probabilities for the chosen actions
trajectory_probs[:, t] = action_probs[torch.arange(0, batch_size), actions.squeeze()]
# Evaluate the trajectories + add them to the best so far if they were successful and had lower measure
# than the previous best trajectories
_, actions = states
measures, successes = evaluate_terminal_states(states)
# We consider the trajectory to be an improvement if one of the following holds:
# 1. The current best trajectory is unsuccessful.
# 2. The current best trajectory and the new trajectory are successful, and the new trajectory
# has a lower measure.
is_improvement = torch.logical_or(
torch.logical_not(best_successes),
torch.logical_and(torch.lt(measures, best_measures),
torch.logical_and(best_successes, successes))
)
best_trajectory_probs = torch.where(is_improvement, trajectory_probs, best_trajectory_probs)
best_actions = torch.where(is_improvement, actions, best_actions)
best_measures = torch.where(is_improvement, measures, best_measures)
best_successes = torch.where(is_improvement, successes, best_successes)
if return_all_probs:
best_all_probs = torch.where(is_improvement.unsqueeze(2), all_probs, best_all_probs)
if return_all_probs:
return best_actions, best_trajectory_probs, best_measures, best_successes, best_all_probs
else:
return best_actions, best_trajectory_probs, best_measures, best_successes
def states_to_action_decoder_input(states, device=torch.device('cuda')):
base_pairs, prev_actions = states
batch_size = base_pairs.shape[1]
t = prev_actions.shape[1]
decoder_input = torch.zeros(t + 1, batch_size, 4, device=device)
indices = prev_actions.T.unsqueeze(2).repeat(1, 1, 4)
decoder_input[1:, :, :] = torch.gather(base_pairs, 0, indices)
return decoder_input
def evaluate_terminal_states2(terminal_states, device=torch.device('cuda')):
base_pairs, actions = terminal_states
batch_size = base_pairs.shape[1]
measures = torch.zeros((batch_size, 1))
successes = torch.zeros((batch_size, 1), dtype=torch.bool)
for i in range(batch_size):
problem = [tuple(base_pair) for base_pair in base_pairs[:, i, :].tolist()]
ordering = actions[i, :].tolist()
evaluation = copt.evaluate(problem, ordering)
measures[i, 0] = evaluation['measure']
successes[i, 0] = evaluation['success']
return measures.to(device), successes.to(device)
def evaluate_terminal_states(terminal_states, device=torch.device('cuda')):
base_pairs, actions = terminal_states
batch_size = base_pairs.shape[1]
measures = torch.zeros((batch_size, 1))
successes = torch.zeros((batch_size, 1), dtype=torch.bool)
problems = [[tuple(base_pair) for base_pair in base_pairs[:, i, :].tolist()] for i in range(batch_size)]
orderings = [actions[i, :].tolist() for i in range(batch_size)]
for i, (measure, success) in enumerate(mp_evaluate(problems, orderings)):
measures[i, 0] = measure
successes[i, 0] = success
return measures.to(device), successes.to(device)
def trajectory_action_probabilities(terminal_states, policy_net, device=torch.device('cuda')):
base_pairs, actions = terminal_states
decoder_input = states_to_action_decoder_input((base_pairs, actions[:, :-1]), device=device)
episode_length = base_pairs.shape[0]
batch_size = base_pairs.shape[1]
nb_heads = policy_net.nhead
tgt_mask = generate_square_subsequent_mask(episode_length)
memory_masks = generate_batch_of_sorted_element_masks(actions[:, :-1], episode_length, nb_heads)
action_probs = policy_net(base_pairs, decoder_input, tgt_mask=tgt_mask, memory_mask=memory_masks)
trajectory_probs = torch.gather(action_probs, 2, actions.unsqueeze(2)).transpose(0, 1)
return trajectory_probs
```
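To make the beam-search bookkeeping in `beam_search_decode` concrete, here is a tiny, self-contained example (illustrative only) of one expansion step for a single instance: the kept trajectories' log-probabilities are added to the log-probabilities of every candidate next action, mirroring the inner loop above, and `torch.topk` over the flattened expansion selects the continuations to keep.
```python
import torch
beam_width, nb_actions = 2, 4
traj_log_probs = torch.log(torch.tensor([0.6, 0.3]))       # log-probs of the currently kept trajectories
next_action_probs = torch.tensor([[0.1, 0.7, 0.1, 0.1],    # next-action distribution for kept trajectory 0
                                  [0.4, 0.2, 0.2, 0.2]])   # next-action distribution for kept trajectory 1
# Pair every kept trajectory with every candidate action and rank the expanded trajectories.
expanded = traj_log_probs.repeat_interleave(nb_actions) + torch.log(next_action_probs.flatten() + 1e-8)
best_log_probs, best_idx = torch.topk(expanded, beam_width)
parent = torch.div(best_idx, nb_actions, rounding_mode='floor')  # which kept trajectory each winner extends
action = best_idx % nb_actions                                   # which action it appends
print(best_log_probs.exp())  # ~[0.42, 0.12]: extend trajectory 0 with action 1, trajectory 1 with action 0
print(parent, action)        # tensor([0, 1]) tensor([1, 0])
```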
#### File: irlco/routing/reward.py
```python
import torch
from irlco.masking import generate_square_subsequent_mask
def states_to_reward_decoder_input(states, device=torch.device('cuda')):
base_pairs, actions = states
indices = actions.T.unsqueeze(2).repeat(1, 1, 4)
decoder_input = torch.gather(base_pairs, 0, indices)
return decoder_input
def compute_shaping_terms(terminal_states, reward_net, device=torch.device('cuda')):
decoder_input = states_to_reward_decoder_input(terminal_states, device=device)
base_pairs, _ = terminal_states
episode_length = base_pairs.shape[0]
tgt_mask = generate_square_subsequent_mask(episode_length)
return reward_net.shaping_terms(base_pairs, decoder_input, tgt_mask=tgt_mask)
def shaping_terms_to_rewards(shaping_terms, terminal_rewards, device=torch.device('cuda')):
episode_length = shaping_terms.shape[0]
batch_size = shaping_terms.shape[1]
rewards = torch.zeros((episode_length, batch_size, 1), device=device)
rewards += shaping_terms # + h(s')
rewards[1:] -= shaping_terms[:-1] # - h(s)
rewards[-1, :, :] += terminal_rewards # + T(s')
return rewards
```
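A quick numerical check, included here for illustration, of the structure implemented by `shaping_terms_to_rewards`: with the initial potential taken as zero, each per-step reward is h(s_{t+1}) - h(s_t), plus the terminal reward at the last step, so the episode return telescopes to the final shaping term plus the terminal reward.
```python
import torch
episode_length, batch_size = 4, 1
h = torch.tensor([0.5, 1.0, 0.25, 2.0]).reshape(episode_length, batch_size, 1)  # shaping terms h(s_1..s_T)
terminal = torch.tensor([[3.0]])                                                # terminal reward, shape (batch, 1)
rewards = torch.zeros((episode_length, batch_size, 1))
rewards += h                   # + h(s')
rewards[1:] -= h[:-1]          # - h(s)
rewards[-1, :, :] += terminal  # + T(s')
print(rewards.squeeze())       # tensor([ 0.5000,  0.5000, -0.7500,  4.7500])
print(rewards.sum(dim=0))      # tensor([[5.]]) == h(s_T) + T(s_T) = 2.0 + 3.0
```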
#### File: irlco/routing/train.py
```python
from random import randint
import torch
import wandb
import irlco.pointer_transformer as pt
from irlco.routing.baselines import greedy_rollout_baselines
from irlco.routing.data import CircuitSolutionDataset
from irlco.routing.env import BatchCircuitRoutingEnv, measures_to_terminal_rewards
from irlco.routing.policy import sample_best_of_n_trajectories, trajectory_action_probabilities, greedy_decode, \
beam_search_decode, evaluate_terminal_states
import pickle
import os
from multiprocessing import freeze_support
from irlco.routing.reward import compute_shaping_terms, shaping_terms_to_rewards
def load_pickled_data(data_config_path, data_pickle_path):
if os.path.isfile(data_pickle_path):
with open(data_pickle_path, 'rb') as pickled_data:
data = pickle.load(pickled_data)
else:
data = CircuitSolutionDataset(data_config_path)
data.config = None # Get rid of yaml object so we can pickle
with open(data_pickle_path, 'wb') as pickled_data:
pickle.dump(data, pickled_data)
return data
if __name__ == '__main__':
# For multiprocessing support on Windows
freeze_support()
# Transformer model parameters
EMBEDDING_DIM = 128
NB_HEADS = 8
FF_DIM = 512
DROPOUT = 0.0
NB_ENCODER_LAYERS = 5
NB_DECODER_LAYERS = 3
# Environment parameters
MIN_INSTANCE_SIZE = 6
MAX_INSTANCE_SIZE = 9
# Training parameters
NB_INSTANCES_PER_BATCH = 4 # Number of unique circuit routing problems to consider in each batch
NB_TRAJECTORIES_PER_INSTANCE = 128 # Number of trajectories to sample for each unique circuit routing problem
BATCH_SIZE = NB_TRAJECTORIES_PER_INSTANCE * NB_INSTANCES_PER_BATCH
NB_EPISODES = 20_000
LR = 1e-5 # Optimizer learning rate
EPS = 1e-8 # Add when computing log-probabilities from probabilities to avoid numerical instability
DEVICE = torch.device('cuda')
ENTROPY_REGULARISATION_WEIGHT = 0.0
TRAIN_DECODING_METHOD = 'sample' # greedy or sample
# Qualitative training parameters
BASELINE_METHOD = 'none' # 'greedy' for greedy rollouts or 'none'
REWARD_SHAPING_METHOD = 'ail' # 'ail' for adversarial imitation learning or 'none'
SHARED_AIL_ENCODER = False # Whether or not to share the transformer encoder between the policy and discriminator
PRETRAIN_DISCRIMINATOR = False # Train discriminator to get shaped reward
NB_PRETRAIN_BATCHES = 1000
PRETRAIN_BATCH_SIZE = 512
# Adversarial imitation learning (reward shaping) parameters
NB_EXPERT_SAMPLES = BATCH_SIZE # Keep it equal to batch size for now, so that the discriminator sees an equal
# amount of expert and non-expert data
USE_ACTION_PROBS_FOR_DISCRIMINATOR = False
# PPO surrogate loss clipping parameter
PPO_EPS = 0.2
# Test parameters
TEST_INTERVAL = 25
TEST_BATCH_SIZE = 256
    TEST_DECODING_METHOD = 'beam' # 'greedy', 'sampling' or 'beam'
NB_TEST_SAMPLES = 128 # Number of samples to take if decoding method is 'sampling'
# Model saving interval
SAVE_INTERVAL = 25
# Data file paths
TEST_DATA_PATH = './data/test_data_config.yaml'
TEST_DATA_PICKLE_PATH = './data/pickle/test_data.pkl'
EXPERT_DATA_PATH = './data/irl_data_config.yaml'
EXPERT_DATA_PICKLE_PATH = './data/pickle/irl_data.pkl'
wandb.init(project='routing', config={
'embedding_dim': EMBEDDING_DIM,
'nb_heads': NB_HEADS,
'ff_dim': FF_DIM,
'dropout': DROPOUT,
'nb_encoder_layers': NB_ENCODER_LAYERS,
'nb_decoder_layers': NB_DECODER_LAYERS,
'min_instance_size': MIN_INSTANCE_SIZE,
'max_instance_size': MAX_INSTANCE_SIZE,
'nb_instances_per_batch': NB_INSTANCES_PER_BATCH,
'nb_trajectories_per_instance': NB_TRAJECTORIES_PER_INSTANCE,
'batch_size': BATCH_SIZE,
'learning_rate': LR,
'entropy_regularisation_weight': ENTROPY_REGULARISATION_WEIGHT,
'baseline_method': BASELINE_METHOD,
'reward_shaping_method': REWARD_SHAPING_METHOD,
'shared_ail_encoder': SHARED_AIL_ENCODER,
'nb_expert_samples': NB_EXPERT_SAMPLES,
'ppo_clipping_parameter': PPO_EPS,
'use_actions_probs_for_discriminator': USE_ACTION_PROBS_FOR_DISCRIMINATOR,
'train_decoding_method': TRAIN_DECODING_METHOD,
'test_decoding_method': TEST_DECODING_METHOD,
'nb_test_samples': NB_TEST_SAMPLES
})
# Environments for sampling unique problems, stepping forward during training, and testing
dummy_env = BatchCircuitRoutingEnv(NB_INSTANCES_PER_BATCH, MIN_INSTANCE_SIZE, MAX_INSTANCE_SIZE)
env = BatchCircuitRoutingEnv(BATCH_SIZE, MIN_INSTANCE_SIZE, MAX_INSTANCE_SIZE)
test_env = BatchCircuitRoutingEnv(TEST_BATCH_SIZE, MIN_INSTANCE_SIZE, MAX_INSTANCE_SIZE)
# Shared net for policy + shaped rewards
net = pt.TwinDecoderPointerTransformer(4, d_model=EMBEDDING_DIM, nhead=NB_HEADS, num_encoder_layers=NB_ENCODER_LAYERS,
num_decoder_layers=NB_DECODER_LAYERS, dim_feedforward=FF_DIM, dropout=DROPOUT,
shared_encoder=SHARED_AIL_ENCODER).cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=LR)
wandb.watch(net)
# Make directory for saving model
os.mkdir(f'./saved_models/{wandb.run.name}')
if BASELINE_METHOD == 'greedy':
# Create baseline net with same parameters as net
baseline_net = pt.TwinDecoderPointerTransformer(4, d_model=EMBEDDING_DIM, nhead=NB_HEADS, num_encoder_layers=NB_ENCODER_LAYERS,
num_decoder_layers=NB_DECODER_LAYERS, dim_feedforward=FF_DIM, dropout=DROPOUT,
shared_encoder=SHARED_AIL_ENCODER).cuda()
baseline_net.load_state_dict(net.state_dict())
baseline_net.eval()
elif BASELINE_METHOD == 'none':
baseline_net = None
else:
        raise ValueError(f"Unknown BASELINE_METHOD: {BASELINE_METHOD}")
# Variables for tracking baseline
best_success_rate = 0.0
# Load data files - if a pickled copy of the data exists, load that instead
test_data = load_pickled_data(TEST_DATA_PATH, TEST_DATA_PICKLE_PATH)
expert_data = load_pickled_data(EXPERT_DATA_PATH, EXPERT_DATA_PICKLE_PATH)
if PRETRAIN_DISCRIMINATOR:
for i in range(NB_PRETRAIN_BATCHES):
episode_length = randint(MIN_INSTANCE_SIZE, MAX_INSTANCE_SIZE)
base_pairs, expert_actions, expert_measures = expert_data.get_batch(episode_length, PRETRAIN_BATCH_SIZE, DEVICE)
expert_actions = expert_actions.T
expert_terminal_rewards = measures_to_terminal_rewards(episode_length, expert_measures)
# Generate random actions
random_actions = torch.zeros((PRETRAIN_BATCH_SIZE, episode_length), dtype=torch.long, device=DEVICE)
for b in range(PRETRAIN_BATCH_SIZE):
random_actions[b, :] = torch.randperm(episode_length, device=DEVICE)
# Evaluate random solutions
random_measures, random_successes = evaluate_terminal_states((base_pairs, random_actions), device=DEVICE)
random_terminal_rewards = measures_to_terminal_rewards(episode_length, random_measures, successes=random_successes)
# Combine expert and random solutions into one tensor for batch computation
disc_actions = torch.cat((random_actions, expert_actions), dim=0)
disc_base_pairs = torch.cat((base_pairs, base_pairs), dim=1)
disc_terminal_rewards = torch.cat((random_terminal_rewards, expert_terminal_rewards), dim=0)
# Compute shaping terms
disc_shaping_terms = compute_shaping_terms((disc_base_pairs, disc_actions), net)
disc_rewards = shaping_terms_to_rewards(disc_shaping_terms, disc_terminal_rewards).squeeze(2).T
# Compute classifier probabilities
is_expert_transition_probs = torch.exp(disc_rewards) / (1 + torch.exp(disc_rewards))
# Compute discriminator loss
discriminator_loss = - (
torch.sum(torch.log(1 - is_expert_transition_probs[:PRETRAIN_BATCH_SIZE, :])) +
torch.sum(torch.log(is_expert_transition_probs[PRETRAIN_BATCH_SIZE:, :]))
) / (PRETRAIN_BATCH_SIZE * 2)
            optimizer.zero_grad()
            discriminator_loss.backward()
            optimizer.step()
# Log discriminator loss
wandb.log({'pretrain_discriminator_loss': discriminator_loss})
# Copy reward encoder model weights to policy encoder
net.encoder.load_state_dict(net.reward_encoder.state_dict())
# Save pretrained model
torch.save(net.state_dict(), f'./saved_models/{wandb.run.name}/{wandb.run.name}_pretrained_model')
    for i in range(NB_EPISODES):
        optimizer.zero_grad()  # clear gradients accumulated in the previous iteration
        ''' Sample trajectories '''
# Sample NB_INSTANCES_PER_BATCH unique circuit routing problems
instances, _ = dummy_env.reset()
# Duplicate each problem NB_TRAJECTORIES_PER_INSTANCE times so we sample that many trajectories for each problem
states = env.reset(instances=instances.repeat(1, NB_TRAJECTORIES_PER_INSTANCE, 1))
base_pairs, _ = states
episode_length = base_pairs.shape[0]
# Sample trajectories according to policy given by net
if TRAIN_DECODING_METHOD == 'sample':
actions, action_probs, measures, successes, all_action_probs = sample_best_of_n_trajectories(env, states,
net, 1,
return_all_probs=True)
elif TRAIN_DECODING_METHOD == 'greedy':
actions, action_probs, measures, successes, all_action_probs = greedy_decode(env, states, net,
return_all_probs=True)
else:
            raise ValueError(f"Unknown TRAIN_DECODING_METHOD: {TRAIN_DECODING_METHOD}")
''' Compute rewards and returns '''
# Compute terminal rewards for each solution
terminal_rewards = measures_to_terminal_rewards(episode_length, measures, successes=successes)
if REWARD_SHAPING_METHOD == 'ail':
# Get expert data for discriminator
expert_base_pairs, expert_actions, expert_measures = expert_data.get_batch(episode_length,
NB_EXPERT_SAMPLES, DEVICE)
expert_actions = expert_actions.T
# Get terminal rewards for expert solutions (they are guaranteed to be successful solutions, so we don't
            # need to pass successes)
expert_terminal_rewards = measures_to_terminal_rewards(episode_length, expert_measures)
# Concatenate policy data and expert data together so we can compute in a single batch
disc_base_pairs = torch.cat((base_pairs, expert_base_pairs), dim=1)
disc_actions = torch.cat((actions, expert_actions), dim=0)
disc_terminal_rewards = torch.cat((terminal_rewards, expert_terminal_rewards))
# trajectory_action_probabilities computes the probabilities that the current agent would take the expert's
# actions
expert_action_probs = trajectory_action_probabilities((expert_base_pairs, expert_actions), net).squeeze(2).T
disc_action_probs = torch.cat((action_probs, expert_action_probs), dim=0)
# Compute shaping terms for both agent and expert trajectories
disc_shaping_terms = compute_shaping_terms((disc_base_pairs, disc_actions), net)
# Compute rewards from shaping terms
disc_rewards = shaping_terms_to_rewards(disc_shaping_terms, disc_terminal_rewards).squeeze(2).T
# Calculate mean cross-entropy loss for the discriminator
if USE_ACTION_PROBS_FOR_DISCRIMINATOR:
is_expert_transition_probs = torch.exp(disc_rewards) / (
torch.exp(disc_rewards) + disc_action_probs.detach())
else:
is_expert_transition_probs = torch.exp(disc_rewards) / (1 + torch.exp(disc_rewards))
# Calculate misclassification rates for logging
false_positive_rate = (is_expert_transition_probs[:BATCH_SIZE, :] > 0.5).float().mean()
false_negative_rate = (is_expert_transition_probs[BATCH_SIZE:, :] < 0.5).float().mean()
wandb.log({'false_positive_rate': false_positive_rate, 'false_negative_rate': false_negative_rate},
commit=False)
discriminator_loss = - (
torch.sum(torch.log(1 - is_expert_transition_probs[:BATCH_SIZE, :])) +
torch.sum(torch.log(is_expert_transition_probs[BATCH_SIZE:, :]))
) / (BATCH_SIZE + NB_EXPERT_SAMPLES)
discriminator_loss.backward()
wandb.log({'discriminator_loss': discriminator_loss}, commit=False)
# Compute returns for agent
returns = torch.flip(torch.cumsum(torch.flip(disc_rewards[:BATCH_SIZE], [1]), 1), [1]).detach()
elif REWARD_SHAPING_METHOD == 'none':
returns = terminal_rewards.repeat(1, episode_length)
else:
            raise ValueError(f"Unknown REWARD_SHAPING_METHOD: {REWARD_SHAPING_METHOD}")
# Compute baselines
if i > TEST_INTERVAL and BASELINE_METHOD == 'greedy':
baselines = greedy_rollout_baselines(base_pairs, actions, env, baseline_net, device=DEVICE)
else:
baselines = torch.zeros((BATCH_SIZE, episode_length), device=DEVICE)
''' Compute loss and update policy network '''
# Compute entropy penalty
if ENTROPY_REGULARISATION_WEIGHT > 0:
entropy_terms = torch.sum(all_action_probs * torch.log(all_action_probs + EPS), dim=2)
entropy_returns = torch.flip(torch.cumsum(torch.flip(entropy_terms, [1]), 1), [1])
returns = returns - ENTROPY_REGULARISATION_WEIGHT * entropy_returns
# Compute PPO loss
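        # Note: action_probs / action_probs.detach() is identically 1 on the forward pass, but keeping the
        # detached denominator lets gradients flow through the numerator, so the clipped surrogate below
        # reduces to the usual policy-gradient update when a single update is taken per sampled batch.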
action_prob_ratios = action_probs / action_probs.detach()
ppo_terms = torch.min(action_prob_ratios * (returns - baselines),
torch.clamp(action_prob_ratios, 1 - PPO_EPS, 1 + PPO_EPS) * (returns - baselines))
policy_loss = - torch.sum(ppo_terms) / BATCH_SIZE
policy_loss.backward()
optimizer.step()
wandb.log({'policy_loss': policy_loss, 'mean_terminal_reward': terminal_rewards.mean(),
'success_rate': successes.float().mean()}, commit=False)
if i % TEST_INTERVAL == 0 and i != 0:
# For storing aggregate stats over all episode lengths:
overall_mean_optimality_gap = 0
overall_success_rate = 0
for test_episode_length in range(MIN_INSTANCE_SIZE, MAX_INSTANCE_SIZE + 1):
instances, solutions, test_measures = test_data.get_batch(test_episode_length, TEST_BATCH_SIZE, DEVICE)
test_states = test_env.reset(instances=instances)
with torch.no_grad():
if TEST_DECODING_METHOD == 'greedy':
                        _, _, measures, successes = greedy_decode(test_env, test_states, net, device=DEVICE)
elif TEST_DECODING_METHOD == 'beam':
_, measures, successes = beam_search_decode(test_states, net, beam_width=test_episode_length, device=DEVICE)
optimality_gaps = (1 - test_measures / measures).masked_fill(torch.logical_not(successes), 1)
mean_optimality_gap = optimality_gaps.mean() # For this instance size
success_rate = successes.float().mean() # For this instance size
wandb.log({f'mean_optimality_gap_{test_episode_length}': mean_optimality_gap,
f'success_rate_{test_episode_length}': success_rate}, commit=False)
overall_mean_optimality_gap += mean_optimality_gap
overall_success_rate += success_rate
overall_mean_optimality_gap = overall_mean_optimality_gap / (MAX_INSTANCE_SIZE + 1 - MIN_INSTANCE_SIZE)
overall_success_rate = overall_success_rate / (MAX_INSTANCE_SIZE + 1 - MIN_INSTANCE_SIZE)
wandb.log({f'overall_mean_optimality_gap': overall_mean_optimality_gap,
f'overall_success_rate': overall_success_rate}, commit=False)
if overall_success_rate > best_success_rate and BASELINE_METHOD != 'none':
best_success_rate = overall_success_rate
baseline_net.load_state_dict(net.state_dict())
if i % SAVE_INTERVAL == 0 and i != 0:
torch.save(net.state_dict(), f'./saved_models/{wandb.run.name}/{wandb.run.name}_step_{i}_model')
wandb.log({}) # Update log counter
``` |
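One detail worth noting about the discriminator computation in the script above (an observation added here, not part of the original file): `exp(r) / (1 + exp(r))` is just `sigmoid(r)`, and taking `log` of it or of its complement can produce `nan` or `-inf` for large |r|. A numerically safer but equivalent formulation, sketched below, uses `logsigmoid` directly.
```python
import torch
import torch.nn.functional as F
disc_rewards = torch.tensor([-120.0, 0.0, 120.0])
p = torch.exp(disc_rewards) / (1 + torch.exp(disc_rewards))  # as in the script: sigmoid via exp
print(torch.log(p), torch.log(1 - p))                        # -inf / nan appear at the extremes
# Equivalent but stable: log(sigmoid(r)) and log(1 - sigmoid(r)) == log(sigmoid(-r)).
print(F.logsigmoid(disc_rewards), F.logsigmoid(-disc_rewards))
```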
{
"source": "joerowelll/COMP0127_Robotic_Systems_Engineering-Courseworks",
"score": 3
} |
#### File: cw1q5/src/cw1q5b_node.py
```python
import rospy
import numpy as np
from sensor_msgs.msg import JointState
from tf2_ros import TransformBroadcaster
from geometry_msgs.msg import TransformStamped, Quaternion
"""
To complete this assignment, you must do the following:
- Fill the "youbot_dh_parameters" dictionary with the youbot DH parameters you found in question 5a
- Complete the definition of standard_dh().
- Complete the definition of forward_kinematics().
- Complete the definition of fkine_wrapper(). To do this you should use your implementation of standard_dh() and forward_kinematics().
- Initialise the subscriber to the topic that publishes joint states and its callback function fkine_wrapper()
In case you need to implement additional functions, define them in this file.
Complete the function implementation within the indicated area and do not modify the
assertions. The assertions are there to make sure that your code will accept and return
data with specific types and formats.
You can use code you developed during previous lab sessions, just make sure you adapt to follow this template.
** Important, you can use the cw1q5b.launch file to visualize the frames. The frames reported from
your code are not supposed to match the preloaded youbot. This is why we have hidden it intentionally.
Try to move the sliders in the joint_state_publisher gui and observe your robot's behaviour. If your code
is correct, when you specify zero angles for all the joints using the robot state publisher, you should
see all the frames stacked in the z axis (the home position).
"""
# TODO: populate the values inside the youbot_dh_parameters dictionary with the ones you found in question 5a.
youbot_dh_parameters = {'a':[, , , , ],
'alpha': [, , , , ],
'd' : [, , , , ],
'theta' : [, , , , ]}
# Function for rotation matrix to quaternion conversion
def rotmat2q(T):
q = Quaternion()
tr = np.trace(T)
if tr == 4:
q.w = 1.0
q.x = 0.0
q.y = 0.0
q.z = 0.0
return q
angle = np.arccos((T[0, 0] + T[1, 1] + T[2, 2] - 1)/2)
xr = T[2, 1] - T[1, 2]
yr = T[0, 2] - T[2, 0]
zr = T[1, 0] - T[0, 1]
x = xr/np.sqrt(np.power(xr, 2) + np.power(yr, 2) + np.power(zr, 2))
y = yr/np.sqrt(np.power(xr, 2) + np.power(yr, 2) + np.power(zr, 2))
z = zr/np.sqrt(np.power(xr, 2) + np.power(yr, 2) + np.power(zr, 2))
q.w = np.cos(angle/2)
q.x = x * np.sin(angle/2)
q.y = y * np.sin(angle/2)
q.z = z * np.sin(angle/2)
return q
def standard_dh(a, alpha, d, theta):
# TODO complete the function
"""This function computes the homogeneous 4x4 transformation matrix T_i based
on the four standard DH parameters associated with link i and joint i.
Args:
        a ([int, float]): Link Length. The distance along x_i (the common normal) between z_{i-1} and z_i.
alpha ([int, float]): Link twist. The angle between z_{i-1} and z_i around x_i.
d ([int, float]): Link Offset. The distance along z_{i-1} between x_{i-1} and x_i.
theta ([int, float]): Joint angle. The angle between x_{i-1} and x_i around z_{i-1}
Returns:
[np.ndarray]: the 4x4 transformation matrix T_i describing a coordinate
transformation from the concurrent coordinate system i to the previous coordinate system i-1
"""
assert isinstance(a, (int, float)), "wrong input type for a"
    assert isinstance(alpha, (int, float)), "wrong input type for alpha"
assert isinstance(d, (int, float)), "wrong input type for d"
assert isinstance(theta, (int, float)), "wrong input type for theta"
A = np.zeros((4, 4))
# your code starts here -----------------------------
# your code ends here ------------------------------
assert isinstance(A, np.ndarray), "Output wasn't of type ndarray"
assert A.shape == (4,4), "Output had wrong dimensions"
return A
def forward_kinematics(dh_dict, joints_readings, up_to_joint=5):
# TODO complete the function
"""This function solves the forward kinematics by multiplying frame
transformations up until a specified frame number. The frame transformations
used in the computation are derived from the dh parameters and joint_readings.
Args:
dh_dict (dict): A dictionary containing the dh parameters describing the robot.
joints_readings (list): the state of the robot joints. For youbot those are revolute.
        up_to_joint (int, optional): Specify up to what frame you want to compute forward kinematics. Defaults to 5.
    Returns:
        np.ndarray: A 4x4 homogeneous transformation matrix describing the pose of frame_{up_to_joint} w.r.t. the base of the robot.
"""
assert isinstance(dh_dict, dict)
assert isinstance(joints_readings, list)
assert isinstance(up_to_joint, int)
assert up_to_joint>=0
assert up_to_joint<=len(dh_dict['a'])
T = np.identity(4)
# your code starts here ------------------------------
# your code ends here -------------------------------
assert isinstance(T, np.ndarray), "Output wasn't of type ndarray"
assert T.shape == (4,4), "Output had wrong dimensions"
return T
def fkine_wrapper(joint_msg, br):
# TODO complete the function
"""This function integrates your robotics code with ROS and is responsible
to listen to the topic where joint states are published. Based on this,
compute forward kinematics and publish it.
In more detail this function should perform the following actions:
- get joint angles from the rostopic that publishes joint data
- Publish a set of transformations relating the frame 'base_link' and
each frame on the arm 'arm5b_link_i' where i is the frame, using
tf messages.
Args:
joint_msg (JointState): ros msg containing the joint states of the robot
br (TransformBroadcaster): a tf broadcaster
"""
assert isinstance(joint_msg, JointState), "Node must subscribe to a topic where JointState messages are published"
# your code starts here ------------------------------
# your code ends here ------------------------------
def main():
rospy.init_node('forward_kinematic_node')
#Initialize your tf broadcaster.
br = TransformBroadcaster()
# TODO: Initialize a subscriber to the topic that
# publishes the joint angles, configure it to have fkine_wrapper
# as callback and pass the broadcaster as an additional argument to the callback
# your code starts here ------------------------------
# your code ends here ----------------------
rospy.spin()
if __name__ == "__main__":
main()
```
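For reference, a generic NumPy sketch of the standard DH homogeneous transform that `standard_dh()` is meant to produce (a textbook formula, shown here for illustration rather than as the coursework solution):
```python
import numpy as np
def dh_transform(a, alpha, d, theta):
    """Standard DH transform: Rot_z(theta) Trans_z(d) Trans_x(a) Rot_x(alpha)."""
    ct, st = np.cos(theta), np.sin(theta)
    ca, sa = np.cos(alpha), np.sin(alpha)
    return np.array([
        [ct, -st * ca,  st * sa, a * ct],
        [st,  ct * ca, -ct * sa, a * st],
        [0.0,      sa,       ca,      d],
        [0.0,     0.0,      0.0,    1.0],
    ])
# Forward kinematics then chains these per-joint transforms:
# T_0_n = dh_transform(a_1, alpha_1, d_1, theta_1) @ ... @ dh_transform(a_n, alpha_n, d_n, theta_n)
```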
#### File: cw1q5/src/cw1q5d_node.py
```python
import rospy
import numpy as np
from sensor_msgs.msg import JointState
from tf2_ros import TransformBroadcaster
from cw1q5b_node import forward_kinematics
from geometry_msgs.msg import TransformStamped, Quaternion # students need to add this
"""
To complete this assignment, you must do the following:
- Fill the "youbot_dh_parameters" dictionary with the youbot DH parameters you
found in question 5c
- Fill the "youbot_joint_offsets" dictionary to account for the joint offsets
between the "youbot_dh_parameters" you found and the xarco representation.
- Complete the definition of fkine_wrapper(). In this example you can use the
functions you implemented for 5b; we already imported forward_kinematics(), which
you most definitely need. If you haven't implemented forward_kinematics()
during 5b, your node will not work.
- Initialise the subscriber to the topic that publishes joint states and its callback
function fkine_wrapper()
You may need to implement additional functions, for instance to convert rotation
matrices to quaternions. If that's the case, define and implement all those functions
inside this file.
In case you need to implement additional functions, define them in this file.
Complete the function implementation within the indicated area and do not modify the
assertions. The assertions are there to make sure that your code will accept and return
data with specific types and formats.
You can use code you developed during previous lab sessions, just make sure you adapt to follow this template.
Remember, the messages on /joint_states describe joint encoder readings.
Depending on how the encoders are mounted and also on how your dh parameters have been defined,
you may need to modify the joint_states by either applying an offset, changing the
sign of the reported angle, or both. We already asked you to define an offset dictionary
which you can apply directly to dh parameters, but you also need to change the polarity
of the angle reading in order for the robot to work properly.
Running the launch file associated with this question, you should see that your frames
fall on the exact joint positions of the Youbot.
"""
# TODO: populate the values inside the youbot_dh_parameters dictionary with the ones you found in question 5c.
youbot_dh_parameters = {'a':[, , , , ],
'alpha': [, , , , ],
'd' : [, , , , ],
'theta' : [, , , , ]}
# TODO: populate the values inside the youbot_joint_offsets dictionary with the ones you found in question 5c.
youbot_joint_offsets = [, , , , ]
youbot_dh_offset_paramters = youbot_dh_parameters.copy()
youbot_dh_offset_paramters['theta']=[theta + offset for theta, offset in zip(youbot_dh_offset_paramters['theta'], youbot_joint_offsets)]
youbot_joint_readings_polarity = [-1,1,1,1,1]
# Function for rotation matrix to quaternion conversion.
def rotmat2q(T):
q = Quaternion()
tr = np.trace(T)
if tr == 4:
q.w = 1.0
q.x = 0.0
q.y = 0.0
q.z = 0.0
return q
angle = np.arccos((T[0, 0] + T[1, 1] + T[2, 2] - 1)/2)
xr = T[2, 1] - T[1, 2]
yr = T[0, 2] - T[2, 0]
zr = T[1, 0] - T[0, 1]
x = xr/np.sqrt(np.power(xr, 2) + np.power(yr, 2) + np.power(zr, 2))
y = yr/np.sqrt(np.power(xr, 2) + np.power(yr, 2) + np.power(zr, 2))
z = zr/np.sqrt(np.power(xr, 2) + np.power(yr, 2) + np.power(zr, 2))
q.w = np.cos(angle/2)
q.x = x * np.sin(angle/2)
q.y = y * np.sin(angle/2)
q.z = z * np.sin(angle/2)
return q
def fkine_wrapper(joint_msg, br):
# TODO complete the function
"""This function integrates your robotics code with ROS and is responsible
to listen to the topic where joint states are published. Based on this,
compute forward kinematics and publish it.
In more detail this function should perform the following actions:
- get joint angles from the rostopic that publishes joint data
- Publish a set of transformations relating the frame 'base_link' and
each frame on the arm 'arm5d_link_i' where i is the frame, using
tf messages.
Args:
joint_msg (JointState): ros msg containing the joint states of the robot
br (TransformBroadcaster): a tf broadcaster
"""
assert isinstance(joint_msg, JointState), "Node must subscribe to a topic where JointState messages are published"
# your code starts here ------------------------------
#depending on the dh parameters you may need to change the sign of some angles here
# your code ends here ------------------------------
def main():
rospy.init_node('forward_kinematic_node')
#Initialize your tf broadcaster.
br = TransformBroadcaster()
# TODO: Initialize a subscriber to the topic that
# publishes the joint angles, configure it to have fkine_wrapper
# as callback and pass the broadcaster as an additional argument to the callback
# your code starts here ------------------------------
# your code ends here ----------------------
rospy.spin()
if __name__ == "__main__":
main()
```
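A short sanity check of the axis-angle formulas used by `rotmat2q()` above, added for illustration: a 90 degree rotation about z should give a quaternion with w = z ≈ 0.7071 and x = y = 0.
```python
import numpy as np
T = np.array([[0.0, -1.0, 0.0, 0.0],   # homogeneous transform: 90 degree rotation about z
              [1.0,  0.0, 0.0, 0.0],
              [0.0,  0.0, 1.0, 0.0],
              [0.0,  0.0, 0.0, 1.0]])
angle = np.arccos((T[0, 0] + T[1, 1] + T[2, 2] - 1) / 2)                    # pi/2
axis = np.array([T[2, 1] - T[1, 2], T[0, 2] - T[2, 0], T[1, 0] - T[0, 1]])
axis = axis / np.linalg.norm(axis)                                          # [0, 0, 1]
print(np.cos(angle / 2), axis * np.sin(angle / 2))                          # 0.7071 [0. 0. 0.7071]
```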
#### File: COMP0127_Robotic_Systems_Engineering-Courseworks/cw1/Q4_client.py
```python
import rospy
import numpy as np
import random
import time
#TODO: import the SRV file from its corresponding folder, as well as its Request
from cw1q4.srv import quat2rodrigues
from cw1q4.srv import quat2rodriguesRequest
from cw1q4.srv import quat2zyx
from cw1q4.srv import quat2zyxRequest
import math
def point_rotation_client():
"rospy.wait_for_service('quat2zyx')"
rospy.wait_for_service('quat2rodrigues')
while not rospy.is_shutdown():
#TODO: Initialise the ROS service client. It takes two arguments: The name of the service, and the service definition.
"client_euler = rospy.ServiceProxy('quat2zyx', quat2zyx)"
client_axisAngle = rospy.ServiceProxy('quat2rodrigues', quat2rodrigues)
req = quat2rodriguesRequest()
"req2 = quat2zyxRequest()"
#TODO: create a random request point, and a random request quaternion
quaternion = np.random.rand(4)
quaternion = quaternion / np.linalg.norm(quaternion)
"""req2.q.x = quaternion[0]
req2.q.y = quaternion[1]
req2.q.z = quaternion[2]
req2.q.w = quaternion[3]
"""
req.q.x = quaternion[0]
req.q.y = quaternion[1]
req.q.z = quaternion[2]
req.q.w = quaternion[3]
"print(req2)"
print(req)
"""
res2 = client_euler(req2)
print('euler angle is:')
print(res2)
"""
res = client_axisAngle(req)
print('axis angle is:')
print(res)
time.sleep(3)
if __name__ == '__main__':
try:
point_rotation_client()
except rospy.ROSInterruptException:
pass
```
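For context, the math behind the `quat2rodrigues` service being exercised above reduces to the standard quaternion-to-axis-angle conversion; a plain NumPy sketch (illustrative only, not the actual service implementation) follows.
```python
import numpy as np
def quaternion_to_axis_angle(x, y, z, w):
    """Return (axis, angle) for a unit quaternion with components (x, y, z, w)."""
    angle = 2.0 * np.arccos(np.clip(w, -1.0, 1.0))
    s = np.sqrt(max(1.0 - w * w, 0.0))        # equals sin(angle / 2)
    if s < 1e-9:                              # near-zero rotation: axis is arbitrary
        return np.array([1.0, 0.0, 0.0]), 0.0
    return np.array([x, y, z]) / s, angle
q = np.random.rand(4)
q = q / np.linalg.norm(q)                     # same kind of random unit quaternion the client above builds
axis, angle = quaternion_to_axis_angle(*q)
print(axis, angle)                            # the Rodrigues vector would be axis * angle
```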
#### File: cw2q6/src/cw2q6_node.py
```python
import numpy as np
from scipy.linalg import expm
from scipy.linalg import logm
from scipy.linalg import inv
import rospy
import rosbag
import rospkg
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from cw2q4.youbotKineKDL import YoubotKinematicKDL
import PyKDL
from visualization_msgs.msg import Marker
from itertools import permutations
class YoubotTrajectoryPlanning(object):
def __init__(self):
# Initialize node
rospy.init_node('youbot_traj_cw2', anonymous=True)
# Save question number for check in main run method
self.kdl_youbot = YoubotKinematicKDL()
# Create trajectory publisher and a checkpoint publisher to visualize checkpoints
self.traj_pub = rospy.Publisher('/EffortJointInterface_trajectory_controller/command', JointTrajectory,
queue_size=5)
self.checkpoint_pub = rospy.Publisher("checkpoint_positions", Marker, queue_size=100)
def run(self):
"""This function is the main run function of the class. When called, it runs question 6 by calling the q6()
function to get the trajectory. Then, the message is filled out and published to the /command topic.
"""
print("run q6a")
rospy.loginfo("Waiting 5 seconds for everything to load up.")
rospy.sleep(2.0)
traj = self.q6()
traj.header.stamp = rospy.Time.now()
traj.joint_names = ["arm_joint_1", "arm_joint_2", "arm_joint_3", "arm_joint_4", "arm_joint_5"]
self.traj_pub.publish(traj)
def q6(self):
""" This is the main q6 function. Here, other methods are called to create the shortest path required for this
question. Below, a general step-by-step is given as to how to solve the problem.
Returns:
traj (JointTrajectory): A list of JointTrajectory points giving the robot joint positions to achieve in a
given time period.
"""
# Steps to solving Q6.
# 1. Load in targets from the bagfile (checkpoint data and target joint positions).
# 2. Compute the shortest path achievable visiting each checkpoint Cartesian position.
# 3. Determine intermediate checkpoints to achieve a linear path between each checkpoint and have a full list of
# checkpoints the robot must achieve. You can publish them to see if they look correct. Look at slides 39 in lecture 7
# 4. Convert all the checkpoints into joint values using an inverse kinematics solver.
# 5. Create a JointTrajectory message.
# Your code starts here ------------------------------
# TODO
#Create object (not necessary) youbot_traj_plan = YoubotTrajectoryPlanning()
#Load targets from bagfile
[target_cart_tf, target_joint_positions] = self.load_targets()
#Sort targets to find shortest path
[sorted_order, min_dist, index_shortest_dist] = self.get_shortest_path(target_cart_tf)
        #Find intermediate points between checkpoints to ensure straight line path
#num_points = 5, 5 intermediate points between checkpoints, for smooth straight movement
full_checkpoint_tfs = self.intermediate_tfs(index_shortest_dist, target_cart_tf, 5)
#This function gets a np.ndarray of transforms and publishes them in a color coded fashion to show how the
#Cartesian path of the robot end-effector.
self.publish_traj_tfs(full_checkpoint_tfs)
#This function converts checkpoint transformations (including intermediates) into joint positions
init_joint_position = np.array(target_joint_positions[:,0])
q_checkpoints = self.full_checkpoints_to_joints(full_checkpoint_tfs, init_joint_position) #What is init_joint_position in this?
traj = JointTrajectory()
dt = 2
t = 10
for i in range(q_checkpoints.shape[1]):
traj_point = JointTrajectoryPoint()
traj_point.positions = q_checkpoints[:, i]
t += dt
traj_point.time_from_start.secs = t
traj.points.append(traj_point)
#This function converts joint positions to a kdl array
#kdl_array = self.list_to_kdl_jnt_array(q_checkpoints) # is this traj??no
# Your code ends here ------------------------------
assert isinstance(traj, JointTrajectory)
return traj
def load_targets(self):
"""This function loads the checkpoint data from the 'data.bag' file. In the bag file, you will find messages
relating to the target joint positions. You need to use forward kinematics to get the goal end-effector position.
Returns:
target_cart_tf (4x4x5 np.ndarray): The target 4x4 homogenous transformations of the checkpoints found in the
bag file. There are a total of 5 transforms (4 checkpoints + 1 initial starting cartesian position).
target_joint_positions (5x5 np.ndarray): The target joint values for the 4 checkpoints + 1 initial starting
position.
"""
# Defining ros package path
rospack = rospkg.RosPack()
path = rospack.get_path('cw2q6')
# Initialize arrays for checkpoint transformations and joint positions
target_joint_positions = np.zeros((5, 5))
# Create a 4x4 transformation matrix, then stack 6 of these matrices together for each checkpoint
target_cart_tf = np.repeat(np.identity(4), 5, axis=1).reshape((4, 4, 5))
# Load path for selected question
bag = rosbag.Bag(path + '/bags/data.bag')
# Get the current starting position of the robot
target_joint_positions[:, 0] = self.kdl_youbot.kdl_jnt_array_to_list(self.kdl_youbot.current_joint_position)
# Initialize the first checkpoint as the current end effector position
target_cart_tf[:, :, 0] = self.kdl_youbot.forward_kinematics(target_joint_positions[:, 0])
# Your code starts here ------------------------------
#if len(sys.argv) != 2:
        #    sys.stderr.write('[ERROR] This script only takes input bag file as argument.\n')
#else:
# inputFileName = sys.argv[1]
# print "[OK] Found bag: %s" % inputFileName
topicList = []
i = 1
for topic, msgs, t in bag.read_messages(['joint_data']):
target_joint_positions[:,i] = msgs.position
target_cart_tf[:,:,i] = self.kdl_youbot.forward_kinematics(target_joint_positions[:,i], 5)
i+=1
my_pt = JointTrajectoryPoint()
if topicList.count(topic) == 0:
topicList.append(topic)
#print '{0} topics found:'.format(len(topicList))
#print(target_cart_tf)
# Your code ends here ------------------------------
# Close the bag
bag.close()
assert isinstance(target_cart_tf, np.ndarray)
assert target_cart_tf.shape == (4, 4, 5)
assert isinstance(target_joint_positions, np.ndarray)
assert target_joint_positions.shape == (5, 5)
return target_cart_tf, target_joint_positions
def get_shortest_path(self, checkpoints_tf):
"""This function takes the checkpoint transformations and computes the order of checkpoints that results
in the shortest overall path.
Args:
checkpoints_tf (np.ndarray): The target checkpoint 4x4 transformations.
Returns:
sorted_order (np.array): An array of size 5 indicating the order of checkpoint
min_dist: (float): The associated distance to the sorted order giving the total estimate for travel
distance.
"""
# Your code starts here ------------------------------
#Calculate the distance between all points, then choose the shortest distances in the cost matrix
#print(checkpoints_tf.shape)
checkpoints = []
perm = permutations(checkpoints_tf)
for i in range(checkpoints_tf.shape[2]):
#checkpoints[i] = checkpoints_tf[0:3, 3, i]
#print(checkpoints_tf[0:3,3])
checkpoints.append(checkpoints_tf[0:3, 3, i])
# get checkpoint coordinates from checkpoint transformation matrix, rows 1-3 of last column
# Calculate cost matrix, distance between all n points, giving n x n matrix
checkpoints= np.array(checkpoints)
cost_matrix = np.zeros((checkpoints.shape[0], checkpoints.shape[0]))
for i in range(checkpoints.shape[0]):
for j in range(checkpoints.shape[0]):
cost_matrix[i,j] = np.sqrt((checkpoints[i][0] - checkpoints[j][0])**2 + (checkpoints[i][1] - checkpoints[j][1])**2 + (checkpoints[i][2] - checkpoints[j][2])**2)
            #Make diagonals infinite so that distance between one point and itself isn't chosen
cost_matrix[i,i] = np.inf
# distance between each cartesian point
# Find shortest path using Greedy algorithm
index_shortest_dist = []
shortest_dist = cost_matrix[:,i].min() # get minimum in each column ( shortest distance from first point) and next etc
index = np.argmin(cost_matrix[:,1])
index_shortest_dist.append(index)
i = 0
min_dist = 0
while (i<6):
#for i in range(1,5):
shortest_dist = cost_matrix[:,index].min() # get minimum in each column ( shortest distance from first point) and next etc
index = np.argmin(cost_matrix[:,index])
index_shortest_dist.append(index) # add the index of the shortest distance
cost_matrix[index,:] = np.inf #remove previous row from next loop by making distance infinite
min_dist += shortest_dist # Add each shortest dist to get total min dist
i+=1
#Sort checkpoints into order dictated by index_shortest_dist
sorted_order = []
for i in range(5):
sorted_order.append(checkpoints[index_shortest_dist[i]])
# this will Append and sort checkpoints in order of shortest path
# Your code ends here ------------------------------
#assert isinstance(sorted_order, np.ndarray)
#assert sorted_order.shape == (5,)
assert isinstance(min_dist, float)
#return sorted_order
return sorted_order, min_dist, index_shortest_dist
def publish_traj_tfs(self, tfs):
"""This function gets a np.ndarray of transforms and publishes them in a color coded fashion to show how the
Cartesian path of the robot end-effector.
Args:
tfs (np.ndarray): A array of 4x4xn homogenous transformations specifying the end-effector trajectory.
"""
id = 0
        for i in range(0, tfs.shape[2]):
marker = Marker()
marker.id = id
id += 1
marker.header.frame_id = 'base_link'
marker.header.stamp = rospy.Time.now()
marker.type = marker.SPHERE
marker.action = marker.ADD
marker.scale.x = 0.01
marker.scale.y = 0.01
marker.scale.z = 0.01
marker.color.a = 1.0
marker.color.r = 0.0
marker.color.g = 0.0 + id * 0.05
marker.color.b = 1.0 - id * 0.05
marker.pose.orientation.w = 1.0
marker.pose.position.x = tfs[0,-1, i]
marker.pose.position.y = tfs[1,-1,i]
marker.pose.position.z = tfs[2,-1, i]
self.checkpoint_pub.publish(marker)
def intermediate_tfs(self, sorted_checkpoint_idx, target_checkpoint_tfs, num_points):
"""This function takes the target checkpoint transforms and the desired order based on the shortest path sorting,
and calls the decoupled_rot_and_trans() function.
Args:
sorted_checkpoint_idx (list): List describing order of checkpoints to follow.
            target_checkpoint_tfs (np.ndarray): 4x4x5 homogeneous transformations of the target checkpoints.
num_points (int): Number of intermediate points between checkpoints.
Returns:
full_checkpoint_tfs: 4x4x(4xnum_points+5) homogeneous transformations matrices describing the full desired
poses of the end-effector position.
"""
# Your code starts here ------------------------------
#TODO
full_checkpoint_tfs= np.repeat(np.identity(4), (4* num_points) +5, axis=1).reshape((4, 4, (4* num_points) +5))
full_checkpoint_tfs[:,:,0] = target_checkpoint_tfs[:,:,0]
#full_checkpoint_tfs= [target_checkpoint_tfs[0]]
sorted_checkpoint_tfs = []
#print(target_checkpoint_tfs)
for i in range(5):
sorted_checkpoint_tfs.append(target_checkpoint_tfs[:,:,sorted_checkpoint_idx[i]])
#print(sorted_checkpoint_tfs[2].shape)
for i in range(1,4):
full_checkpoint_tfs[:,:,i] = (self.decoupled_rot_and_trans(sorted_checkpoint_tfs[i], sorted_checkpoint_tfs[i+1], num_points))
full_checkpoint_tfs[:,:,i+1] = (target_checkpoint_tfs[:,:,i+1])# append target after initial point, between intermediate points.
#print(len(full_checkpoint_tfs))
# Your code ends here ------------------------------
return full_checkpoint_tfs
def decoupled_rot_and_trans(self, checkpoint_a_tf, checkpoint_b_tf, num_points):
"""This function takes two checkpoint transforms and computes the intermediate transformations
that follow a straight line path by decoupling rotation and translation.
Args:
checkpoint_a_tf (np.ndarray): 4x4 transformation describing pose of checkpoint a.
checkpoint_b_tf (np.ndarray): 4x4 transformation describing pose of checkpoint b.
num_points (int): Number of intermediate points between checkpoint a and checkpoint b.
Returns:
            tfs: 4x4x(num_points) homogeneous transformation matrices describing the full desired
poses of the end-effector position from checkpoint a to checkpoint b following a linear path.
"""
# Your code starts here ------------------------------
# tfs = combined rot and trans
t = 1.0 / (num_points + 1)
        a_rot = np.empty([3,3])
        b_rot = np.empty([3,3])
        a_trans = []
        b_trans = []
        #print('hello')
        #print(checkpoint_a_tf)
        for i in range(3):
            for j in range(3):
                a_rot[i,j] = checkpoint_a_tf[i,j]  # rotation is the top-left 3x3 block of the homogeneous transform
                b_rot[i,j] = checkpoint_b_tf[i,j]
for i in range(3):
b_trans.append(checkpoint_b_tf[i, -1])
a_trans.append(checkpoint_a_tf[i, -1])
c = [b_trans - a_trans for b_trans, a_trans in zip(b_trans, a_trans)]
        c = np.array(c)
        trans = np.array(a_trans) + t * c  # linear interpolation of the translation component
#trans = a_trans + t * (b_trans - a_trans)
rot = np.matmul(a_rot,expm((logm(np.matmul(np.linalg.inv(a_rot) , b_rot))) * t))
tfs = np.empty([4,4])
        for i in range(3):
            for j in range(3):
                tfs[i,j] = rot[i,j]
#tfs[0:3,0:3] = rot
for i in range(3):
tfs[i, 3] = trans[i] # for loop?? # should be 3x1
tfs[3,:] = [0,0,0,1]
tfs = np.array(tfs)
#Combine back into one matrix
# Your code ends here ------------------------------
return tfs
def full_checkpoints_to_joints(self, full_checkpoint_tfs, init_joint_position):
"""This function takes the full set of checkpoint transformations, including intermediate checkpoints,
and computes the associated joint positions by calling the ik_position_only() function.
Args:
full_checkpoint_tfs (np.ndarray, 4x4xn): 4x4xn transformations describing all the desired poses of the end-effector
to follow the desired path. (4x4x(4xnum_points+5))
init_joint_position (np.ndarray):A 5x1 array for the initial joint position of the robot.
Returns:
q_checkpoints (np.ndarray, 5xn): For each pose, the solution of the position IK to get the joint position
for that pose.
"""
# Your code starts here ------------------------------
q_checkpoints = []
        q = init_joint_position
        for i in range(full_checkpoint_tfs.shape[2]):
            error = 10  # reset the error for each checkpoint
            iter_count = 0  # reset the iteration guard for each checkpoint as well
while (error >= 0.1):
[q, error] = self.ik_position_only(full_checkpoint_tfs[:,:,i], q, alpha = 0.1)
iter_count += 1
if (iter_count > 10000):
break
            q_checkpoints.append(q)  # check the indexing of full_checkpoint_tfs
q_checkpoints = np.array(q_checkpoints)
#Append position only inverse kinematic solution to the q_checkpoints, taking one sheet of 3d matrix full_checkpoint_tfs at once.
# Your code ends here ------------------------------
return q_checkpoints
def ik_position_only(self, pose, q0, alpha = 0.1):
"""This function implements position only inverse kinematics.
Args:
pose (np.ndarray, 4x4): 4x4 transformations describing the pose of the end-effector position.
q0 (np.ndarray, 5x1):A 5x1 array for the initial starting point of the algorithm.
Returns:
q (np.ndarray, 5x1): The IK solution for the given pose.
error (float): The Cartesian error of the solution.
"""
# Some useful notes:
# We are only interested in position control - take only the position part of the pose as well as elements of the
# Jacobian that will affect the position of the error.
# Your code starts here ------------------------------
Pd = pose[:3, 3].ravel()
q = q0
q = np.array(q)
q0 = np.array(q0)
J = self.kdl_youbot.get_jacobian(q0)[:3, :]
J= np.array(J)
# Take only first 3 rows as position only solution.
P = np.array(self.kdl_youbot.forward_kinematics(q0))[:3, -1]
e = Pd - P.ravel()
e = np.array(e)
q += alpha * np.matmul(J.T, e)
error = np.linalg.norm(e)
# Your code ends here ------------------------------
return q, error
@staticmethod
def list_to_kdl_jnt_array(joints):
"""This converts a list to a KDL jnt array.
Args:
joints (joints): A list of the joint values.
Returns:
kdl_array (PyKDL.JntArray): JntArray object describing the joint position of the robot.
"""
kdl_array = PyKDL.JntArray(5)
for i in range(0, 5):
kdl_array[i] = joints[i]
return kdl_array
if __name__ == '__main__':
try:
youbot_planner = YoubotTrajectoryPlanning()
youbot_planner.run()
rospy.spin()
#h = self.list_to_kdl_jnt_array(joints)
#print(h)
except rospy.ROSInterruptException:
pass
``` |
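As a sanity check on the decoupled interpolation idea used above, here is a minimal standalone sketch (not the class method itself): translation is interpolated linearly, rotation through the matrix log/exp of the relative rotation. It assumes NumPy and SciPy are available.

```python
# Standalone sketch of decoupled rotation/translation interpolation between
# two 4x4 homogeneous transforms. Illustration only, not the class method above.
import numpy as np
from scipy.linalg import expm, logm

def interp_pose(tf_a, tf_b, t):
    """Pose at fraction t in [0, 1] along the path from tf_a to tf_b."""
    rot_a, rot_b = tf_a[:3, :3], tf_b[:3, :3]
    rot = rot_a @ expm(t * logm(np.linalg.inv(rot_a) @ rot_b))
    trans = tf_a[:3, 3] + t * (tf_b[:3, 3] - tf_a[:3, 3])
    tf = np.eye(4)
    tf[:3, :3] = np.real(rot)  # logm can leave negligible imaginary parts
    tf[:3, 3] = trans
    return tf

# Example: halfway between the identity and a 90-degree rotation about z
# combined with a unit translation along x.
a = np.eye(4)
b = np.eye(4)
b[:3, :3] = [[0, -1, 0], [1, 0, 0], [0, 0, 1]]
b[0, 3] = 1.0
print(np.round(interp_pose(a, b, 0.5), 3))  # ~45-degree rotation, x = 0.5
```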
{
"source": "JoeRu/bwh-prometheus-exporter",
"score": 2
} |
#### File: bwh-prometheus-exporter/bwh/darknet_lib.py
```python
from ctypes import *
import math
import random
import os
netMain = None
metaMain = None
altNames = None
import logging
#-------------Output Logger
# create logger
logger = logging.getLogger(os.path.basename(__file__))
#logger.setLevel(logging.INFO)
logger.setLevel(logging.INFO)
# create console handler with a higher log level
ch = logging.StreamHandler()
#ch.setLevel(logging.INFO)
ch.setLevel(logging.INFO)
# create file handler which logs even debug messages
fh = logging.FileHandler(os.path.basename(__file__)+'.log')
fh.setLevel(logging.ERROR)
# create formatter and add it to the handlers
#formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
def sample(probs):
s = sum(probs)
probs = [a/s for a in probs]
r = random.uniform(0, 1)
for i in range(len(probs)):
r = r - probs[i]
if r <= 0:
return i
return len(probs)-1
def c_array(ctype, values):
arr = (ctype*len(values))()
arr[:] = values
return arr
class BOX(Structure):
_fields_ = [("x", c_float),
("y", c_float),
("w", c_float),
("h", c_float)]
class DETECTION(Structure):
_fields_ = [("bbox", BOX),
("classes", c_int),
("prob", POINTER(c_float)),
("mask", POINTER(c_float)),
("objectness", c_float),
("sort_class", c_int)]
class IMAGE(Structure):
_fields_ = [("w", c_int),
("h", c_int),
("c", c_int),
("data", POINTER(c_float))]
class METADATA(Structure):
_fields_ = [("classes", c_int),
("names", POINTER(c_char_p))]
hasGPU = True
if os.name == "nt":
cwd = os.path.dirname(__file__)
os.environ['PATH'] = cwd + ';' + os.environ['PATH']
winGPUdll = os.path.join(cwd, "yolo_cpp_dll.dll")
winNoGPUdll = os.path.join(cwd, "yolo_cpp_dll_nogpu.dll")
envKeys = list()
for k, v in os.environ.items():
envKeys.append(k)
try:
try:
tmp = os.environ["FORCE_CPU"].lower()
if tmp in ["1", "true", "yes", "on"]:
raise ValueError("ForceCPU")
else:
logger.info("Flag value '"+tmp+"' not forcing CPU mode")
except KeyError:
# We never set the flag
if 'CUDA_VISIBLE_DEVICES' in envKeys:
if int(os.environ['CUDA_VISIBLE_DEVICES']) < 0:
raise ValueError("ForceCPU")
try:
global DARKNET_FORCE_CPU
if DARKNET_FORCE_CPU:
raise ValueError("ForceCPU")
except NameError:
pass
# logger.info(os.environ.keys())
# logger.info("FORCE_CPU flag undefined, proceeding with GPU")
if not os.path.exists(winGPUdll):
raise ValueError("NoDLL")
lib = CDLL(winGPUdll, RTLD_GLOBAL)
except (KeyError, ValueError):
hasGPU = False
if os.path.exists(winNoGPUdll):
lib = CDLL(winNoGPUdll, RTLD_GLOBAL)
logger.info("Notice: CPU-only mode")
else:
# Try the other way, in case no_gpu was
            # compiled but not renamed
lib = CDLL(winGPUdll, RTLD_GLOBAL)
logger.info("Environment variables indicated a CPU run, but we didn't find `"+winNoGPUdll+"`. Trying a GPU run anyway.")
else:
lib = CDLL("./libdarknet.so", RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int
copy_image_from_bytes = lib.copy_image_from_bytes
copy_image_from_bytes.argtypes = [IMAGE,c_char_p]
def network_width(net):
return lib.network_width(net)
def network_height(net):
return lib.network_height(net)
predict = lib.network_predict_ptr
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)
if hasGPU:
set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int), c_int]
get_network_boxes.restype = POINTER(DETECTION)
make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]
free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]
network_predict = lib.network_predict_ptr
network_predict.argtypes = [c_void_p, POINTER(c_float)]
reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p
load_net_custom = lib.load_network_custom
load_net_custom.argtypes = [c_char_p, c_char_p, c_int, c_int]
load_net_custom.restype = c_void_p
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
free_image = lib.free_image
free_image.argtypes = [IMAGE]
letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE
rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)
def array_to_image(arr):
import numpy as np
# need to return old values to avoid python freeing memory
arr = arr.transpose(2,0,1)
c = arr.shape[0]
h = arr.shape[1]
w = arr.shape[2]
arr = np.ascontiguousarray(arr.flat, dtype=np.float32) / 255.0
data = arr.ctypes.data_as(POINTER(c_float))
im = IMAGE(w,h,c,data)
return im, arr
def classify(net, meta, im):
out = predict_image(net, im)
res = []
for i in range(meta.classes):
if altNames is None:
nameTag = meta.names[i]
else:
nameTag = altNames[i]
res.append((nameTag, out[i]))
res = sorted(res, key=lambda x: -x[1])
return res
def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
"""
Performs the meat of the detection
"""
#pylint: disable= C0321
im = load_image(image, 0, 0)
logger.debug("Loaded image")
ret = detect_image(net, meta, im, thresh, hier_thresh, nms)
free_image(im)
logger.debug("freed image")
return ret
def detect_image(net, meta, im, thresh=.5, hier_thresh=.5, nms=.45):
#import cv2
#custom_image_bgr = cv2.imread(image) # use: detect(,,imagePath,)
#custom_image = cv2.cvtColor(custom_image_bgr, cv2.COLOR_BGR2RGB)
#custom_image = cv2.resize(custom_image,(lib.network_width(net), lib.network_height(net)), interpolation = cv2.INTER_LINEAR)
#import scipy.misc
#custom_image = scipy.misc.imread(image)
#im, arr = array_to_image(custom_image) # you should comment line below: free_image(im)
num = c_int(0)
logger.debug("Assigned num")
pnum = pointer(num)
logger.debug("Assigned pnum")
predict_image(net, im)
logger.debug("did prediction")
#dets = get_network_boxes(net, custom_image_bgr.shape[1], custom_image_bgr.shape[0], thresh, hier_thresh, None, 0, pnum, 0) # OpenCV
dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum, 0)
logger.debug("Got dets")
num = pnum[0]
logger.debug("got zeroth index of pnum")
if nms:
do_nms_sort(dets, num, meta.classes, nms)
logger.debug("did sort")
res = []
logger.debug("about to range")
for j in range(num):
logger.debug("Ranging on "+str(j)+" of "+str(num))
logger.debug("Classes: {},{},{}".format(str(meta), meta.classes, meta.names))
for i in range(meta.classes):
logger.debug("Class-ranging on "+str(i)+" of "+str(meta.classes)+"= "+str(dets[j].prob[i]))
if dets[j].prob[i] > 0:
b = dets[j].bbox
if altNames is None:
nameTag = meta.names[i]
else:
nameTag = altNames[i]
logger.debug("Got bbox {}".format(b))
logger.debug(nameTag)
logger.debug(dets[j].prob[i])
logger.debug((b.x, b.y, b.w, b.h))
res.append((nameTag, dets[j].prob[i], (b.x, b.y, b.w, b.h)))
logger.debug("did range")
res = sorted(res, key=lambda x: -x[1])
logger.debug("did sort")
free_detections(dets, num)
logger.debug("freed detections")
return res
def performDetect(imagePath="test.jpg", thresh= 0.35, configPath = "./cfg/bwh.cfg", weightPath = "./cfg/bwh.weights", metaPath= "./cfg/bwh.data", showImage= False, makeImageOnly = False, initOnly= False):
"""
Convenience function to handle the detection and returns of objects.
Displaying bounding boxes requires libraries scikit-image and numpy
Parameters
----------------
imagePath: str
Path to the image to evaluate. Raises ValueError if not found
    thresh: float (default= 0.35)
The detection threshold
configPath: str
Path to the configuration file. Raises ValueError if not found
weightPath: str
Path to the weights file. Raises ValueError if not found
metaPath: str
Path to the data file. Raises ValueError if not found
    showImage: bool (default= False)
Compute (and show) bounding boxes. Changes return.
makeImageOnly: bool (default= False)
If showImage is True, this won't actually *show* the image, but will create the array and return it.
initOnly: bool (default= False)
Only initialize globals. Don't actually run a prediction.
Returns
----------------------
When showImage is False, list of tuples like
('obj_label', confidence, (bounding_box_x_px, bounding_box_y_px, bounding_box_width_px, bounding_box_height_px))
The X and Y coordinates are from the center of the bounding box. Subtract half the width or height to get the lower corner.
Otherwise, a dict with
{
"detections": as above
"image": a numpy array representing an image, compatible with scikit-image
"caption": an image caption
}
"""
# Import the global variables. This lets us instance Darknet once, then just call performDetect() again without instancing again
global metaMain, netMain, altNames #pylint: disable=W0603
assert 0 < thresh < 1, "Threshold should be a float between zero and one (non-inclusive)"
if not os.path.exists(configPath):
raise ValueError("Invalid config path `"+os.path.abspath(configPath)+"`")
if not os.path.exists(weightPath):
raise ValueError("Invalid weight path `"+os.path.abspath(weightPath)+"`")
if not os.path.exists(metaPath):
raise ValueError("Invalid data file path `"+os.path.abspath(metaPath)+"`")
if netMain is None:
netMain = load_net_custom(configPath.encode("ascii"), weightPath.encode("ascii"), 0, 1) # batch size = 1
if metaMain is None:
metaMain = load_meta(metaPath.encode("ascii"))
if altNames is None:
# In Python 3, the metafile default access craps out on Windows (but not Linux)
# Read the names file and create a list to feed to detect
try:
with open(metaPath) as metaFH:
metaContents = metaFH.read()
import re
match = re.search("names *= *(.*)$", metaContents, re.IGNORECASE | re.MULTILINE)
if match:
result = match.group(1)
else:
result = None
try:
if os.path.exists(result):
with open(result) as namesFH:
namesList = namesFH.read().strip().split("\n")
altNames = [x.strip() for x in namesList]
except TypeError:
pass
except Exception:
pass
if initOnly:
logger.info("Initialized detector")
return None
if not os.path.exists(imagePath):
raise ValueError("Invalid image path `"+os.path.abspath(imagePath)+"`")
# Do the detection
#detections = detect(netMain, metaMain, imagePath, thresh) # if is used cv2.imread(image)
detections = detect(netMain, metaMain, imagePath.encode("ascii"), thresh)
return detections
from skimage import io, draw
import numpy as np
def make_image(imagePath, detections):
"""
returns "image": a numpy array representing an image, compatible with scikit-image
"""
image = io.imread(imagePath)
logger.info("*** "+str(len(detections))+" Results, color coded by confidence ***")
imcaption = []
for detection in detections:
label = detection[0]
confidence = detection[1]
pstring = str(label.decode("ascii"))+": "+str(np.rint(100 * confidence))+"%"
imcaption.append(pstring)
logger.info(pstring)
bounds = detection[2]
shape = image.shape
yExtent = int(bounds[3])
xEntent = int(bounds[2])
# Coordinates are around the center
xCoord = int(bounds[0] - bounds[2]/2)
yCoord = int(bounds[1] - bounds[3]/2)
boundingBox = [
[xCoord, yCoord],
[xCoord, yCoord + yExtent],
[xCoord + xEntent, yCoord + yExtent],
[xCoord + xEntent, yCoord]
]
# Wiggle it around to make a 3px border
rr, cc = draw.polygon_perimeter([x[1] for x in boundingBox], [x[0] for x in boundingBox], shape= shape)
rr2, cc2 = draw.polygon_perimeter([x[1] + 1 for x in boundingBox], [x[0] for x in boundingBox], shape= shape)
rr3, cc3 = draw.polygon_perimeter([x[1] - 1 for x in boundingBox], [x[0] for x in boundingBox], shape= shape)
rr4, cc4 = draw.polygon_perimeter([x[1] for x in boundingBox], [x[0] + 1 for x in boundingBox], shape= shape)
rr5, cc5 = draw.polygon_perimeter([x[1] for x in boundingBox], [x[0] - 1 for x in boundingBox], shape= shape)
boxColor = (int(255 * (1 - (confidence ** 2))), int(255 * (confidence ** 2)), 0)
draw.set_color(image, (rr, cc), boxColor, alpha= 0.8)
draw.set_color(image, (rr2, cc2), boxColor, alpha= 0.8)
draw.set_color(image, (rr3, cc3), boxColor, alpha= 0.8)
draw.set_color(image, (rr4, cc4), boxColor, alpha= 0.8)
draw.set_color(image, (rr5, cc5), boxColor, alpha= 0.8)
return image
``` |
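A hypothetical call into the wrapper above, assuming the package is importable as `bwh`. The cfg/weights/data paths are just the defaults baked into performDetect(), so treat them as placeholders for a real deployment.

```python
# Hypothetical usage sketch for the ctypes wrapper above. Paths are the
# defaults from performDetect() and stand in for a real model deployment.
from bwh import darknet_lib

detections = darknet_lib.performDetect(
    imagePath="test.jpg",
    thresh=0.35,
    configPath="./cfg/bwh.cfg",
    weightPath="./cfg/bwh.weights",
    metaPath="./cfg/bwh.data",
    showImage=False,
)
for label, confidence, (x, y, w, h) in detections:
    # labels come back as bytes when read straight from the metadata struct
    name = label.decode("ascii") if isinstance(label, bytes) else label
    print(f"{name}: {confidence:.2%} centred at ({x:.0f}, {y:.0f}), {w:.0f}x{h:.0f}")
```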
{
"source": "joeryan/100days",
"score": 4
} |
#### File: joeryan/100days/calc.py
```python
from random import randint
from numbers import Number
from operator import xor
def add(num1, num2):
return dice_num(num1) + dice_num(num2)
def sub(num1, num2):
return dice_num(num1) - dice_num(num2)
def mult(num1, num2):
accum = 0
if isinstance(num1, Number) and isinstance(num2, Number):
accum = num1 * num2
if type(num1) == str and type(num2) == str:
accum = dice_num(num1) * dice_num(num2)
if type(num1) == str:
for _ in range(1,abs(num2)+1):
accum += dice_num(num1)
if num2 < 0:
accum = -accum
else:
for _ in range(1,abs(num1)+1):
accum += dice_num(num2)
if num1 < 0:
accum = -accum
return accum
def div(num1, num2):
if not (isinstance(num1, Number) and isinstance(num2, Number)):
raise ValueError("cannot use dice in divion")
return num1 / num2
def dice_num(num):
if type(num) == str:
if len(num) > 1 and num[0] == 'd':
dice = int(num[1:])
num = randint(1, dice)
else:
num = int(num)
return num
```
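A short usage sketch for the dice-aware calculator above: strings of the form "dN" are rolled with randint, plain numbers pass through unchanged, and division rejects dice.

```python
# Usage sketch for calc.py; results involving "dN" arguments are random.
import calc

print(calc.add(2, 3))        # 5
print(calc.add("d6", 4))     # 4 plus one roll of a six-sided die
print(calc.mult("d20", 2))   # two d20 rolls summed
try:
    calc.div("d6", 2)
except ValueError as err:
    print(err)               # "cannot use dice in division"
```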
#### File: joeryan/100days/cybrary_menu.py
```python
import os
import platform
import numbers
import cybrary
# display a menu of options to choose from
def menu_generator():
menu = \
"""Please select from the following options:
1. Determine if an input value is odd or even
2. Return the sum of a list of input vaules
3. Return how many of an input list of integers are even
4. Return an input string reversed
5. Determine if an input string is a palindrome
Q. Quit program
Enter choice: """
return menu
# 1. determine if the input is odd or even
def check_odd_or_even_input():
num = input("Enter a number to check: ")
result = "The entered number is "
try:
num = int(num)
if cybrary.check_even(num):
result += "Even"
else:
result += "Odd"
except ValueError:
result = "You must enter an number that can be converted to an integer."
print(result)
# 2. return the sum of two input values
def get_sum_of_input_numbers():
nums = input("Type a list of numbers separated by a space, end the list with Enter: ")
num_list = []
nums = nums.split(' ')
for num in nums:
try:
num_list.append(int(num))
except ValueError:
try:
num_list.append(float(num))
except ValueError:
print("Error: input {0} is not a valid number".format(num))
print("The sum of the numbers is {0}".format(cybrary.sum_numbers(num_list)))
# 3. given a list of integers, determine how many are even
def count_even_numbers_in_list():
nums = []
num_list = input("Type a list of inegers separated by a space. Press Enter when done\n> ")
num_list = list(num_list.split(' '))
for num in num_list:
if len(num) > 0:
nums.append(int(num.strip()))
print(f"There were {cybrary.count_even_numbers(nums)} even numbers in the list")
# 4. answer the input string backwards
def print_input_string_reversed():
input_string = input("Type a string to reverse. Press Enter when finished. ")
output_string = cybrary.reverse_string(input_string)
print(output_string)
# 5. determine if input string is a palindrome
def check_input_for_palindrome():
word = input("Type a word to check if it is a palindrome: ")
if cybrary.is_palindrome(word):
print("It is definitely a palindrome!")
else:
print("Sorry, that word is not a palindrome.")
# utility functions
def prompt_to_clear_screen():
    running_os = platform.system()
input("Press Enter to continue.....")
if running_os == 'Linux' or running_os == 'Darwin':
_ = os.system('clear')
elif running_os == 'Windows':
_ = os.system('cls')
else:
print("\n" * 100)
def noop():
pass
def invalid_selection():
print("Invalid selection")
choices = {
'1': check_odd_or_even_input,
'2': get_sum_of_input_numbers,
'3': count_even_numbers_in_list,
'4': print_input_string_reversed,
'5': check_input_for_palindrome,
'q': noop
}
if __name__ == '__main__':
choice = ''
while choice != 'q':
choice = input(menu_generator()).lower()
action = choices.get(choice, invalid_selection)
action()
if choice != 'q':
prompt_to_clear_screen()
```
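The menu above delegates to a `cybrary` module that is not included in this snippet. A minimal sketch of helpers matching how they are called might look like the following; the bodies are assumptions, not the original module.

```python
# cybrary.py -- minimal sketch of the helper module imported by the menu.
# The real module is not shown above; these bodies are assumptions that
# simply match how cybrary_menu.py calls them.

def check_even(num):
    return num % 2 == 0

def sum_numbers(numbers):
    return sum(numbers)

def count_even_numbers(numbers):
    return sum(1 for n in numbers if n % 2 == 0)

def reverse_string(text):
    return text[::-1]

def is_palindrome(word):
    normalised = word.lower()
    return normalised == normalised[::-1]
```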
#### File: joeryan/100days/hotel.py
```python
import pytest
class Hotel:
_guests = {}
def check_in(self, guest_name, room_number):
result = False
if(room_number not in self._guests.values()):
self._guests[guest_name] = room_number
result = True
return result
def check_out(self, guest_name):
del self._guests[guest_name]
def guests(self):
return self._guests.keys()
``` |
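A quick usage sketch for the Hotel class above. Note that `_guests` is declared as a class attribute, so as written the registry is shared between all Hotel instances; moving the dictionary into `__init__` would give each hotel its own register.

```python
# Usage sketch for hotel.py exactly as written above.
from hotel import Hotel

hotel = Hotel()
assert hotel.check_in("Alice", 101) is True
assert hotel.check_in("Bob", 101) is False      # room 101 already occupied
assert "Alice" in hotel.guests()
hotel.check_out("Alice")
assert "Alice" not in hotel.guests()
```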
{
"source": "joeryan/100pythondays2019",
"score": 4
} |
#### File: 100pythondays2019/002/fibgen.py
```python
import argparse
def fibonacci_generator(limit):
count = 1
current, next = 0, 1
while count <= limit:
current, next = next, next + current
count += 1
yield current
def main():
parser = argparse.ArgumentParser(description="generate a fibonacci sequence")
parser.add_argument("limit", type=int, help="Ending number to generate fibonacci sequence")
args = parser.parse_args()
nums = fibonacci_generator(args.limit)
for num in nums:
print(num, end=', ')
if __name__ == '__main__':
main()
```
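A quick check of the generator above without going through the argparse wrapper:

```python
# Spot check for fibgen.fibonacci_generator.
from fibgen import fibonacci_generator

print(list(fibonacci_generator(7)))  # [1, 1, 2, 3, 5, 8, 13]
```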
#### File: 100pythondays2019/007/test_list_user.py
```python
import json
import pytest
import requests
@pytest.mark.parametrize("userid, firstname", [(1,"George"),(2,"Janet")])
def test_list_valid_user(supply_url, userid, firstname):
url = supply_url + "/users/" + str(userid)
resp = requests.get(url)
json_resp = json.loads(resp.text)
assert json_resp['data']['id'] == userid, resp.text
assert json_resp['data']['first_name'] == firstname, resp.text
def test_list_invalid_user(supply_url):
url = supply_url + "/users/50"
resp = requests.get(url)
assert resp.status_code == 404, resp.text
```
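These tests rely on a `supply_url` fixture defined elsewhere (typically a conftest.py, which is not part of this snippet). A plausible sketch, assuming the tests target the public reqres.in demo API whose user ids and first names match the parametrised values above:

```python
# conftest.py -- sketch of the fixture the tests above depend on.
# The base URL is an assumption; ids 1/2 with names George/Janet match
# the public reqres.in demo API.
import pytest

@pytest.fixture
def supply_url():
    return "https://reqres.in/api"
```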
#### File: 100pythondays2019/018/wordbuilder.py
```python
from data import DICTIONARY, POUCH, LETTER_SCORES
import random
import itertools
def load_words():
return DICTIONARY
def calc_word_value(word):
"""Calculate the value of the word entered into function
using imported constant mapping LETTER_SCORES"""
score = 0
for letter in word:
if letter.isalnum():
score += LETTER_SCORES[letter.upper()]
return score
def max_word_value(words=load_words()):
"""Calculate the word with the max value, can receive a list
of words as arg, if none provided uses default DICTIONARY"""
max_word = ''
max_word_val = 0
for word in words:
if calc_word_value(word) > max_word_val:
max_word_val = calc_word_value(word)
max_word = word
return max_word
def get_possible_dict_words(draw):
permutations = _get_permutations_draw(draw)
words = set()
for word in permutations:
if word.lower() in DICTIONARY:
words.add(word)
return words
def _get_permutations_draw(draw):
permutations = []
for i in range(1,len(draw)+1):
permutations.extend([''.join(word) for word in
itertools.permutations(draw, i)])
return permutations
def draw_letters():
letters = []
for _ in range(7):
letters.append(random.choice(POUCH))
return letters
def _validation(word, draw):
if not word in DICTIONARY:
raise ValueError("{} is not in dictionary".format(word))
for letter in word:
if not letter in draw:
raise ValueError("The letter {} was not in the draw".format(letter))
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("letters", help="Letters in the rack")
args = parser.parse_args()
rack = args.letters
print("Letter rack: {}".format(', '.join(rack)))
possible_words = get_possible_dict_words(rack)
optimal_word = max_word_value(possible_words)
max_score = calc_word_value(optimal_word)
print("Highest possible word: {} with a score of {}".format(
optimal_word, max_score
))
if __name__ == '__main__':
main()
```
#### File: joeryan/100pythondays2019/init.py
```python
import datetime
import os
LOG = 'LOG.md'
DAY_ZERO = datetime.datetime(2019, 2, 22) # = <NAME> 100 days :)
NUM_DAYS = 100
NUM_WIDTH = 3
TABLE_HEADER = '''## Progress Log
| Day | Date | Created | Learned |
| --- | --- | --- | --- |
'''
DAY = '| {0} | {1} | [TITLE]({0}) | LEARNING |\n'
INIT_FILE = '__init__.py'
AUTHOR = "__author__ = '<NAME>'\n"
def gen_days():
'''Generate day range 001...100'''
for day in range(1, NUM_DAYS + 1):
yield str(day).zfill(NUM_WIDTH)
def get_date(day):
'''Get date by offsetting nth day from day 0'''
date = DAY_ZERO + datetime.timedelta(int(day))
return date.strftime('%b %d, %Y')
def create_log():
'''Create progress log file with markdown table '''
with open(LOG, 'w') as f:
f.write(TABLE_HEADER)
for d in gen_days():
date = get_date(d)
f.write(DAY.format(d, date))
def create_init(path):
'''Create init file so each day dir is package,
and gets committed to git from the start'''
initfile = os.path.join(path, INIT_FILE)
with open(initfile, 'w') as f:
f.write(AUTHOR)
if __name__ == '__main__':
if os.path.isfile(LOG):
print('Logfile already created')
else:
print('Creating logfile')
create_log()
dirs = [d for d in gen_days() if not os.path.isdir(d)]
if not dirs:
print('All 100 days directories already created')
else:
print('Creating missing day directories')
for d in dirs:
os.makedirs(d)
create_init(d)
``` |
{
"source": "joeryan/2048-game",
"score": 3
} |
#### File: 2048-game/tests/test_merge.py
```python
import pytest
import game
def test_merge_empty_line():
assert game.merge([]) == []
def test_merge_single_item_line():
assert game.merge([2]) == [2]
def test_merge_two_item_line_mergable():
line = [2, 2]
assert game.merge(line) == [4, 0]
def test_merge_three_item_line_mergable():
line = [2, 0, 2]
assert game.merge(line) == [4, 0, 0]
def test_merge_three_item_line_not_mergable():
line = [2, 4, 2]
assert game.merge(line) == [2, 4, 2]
def test_merge_five_item_line_mergable():
line = [2, 2, 0, 4, 4]
assert game.merge(line) == [4, 8, 0, 0, 0]
def test_merge_five_item_line_mergable2():
line = [2, 2, 2, 2, 2]
assert game.merge(line) == [4, 4, 2, 0, 0]
``` |
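The tests above pin down the 2048 merge rule: slide non-zero tiles toward the front, combine equal neighbours once, and pad with zeros back to the original length. A sketch of a `merge` that satisfies them (the real game.py is not shown here):

```python
# Sketch of a merge() consistent with the tests above; game.py itself is
# not included in this snippet.
def merge(line):
    tiles = [n for n in line if n != 0]       # slide non-zero tiles forward
    merged = []
    i = 0
    while i < len(tiles):
        if i + 1 < len(tiles) and tiles[i] == tiles[i + 1]:
            merged.append(tiles[i] * 2)       # each tile merges at most once
            i += 2
        else:
            merged.append(tiles[i])
            i += 1
    return merged + [0] * (len(line) - len(merged))

assert merge([2, 2, 0, 4, 4]) == [4, 8, 0, 0, 0]
assert merge([2, 2, 2, 2, 2]) == [4, 4, 2, 0, 0]
```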
{
"source": "joeryan/interact-python",
"score": 3
} |
#### File: interact-python/numbers/numgame_test.py
```python
import unittest
import numgame
class NumgameTestCase(unittest.TestCase):
def setUp(self):
pass
def test_new_game_100(self):
# game.new_game()
game = numgame.NumGame()
self.assertTrue(game.target >= 0)
self.assertTrue(game.target <= 100)
def test_range1000(self):
game = numgame.NumGame()
game.new_game(1000)
self.assertTrue(game.target >= 0)
self.assertTrue(game.target <= 1000)
def test_check_guess_actual(self):
game = numgame.NumGame()
game.target = 55
self.assertTrue(game.check_guess(55) == 0)
def test_check_guess_lower(self):
game = numgame.NumGame()
game.target = 55
self.assertTrue(game.check_guess(54) == -1)
def test_check_guess_higher(self):
game = numgame.NumGame()
game.target = 55
self.assertTrue(game.check_guess(56) == 1)
if __name__ == "__main__":
unittest.main()
```
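The unit tests above describe the expected NumGame interface: a default target in [0, 100], an optional wider range, and a check_guess that reports low/exact/high. A minimal sketch that satisfies them (the actual numgame.py is not part of this snippet):

```python
# Sketch of a NumGame consistent with numgame_test.py above; numgame.py
# itself is not shown in this snippet.
import random

class NumGame:
    def __init__(self):
        self.new_game()

    def new_game(self, upper=100):
        self.target = random.randint(0, upper)

    def check_guess(self, guess):
        if guess < self.target:
            return -1
        if guess > self.target:
            return 1
        return 0
```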
#### File: interact-python/pong/pong.py
```python
import Tkinter as tk
class PongGame(tk.Frame):
FLD_H = 650
FLD_W = 1150
PAD_H = 80
PAD_W = 20
score1 = 0
score2 = 0
def __init__(self, master=None):
tk.Frame.__init__(self, master)
self.grid()
self.createWidgets()
def createWidgets(self):
self.quitButton = tk.Button(self, text = 'Quit',
command=self.quit)
self.fld = tk.Canvas(bg='black', height=self.FLD_H,
width = self.FLD_W)
self.fld.grid(padx=25, pady=15)
self.quitButton.grid()
l_gut = self.fld.create_line(self.PAD_W, 0, self.PAD_W, self.FLD_H,
width = 2, fill = 'white')
r_gut = self.fld.create_line((self.FLD_W - self.PAD_W), 0,
(self.FLD_W - self.PAD_W),
self.FLD_H, width = 2, fill = 'white')
c_line = self.fld.create_line((self.FLD_W/2), 0,
(self.FLD_W/2), self.FLD_H,
width = 2, fill = 'white')
l_pad = self.fld.create_rectangle(0, self.FLD_H/2+self.PAD_H/2,
self.PAD_W, self.FLD_H/2-self.PAD_H/2,
outline="white", fill="white",width = 2)
r_pad = self.fld.create_rectangle(self.FLD_W, self.FLD_H/2+self.PAD_H/2,
self.FLD_W - self.PAD_W, self.FLD_H/2 -
self.PAD_H/2, outline ="white",
fill="white", width = 2)
def main():
pong = PongGame()
pong.master.geometry("1240x820+300+300")
pong.master.title('Game of Pong')
pong.mainloop()
if __name__ == '__main__':
main()
#online submission url - http://www.codeskulptor.org/#user39_wJkf7WYddA_17.py
```
#### File: interact-python/reflex/reflex.py
```python
def format(t):
tenths = t % 10
minutes = int((t - tenths)/600)
seconds = int((t - (minutes*600))/10)
if seconds < 10:
str_seconds = "0" + str(seconds)
else:
str_seconds = str(seconds)
out_string = str(minutes) + ":" + str_seconds + "." + str(tenths)
return out_string
# define event handlers for buttons; "Start", "Stop", "Reset"
# define event handler for timer with 0.1 sec interval
# define draw handler
# create frame
# register event handlers
# start frame
``` |
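The format() helper above converts a tick count in tenths of a second into an "M:SS.t" string; a few spot checks:

```python
# Spot checks for reflex.format (tick counts are tenths of a second).
from reflex import format

print(format(0))    # 0:00.0
print(format(95))   # 0:09.5
print(format(666))  # 1:06.6
```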
{
"source": "joeryan/pytest-practice",
"score": 4
} |
#### File: joeryan/pytest-practice/birthday.py
```python
from datetime import date
class Birthday():
def __init__(self, year, month, day):
self._year = year
self._month = month
self._day = day
self._birthday = date(year, month, day)
def daysRemaining(self):
today = date.today()
if self._birthday < today:
self._birthday = self._birthday.replace(year=today.year + 1)
remaining = abs(self._birthday - today)
return remaining.days
def getOutput(self):
days = self.daysRemaining()
returnStr = "It looks like you were born on {day}/{mo}/{year}\n".format(day=self._day,
mo=self._month, year=self._year)
returnStr += "Looks like your birthday is in {days} days.\n".format(days=self.daysRemaining())
returnStr += "Hope you're looking forward to it!"
return returnStr
if __name__ == '__main__':
print ('-'*40)
print ("\t\tBIRTHDAY APP")
print ('-'*40)
year = int(input("What year were you born [YYYY]? "))
month = int(input("What month were you born [MM]? "))
day = int(input("What day were you born [DD]? "))
print()
bday = Birthday(year, month, day)
print(bday.getOutput())
```
#### File: joeryan/pytest-practice/journal.py
```python
import sys
from pathlib import Path
jfile = Path('./data/default.jrn')
print('-'*40 + "\n")
print("\t\tJOURNAL APP")
print('-'*40 + "\n")
journal = []
if jfile.is_file():
count = 0
print("... loading journal from ./data/default.jrn ...")
with open(jfile) as jf:
for entry in jf.readlines():
journal.append(entry.strip())
count += 1
print("... loaded {c} journal entries ...".format(c=count))
def list_entries():
count = 1
for item in journal:
print(str(count) +".\t"+item)
count += 1
while True:
choice = input("What do you want to do? [L]ist, [A]dd, or E[x]it: ")
if choice.upper() == "L":
list_entries()
if choice.upper() == "A":
entry = input("Enter your journal entry:\n")
journal.append(entry)
if choice.upper() == "X":
break
print("... saving journal to ./data/default.jrn ...")
count = 0
with open(jfile, 'w') as jf:
for entry in journal:
jf.write(entry + "\n")
count += 1
print("... saved {c} journal entries ...".format(c=count))
```
#### File: joeryan/pytest-practice/lolcat.py
```python
import requests
import os
import shutil
import subprocess
from sys import platform, argv
def print_header():
print('-'*40)
print("\tRANDOM LOL CAT APP")
print('-'*40 + "\n")
def get_cat(folder, name):
url = r"http://consuming-python-services-api.azurewebsites.net/cats/random"
data = get_data_from_url(url)
save_image(folder, name, data)
def get_data_from_url(url):
response = requests.get(url, stream=True)
return response.raw
def save_image(folder, name, data):
file_name = os.path.join(folder, name + '.jpg')
with open(file_name, 'wb') as fout:
shutil.copyfileobj(data, fout)
if __name__ == '__main__':
folder = argv[1] if len(argv) > 1 else '.'
print_header()
print("Contacting cat service for funny cat pictures ...")
for x in range(1,8):
print("Downloading Cat {} ....".format(x))
get_cat(folder, "lolcat{}".format(x))
if platform.startswith("linux"):
print("Opening folder {} in files".format(folder))
subprocess.call(['xdg-open', folder])
elif platform.startswith("win32"):
print("opening folder {} in explorer".format(folder))
subprocess.call(['explorer', folder])
elif platform.startswith("darwin"):
print("opening folder {} in finder".format(folder))
subprocess.call(['open', folder])
else:
print("Unsupported platform {}".format(str(platform)))
```
#### File: joeryan/pytest-practice/test_wallet.py
```python
import pytest
from wallet import Wallet, InsufficientAmount, NegativeTransaction
@pytest.fixture
def empty_wallet():
'''Returns a wallet with balance = 0
'''
return Wallet()
@pytest.fixture
def wallet():
'''Returns a wallet with balance = 20
'''
return Wallet(20)
def test_default_initial_amount(empty_wallet):
assert empty_wallet.balance == 0
def test_setting_initial_amount(wallet):
assert wallet.balance == 20
def test_wallet_add_cash(wallet):
wallet.add_cash(100)
assert wallet.balance == 120
def test_wallet_spend_cash(wallet):
wallet.spend_cash(10)
assert wallet.balance == 10
def test_wallet_spend_cash_raises_exception_on_insufficient_amount(wallet):
with pytest.raises(InsufficientAmount):
wallet.spend_cash(30)
def test_wallet_add_cash_negative_amount_raises_error(wallet):
with pytest.raises(NegativeTransaction):
wallet.add_cash(-10)
@pytest.mark.parametrize("earned,spent,expected", [
(30, 10, 20),
(20, 2, 18),
])
def test_transactions(earned, spent, expected):
    wallet = Wallet()  # construct directly; fixture functions should not be called as plain functions
wallet.add_cash(earned)
wallet.spend_cash(spent)
assert wallet.balance == expected
```
#### File: joeryan/pytest-practice/wallet.py
```python
class InsufficientAmount(Exception):
pass
class NegativeTransaction(Exception):
pass
class Wallet():
def __init__(self, amount = 0):
self.balance = amount
def add_cash(self, deposit):
if deposit < 0:
raise NegativeTransaction("Transaction Amount {0} cannot be negative.".format(deposit))
self.balance += deposit
def spend_cash(self, spend_amount):
if spend_amount < 0:
raise NegativeTransaction("Transaction Amount {0} cannot be negative.".format(spend_amount))
if self.balance - spend_amount < 0:
raise InsufficientAmount("Not enough available to spend {0}".format(spend_amount))
        self.balance -= spend_amount
``` |
{
"source": "joeryan/python-euler",
"score": 4
} |
#### File: joeryan/python-euler/prob1.py
```python
class Prob1():
def __init__(self):
self.maxNum = 0
def setMax(self, maxNum):
self.maxNum = maxNum
print "The maximum number range is: %d" % self.maxNum
def answer(self):
total = 0;
for number in range(1,self.maxNum):
if number % 3 == 0:
total += number
elif number % 5 == 0:
total += number
return total
```
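Prob1.answer() sums the multiples of 3 or 5 below the maximum (Project Euler problem 1); the same computation as a one-line comprehension, for reference:

```python
# Equivalent one-liner for Project Euler problem 1 (matches the 233168
# expected by test1.py for a maximum of 1000).
print(sum(n for n in range(1000) if n % 3 == 0 or n % 5 == 0))  # 233168
```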
#### File: joeryan/python-euler/test1.py
```python
import unittest
import prob1
class TestProb1(unittest.TestCase):
def setUp(self):
self.prob1 = prob1.Prob1()
def testInitial(self):
self.prob1.setMax(10)
        assert self.prob1.answer() == (23), "incorrect answer %d" % self.prob1.answer()
def testFinal(self):
self.prob1.setMax(1000)
assert self.prob1.answer() == (233168), "incorrect answer %d" % self.prob1.answer()
prob1TestSuite = unittest.TestSuite()
prob1TestSuite.addTest(TestProb1("testInitial"))
prob1TestSuite.addTest(TestProb1("testFinal"))
runner = unittest.TextTestRunner()
runner.run(prob1TestSuite)
``` |
{
"source": "JoeSamyn/Dungeon_Game_Git",
"score": 3
} |
#### File: JoeSamyn/Dungeon_Game_Git/dungeon_game.py
```python
import random
import os
CELLS = [(0, 0), (1, 0), (2, 0), (3 , 0), (4, 0),
(0, 1), (1, 1), (2, 1), (3 , 1), (4, 1),
(0, 2), (1, 2), (2, 2), (3 , 2), (4, 2),
(0, 3), (1, 3), (2, 3), (3 , 3), (4, 3),
(0, 4), (1, 4), (2, 4), (3 , 4), (4, 4)
]
def print_map(player):
print(" _"*5)
tile = "|{}"
for cell in CELLS:
player_x, player_y = cell
if player_x < 4:
line_ending = ""
if cell == player:
output = tile.format("X")
else:
output = tile.format("_")
else:
line_ending = "\n"
if cell == player:
output = tile.format("X|")
else:
output = tile.format("_|")
print(output, end = line_ending)
def get_locations():
return random.sample(CELLS, 3)
def move_player(player, move):
# get the players location
player_x, player_y = player
# if move == LEFT, x-1
if move == 'LEFT' and player_x > 0:
player_x -= 1
return (player_x, player_y)
# if move == RIGHT x + 1
elif move == 'RIGHT' and player_x < 4:
player_x += 1
return (player_x, player_y)
    # if move == UP, y - 1
elif move == 'UP' and player_y > 0:
player_y -= 1
return (player_x, player_y)
    # if move == DOWN, y + 1
elif move == 'DOWN' and player_y < 4:
player_y += 1
return (player_x, player_y)
else:
print("*INVALID PLAYER MOVE*")
return (player_x, player_y)
def clear_Screen():
os.system('cls' if os.name == 'nt' else "clear")
def get_moves(player):
moves = ["LEFT", "UP", "RIGHT", "DOWN"]
player_x, player_y = player
    # use independent if-checks so that corner cells drop both blocked
    # directions (in the original elif chain the combined corner branches
    # could never be reached)
    if player_y == 0:
        moves.remove("UP")
    if player_y == 4:
        moves.remove("DOWN")
    if player_x == 0:
        moves.remove("LEFT")
    if player_x == 4:
        moves.remove("RIGHT")
    return (", ").join(moves)
player, monster, door = get_locations()
print("Welcome to the Dungeon!")
start = input("Press enter to start or 'Q' to quit.")
if start.upper() == 'Q':
print("Okay see you next time!")
elif start == "":
clear_Screen()
while True:
print("You're currently in room {}".format(player)) #fill with player position
print("You can move {}".format(get_moves(player))) # fill with available moves
print_map(player)
move = input("> ")
move = move.upper()
if move == 'QUIT' or move == 'Q':
break
# good move? change player position
else:
clear_Screen()
player = move_player(player, move)
#print(move_player(player, move))
# hit door? They win
if player == door:
print("You made it to the door! You Win!")
break
elif player == monster:
print("You hit the monster! Sorry you lose!:(")
            break
# hit monster? they lose
``` |
{
"source": "joesanford/devs-are-lazy",
"score": 2
} |
#### File: joesanford/devs-are-lazy/data_loader.py
```python
import time
import os
import requests
from requests.auth import HTTPBasicAuth
base_url = 'https://api.github.com/search/issues?q=+language:{language}+state:{state}+type:{type}+archived:false'
languages = ['python', 'javascript', 'css', 'ruby', 'java', 'go', 'php', 'c', 'c++', 'swift', 'shell', 'objective-c']
states = ['open', 'closed']
types = ['pr', 'issue']
github_token = os.environ.get('GITHUB_TOKEN')
github_username = os.environ.get('GITHUB_USERNAME')
data = {}
def get_data():
for language in languages:
data[language] = {}
for type in types:
data[language][type] = {}
for state in states:
time.sleep(2) # to avoid GitHub Search API Rate Limits: https://developer.github.com/v3/search/#rate-limit
r = requests.get(base_url.format(language=language, type=type, state=state),
auth=HTTPBasicAuth(github_username, github_token))
data[language][type][state] = r.json().get('total_count', r.text)
return data
def get_mocked_data():
return {'python': {'pr': {'open': 159723, 'closed': 3079050}, 'issue': {'open': 1528487, 'closed': 2919859}},
'javascript': {'pr': {'open': 539227, 'closed': 5790580}, 'issue': {'open': 2361186, 'closed': 3950643}},
'css': {'pr': {'open': 57471, 'closed': 872585}, 'issue': {'open': 248292, 'closed': 460819}},
'ruby': {'pr': {'open': 521155, 'closed': 2280152}, 'issue': {'open': 369578, 'closed': 977661}},
'java': {'pr': {'open': 139682, 'closed': 2916285}, 'issue': {'open': 2185928, 'closed': 4174197}},
'go': {'pr': {'open': 26750, 'closed': 762512}, 'issue': {'open': 193987, 'closed': 476020}},
'php': {'pr': {'open': 90126, 'closed': 1707521}, 'issue': {'open': 756084, 'closed': 2101087}},
'c': {'pr': {'open': 47574, 'closed': 725236}, 'issue': {'open': 1133153, 'closed': 1613625}},
'c++': {'pr': {'open': 47574, 'closed': 725236}, 'issue': {'open': 1133153, 'closed': 1613625}},
'swift': {'pr': {'open': 25325, 'closed': 216524}, 'issue': {'open': 66887, 'closed': 137565}},
'shell': {'pr': {'open': 34215, 'closed': 552111}, 'issue': {'open': 178617, 'closed': 393265}},
'objective-c': {'pr': {'open': 23845, 'closed': 253407}, 'issue': {'open': 288751, 'closed': 501042}}}
``` |
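One way the counts above might be summarised is as the share of items still open per language; this post-processing step is a sketch and not part of the repository itself.

```python
# Sketch: turn the raw open/closed counts into an "open share" per language.
from data_loader import get_mocked_data

for language, kinds in get_mocked_data().items():
    issues = kinds["issue"]
    open_share = issues["open"] / (issues["open"] + issues["closed"])
    print(f"{language}: {open_share:.1%} of issues still open")
```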
{
"source": "joesantana/doxx",
"score": 2
} |
#### File: lib/doxx/app.py
```python
def main():
import sys
from os.path import basename
from Naked.commandline import Command
from Naked.toolshed.system import stdout, stderr, is_dir, is_file, cwd
from doxx.commands.build import Builder
from doxx.commands.make import Maker
#------------------------------------------------------------------------------------------
# [ Instantiate command line object ]
# used for all subsequent conditional logic in the CLI application
#------------------------------------------------------------------------------------------
c = Command(sys.argv[0], sys.argv[1:])
#------------------------------------------------------------------------------------------
# [ Command Suite Validation ] - early validation of appropriate command syntax
# Test that user entered at least one argument to the executable, print usage if not
#------------------------------------------------------------------------------------------
if not c.command_suite_validates():
from doxx.settings import usage as doxx_usage
print(doxx_usage)
sys.exit(1)
#------------------------------------------------------------------------------------------
# [ NAKED FRAMEWORK COMMANDS ]
# Naked framework provides default help, usage, and version commands for all applications
# --> settings for user messages are assigned in the lib/doxx/settings.py file
#------------------------------------------------------------------------------------------
if c.help(): # User requested doxx help information
from doxx.settings import help as doxx_help
print(doxx_help)
sys.exit(0)
elif c.usage(): # User requested doxx usage information
from doxx.settings import usage as doxx_usage
print(doxx_usage)
sys.exit(0)
elif c.version(): # User requested doxx version information
from doxx.settings import app_name, major_version, minor_version, patch_version
version_display_string = app_name + ' ' + major_version + '.' + minor_version + '.' + patch_version
print(version_display_string)
sys.exit(0)
#------------------------------------------------------------------------------------------
# [ PRIMARY COMMAND LOGIC ]
# Enter your command line parsing logic below
#------------------------------------------------------------------------------------------
elif c.cmd == "build":
if c.argc > 1:
key_path = c.arg1
else:
key_path = "key.yaml"
stdout("[*] doxx: Build started with the key file '" + key_path + "'...")
b = Builder(key_path)
b.run()
stdout("[*] doxx: Build complete.")
elif c.cmd == "browse":
from doxx.commands.browse import browse_docs
if c.argc > 1:
query = c.arg1
browse_docs(query)
else:
# default to open the main documentation page
query = "docs"
browse_docs(query)
elif c.cmd == "clean":
from doxx.commands.clean import run_clean
run_clean() # execute the clean routines
elif c.cmd == "make":
if c.argc > 1:
if c.cmd2 == "key": # secondary command
m = Maker()
if c.argc > 2:
m.make_key(c.arglp)
else:
m.make_key("key.yaml")
elif c.cmd2 == "template": # secondary command
m = Maker()
if c.argc > 2:
m.make_template(c.arglp)
else:
m.make_template("stub.doxt") # default name is 'stub.doxt' for new template if not specified by user
elif c.cmd2 == "project":
m = Maker()
m.make_project()
else:
stderr("Usage: doxx make [key|project|template]", exit=1)
else:
stderr("[!] doxx: Please include the secondary command 'key', 'project', or 'template' with the 'make' command.", exit=1)
elif c.cmd == "pack":
from doxx.commands.pack import tar_gzip_package_directory, zip_package_directory
if c.argc > 1:
if c.cmd2 == "zip":
if c.argc > 2: # request for zip with a directory path
if is_dir(c.arglp):
zip_package_directory(c.arglp, c.arglp)
else:
stderr("[!] doxx: '" + c.arglp + "' does not appear to be a directory. Please enter the path to your project directory.", exit=1)
else: # request for zip with current working directory
stderr("[!] doxx: Please include your project directory as an argument to the zip command", exit=1)
else: # request for tar.gz with a directory path
if is_dir(c.arglp):
tar_gzip_package_directory(c.arglp, c.arglp)
else:
stderr("[!] doxx: '" + c.arglp + "' does not appear to be a directory. Please enter the path to your project directory.", exit=1)
else: # request for tar.gz in current working directory
root_dir = cwd()
archive_name = basename(root_dir)
tar_gzip_package_directory(archive_name, root_dir)
# end of the pack command
stdout("[*] doxx: Pack complete")
elif c.cmd == "pull":
if c.argc > 1:
from doxx.commands.pull import run_pull
run_pull(c.arg1)
stdout("[*] doxx: Pull complete")
else:
stderr("[!] doxx: Please include the URL for the archive that you would like to pull.", exit=1)
elif c.cmd == "pullkey":
if c.argc > 1:
from doxx.commands.pullkey import run_pullkey
run_pullkey(c.arg1)
else:
stderr("[!] doxx: Please include a package name with the pullkey command", exit=1)
elif c.cmd == "search":
if c.argc > 1:
from doxx.commands.search import run_search
run_search(c.arg1)
else:
stderr("[!] doxx: Please include a search string after your command.", exit=1)
elif c.cmd == "unpack":
if c.argc > 1:
if is_file(c.arg1):
from doxx.commands.unpack import unpack_run, remove_compressed_archive_file
unpack_run(c.arg1)
remove_compressed_archive_file(c.arg1)
stdout("[*] doxx: Unpack complete")
else:
stderr("[!] doxx: '" + c.arg1 + "' does not appear to be a file. Please include a path to your compressed file.", exit=1)
else:
stderr("[!] doxx: Please include a path to your file.", exit=1)
elif c.cmd == "whatis":
if c.argc > 1:
from doxx.commands.whatis import run_whatis
run_whatis(c.arg1)
else:
stderr("[!] doxx: Please enter a package name following your command.", exit=1)
#------------------------------------------------------------------------------------------
# UNDOCUMENTED TESTING COMMANDS
#
#------------------------------------------------------------------------------------------
elif c.cmd == "repoupdate":
from doxx.commands.repoupdate import run_repoupdate
run_repoupdate()
#------------------------------------------------------------------------------------------
# [ DEFAULT MESSAGE FOR MATCH FAILURE ]
# Message to provide to the user when all above conditional logic fails to meet a true condition
#------------------------------------------------------------------------------------------
else:
stderr("[!] doxx: Could not complete the command that you entered. Please try again.")
sys.exit(1) #exit
if __name__ == '__main__':
main()
```
#### File: doxx/commands/pack.py
```python
import os
import shutil
import tarfile
import zipfile
from Naked.toolshed.system import stderr
def tar_gzip_package_directory(archive_name, root_dir):
try:
current_dir = os.getcwd()
archive_gz_name = archive_name + ".tar.gz"
tar = tarfile.open(archive_gz_name, mode="w:gz", compresslevel=9) # file writes to current working directory
os.chdir(root_dir) # navigate to the root directory to add the files to the archive
tar.add(".") # make tar.gz archive
tar.close()
os.chdir(current_dir) # navigate back to user's current working directory
except Exception as e:
os.chdir(current_dir) # if exception was raised, make sure that user is back in their current working directory before raising system exit
tar.close()
stderr("[!] doxx: Unable to pack the directory '" + root_dir + "'. Error: " + str(e))
def zip_package_directory(archive_name, path):
try:
current_dir = os.getcwd()
archive_name = archive_name + '.zip'
archive_file_list = []
os.chdir(path)
for root, dirs, files in os.walk(os.getcwd()):
for the_file in files:
archive_file_list.append((os.path.relpath(os.path.join(root, the_file))))
zipper = zipfile.ZipFile(archive_name, 'w')
for zip_file in archive_file_list:
zipper.write(zip_file)
zipper.close()
shutil.move(archive_name, os.path.join(current_dir, archive_name))
os.chdir(current_dir)
except Exception as e:
os.chdir(current_dir)
zipper.close()
stderr("[!] doxx: Unable to pack the directory '" + path + "'. Error: " + str(e))
```
#### File: doxx/datatypes/remotefiles.py
```python
from os import remove
import os.path
from multiprocessing import Process, Lock
from Naked.toolshed.system import stderr, stdout, file_exists
from doxx.commands.pull import pull_binary_file, pull_text_file
from doxx.commands.unpack import unpack_run
from doxx.utilities.filesystem import _create_dirs, _make_os_dependent_path
########################################
#
# [pull_textfile_runner]
# public function
# - pull remote text files
#
########################################
def pull_textfile_runner(text_url_dict):
"""pulls remote text files to local filesystem (public function)"""
file_list = list(text_url_dict) # the local outfile names in a list
number_of_files = len(file_list) # the number of files included in the list
if number_of_files > 0:
if number_of_files > 1: # multiple text file pull, each in separate process
processes = [] # list of spawned processes
outputlock = Lock() # stdout / stderr writes lock
iolock = Lock() # input/output lock
# iterate through requested files and execute pull in separate process for each one
for file_path in file_list:
p = Process(target=_pull_textfile_multiprocess, args=(file_path, text_url_dict[file_path], outputlock, iolock))
p.start()
processes.append(p)
for process in processes:
process.join(timeout=60)
else: # single text file pull
file_path = file_list[0]
_pull_textfile(file_path, text_url_dict[file_path]) # file_path is local path for write, dictionary value is the URL
else:
stderr("[!] doxx: Unable to find text files to pull in the key file", exit=0)
########################################
#
# [pull_binaryfile_runner]
# public function
# - pull remote binary files
#
########################################
def pull_binaryfile_runner(binary_url_dict):
"""pulls remote binary files to local filesystem (public function)"""
file_list = list(binary_url_dict) # the local outfile names in a list
number_of_files = len(file_list) # the number of files included in the list
if number_of_files > 0:
if number_of_files > 1: # multiple binary file pull, each in separate process
processes = [] # list of spawned processes
outputlock = Lock() # stdout / stderr writes lock
iolock = Lock() # input/output lock
# iterate through requested files and execute pull in separate process for each one
for file_path in file_list:
p = Process(target=_pull_binaryfile_multiprocess, args=(file_path, binary_url_dict[file_path], outputlock, iolock))
p.start()
processes.append(p)
for process in processes:
process.join(timeout=60)
else: # single text file pull
file_path = file_list[0]
_pull_binaryfile(file_path, binary_url_dict[file_path]) # file_path is local path for write, dictionary value is the URL
else:
stderr("[!] doxx: Unable to find binary files to pull in the key file", exit=0)
###########################################
#
# [pull_github_repo_runner]
# public function
# - pull remote Github repo archives
#
###########################################
def pull_github_repo_runner(repo_url_dict):
"""pulls remote Github repository archives to the local filesystem and unpacks (public function)"""
file_list = list(repo_url_dict) # the local outfile names in a list
number_of_files = len(file_list) # the number of files included in the list
if number_of_files > 0:
if number_of_files > 1: # multiple binary file pull, each in separate process
stdout("[*] doxx: Hang in there. Pulling " + str(number_of_files) + " entire repositories. This may take a bit of time...")
processes = [] # list of spawned processes
outputlock = Lock() # stdout / stderr writes lock
iolock = Lock() # input/output lock
# iterate through requested files and execute pull in separate process for each one
for file_path in file_list:
p = Process(target=_pull_github_repo_multiprocess, args=(file_path, repo_url_dict[file_path], outputlock, iolock))
p.start()
processes.append(p)
for process in processes:
process.join(timeout=120)
else: # single text file pull
stdout("[*] doxx: Hang in there. Pulling an entire repository. This may take a bit of time...")
file_path = file_list[0]
_pull_github_repo(file_path, repo_url_dict[file_path]) # file_path is local path for write, dictionary value is the URL
else:
stderr("[!] doxx: Unable to find binary files to pull in the key file", exit=0)
###############################################
#
# [_pull_textfile]
# private function
# - execute single process text file pulls
#
###############################################
def _pull_textfile(file_path, url):
"""executes single process text file pulls (private function)"""
# create OS dependent file path (if necessary)
file_path = _make_os_dependent_path(file_path)
# make directory structure if necessary for the file path
    if os.path.dirname(file_path) != "":
_create_dirs(file_path)
# pull the file and write to local filesystem
try:
pull_text_file(url, file_path)
except Exception as e:
stderr("[!] doxx: Unable to pull '" + file_path + "' from '" + url + "'. Error: " + str(e), exit=1)
if file_exists(file_path):
stdout("[+] doxx: '" + file_path + "' ...check!")
else:
stderr("[!] doxx: There was an error pulling '" + file_path + "'. Error: Unable to locate local file.", exit=1)
########################################
#
# [_pull_textfile_multiprocess]
# private function
# - execute multi-file, multiprocess
# text file pulls
#
########################################
def _pull_textfile_multiprocess(file_path, url, outputlock, iolock):
"""executes multiprocess, multi-file text file pulls (private function)"""
# create OS dependent file path (if necessary)
file_path = _make_os_dependent_path(file_path)
# make directory structure if necessary for the file path
    if os.path.dirname(file_path) != "":
iolock.acquire()
_create_dirs(file_path)
iolock.release()
# pull the file and write to local filesystem
try:
pull_text_file(url, file_path)
except Exception as e:
outputlock.acquire()
stderr("[!] doxx: Unable to pull '" + file_path + "' from '" + url + "'. Error: " + str(e), exit=0)
outputlock.release()
if file_exists(file_path):
outputlock.acquire()
stdout("[+] doxx: '" + file_path + "' ...check!")
outputlock.release()
else:
outputlock.acquire()
stderr("[!] doxx: There was an error pulling '" + file_path + "'. Error: Unable to locate local file", exit=0)
outputlock.release()
########################################
#
# [_pull_binaryfile]
# private function
# - execute single process binary
# file pulls
#
########################################
def _pull_binaryfile(file_path, url):
"""executes single process binary file pulls (private function)"""
# create OS dependent file path (if necessary)
file_path = _make_os_dependent_path(file_path)
# make directory structure if necessary for the file path
    if os.path.dirname(file_path) != "":
_create_dirs(file_path)
# pull the file and write to local filesystem
try:
pull_binary_file(url, file_path)
except Exception as e:
stderr("[!] doxx: Unable to pull '" + file_path + "' from '" + url + "'. Error: " + str(e), exit=0)
if file_exists(file_path):
stdout("[+] doxx: '" + file_path + "' ...check!")
else:
stderr("[!] doxx: There was an error pulling '" + file_path + "'. Error: Unable to locate local file.", exit=1)
########################################
#
# [_pull_binaryfile_multiprocess]
# private function
# - execute multiprocess multi-file
# binary file pulls
#
########################################
def _pull_binaryfile_multiprocess(file_path, url, outputlock, iolock):
# create OS dependent file path (if necessary)
file_path = _make_os_dependent_path(file_path)
# make directory structure if necessary for the file path
    if os.path.dirname(file_path) != "":
iolock.acquire()
_create_dirs(file_path)
iolock.release()
# pull the file and write to local filesystem
try:
pull_binary_file(url, file_path)
except Exception as e:
outputlock.acquire()
stderr("[!] doxx: Unable to pull '" + file_path + "' from '" + url + "'. Error: " + str(e), exit=0)
outputlock.release()
if file_exists(file_path):
outputlock.acquire()
stdout("[+] doxx: '" + file_path + "' ...check!")
outputlock.release()
else:
outputlock.acquire()
stderr("[!] doxx: There was an error pulling '" + file_path + "'. Error: Unable to locate local file", exit=0)
outputlock.release()
########################################
#
# [_pull_github_repo]
# private function
# - execute single process Github
# repository archive pulls
#
########################################
def _pull_github_repo(file_path, url):
"""executes single process Github repository archive pulls (private function)"""
# create OS dependent file path (if necessary)
file_path = _make_os_dependent_path(file_path)
# make directory structure if necessary for the file path
    if os.path.dirname(file_path) != "":
_create_dirs(file_path)
# pull the file and write to local filesystem
try:
pull_binary_file(url, file_path)
except Exception as e:
stderr("[!] doxx: Unable to pull the archive file from the URL '" + url + "'. Error: " + str(e), exit=0)
if file_exists(file_path):
root_dir = unpack_run(file_path)
remove(file_path)
stdout("[+] doxx: '" + root_dir + "' ...check!")
else:
stderr("[!] doxx: There was an error pulling the repository file. Error: Unable to locate local archive file.", exit=1)
########################################
#
# [_pull_github_repo_multiprocess]
# private function
# - execute multiprocess multi-file
# Github repo archive pulls
#
########################################
def _pull_github_repo_multiprocess(file_path, url, outputlock, iolock):
# create OS dependent file path (if necessary)
file_path = _make_os_dependent_path(file_path)
# make directory structure if necessary for the file path
    if os.path.dirname(file_path) != "":
iolock.acquire()
_create_dirs(file_path)
iolock.release()
# pull the file and write to local filesystem
try:
pull_binary_file(url, file_path)
except Exception as e:
outputlock.acquire()
stderr("[!] doxx: Unable to pull the archive file from the URL '" + url + "'. Error: " + str(e), exit=0)
outputlock.release()
if file_exists(file_path):
root_dir = unpack_run(file_path)
remove(file_path)
outputlock.acquire()
stdout("[+] doxx: '" + root_dir + "' ...check!")
outputlock.release()
else:
outputlock.acquire()
stderr("[!] doxx: There was an error pulling the repository file. Error: Unable to locate local archive file.", exit=1)
outputlock.release()
```
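The multiprocess pull helpers above differ from the single-process ones mainly in serialising console output and directory creation through shared locks. As a rough illustration of how they could be driven, here is a minimal sketch using a `multiprocessing` pool; the runner doxx actually uses is not shown in this excerpt, so `pull_all`, its arguments, and the pool size are assumptions for illustration only.
```python
# Hedged sketch only: `files_to_pull`, `pull_all`, and the pool size are
# illustrative assumptions; `_pull_binaryfile_multiprocess` refers to the
# function defined in the module above.
import multiprocessing

def _pull_worker(args):
    file_path, url, outputlock, iolock = args
    # Each worker serialises console writes through `outputlock` and directory
    # creation through `iolock`, as _pull_binaryfile_multiprocess expects.
    _pull_binaryfile_multiprocess(file_path, url, outputlock, iolock)

def pull_all(files_to_pull, processes=4):
    """files_to_pull: dict mapping a local write path to its remote URL"""
    manager = multiprocessing.Manager()
    outputlock = manager.Lock()  # guards stdout/stderr writes across workers
    iolock = manager.Lock()      # guards directory creation across workers
    jobs = [(path, url, outputlock, iolock) for path, url in files_to_pull.items()]
    pool = multiprocessing.Pool(processes)
    try:
        pool.map(_pull_worker, jobs)
    finally:
        pool.close()
        pool.join()
```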
#### File: doxx/utilities/fuzzysearch.py
```python
import difflib
class FuzzySearcher(object):
def __init__(self, needle):
self.needle = needle
self.needle_normalized = needle.lower().strip()
self.needle_alpha = self._get_alphabetical_string(self.needle_normalized)
self.needle_word_count = len(needle.split(" "))
self.needle_length = len(self.needle_normalized)
# Search Types
# test match to the entire haystack string
def full_string_ratio(self, haystack):
normalized_haystack = haystack.lower().strip()
return difflib.SequenceMatcher(None, self.needle_normalized, normalized_haystack).ratio()
# partial match ratio for first index item after split on '-' character
def partial_firstindexitem_dashsplit_ratio(self, haystack):
normalized_haystack = haystack.lower().strip()
haystack_tokens = normalized_haystack.split('-')
first_haystack_token = haystack_tokens[0]
return difflib.SequenceMatcher(None, self.needle_normalized, first_haystack_token).ratio()
# partial match ratio for needle length slice from the haystack
def partial_startslice_ratio(self, haystack):
normalized_haystack = haystack.lower().strip()
needle_length = len(self.needle_normalized)
if len(normalized_haystack) > needle_length:
sliced_haystack = normalized_haystack[0:needle_length]
return difflib.SequenceMatcher(None, self.needle_normalized, sliced_haystack).ratio()
else:
return difflib.SequenceMatcher(None, self.needle_normalized, normalized_haystack).ratio()
# split haystack into tokens on '-' character delimiters, return best ratio from tokens (use for needle word length = 1)
def partial_dashsplit_tokens_ratio(self, haystack):
normalized_haystack = haystack.lower().strip()
haystack_tokens = normalized_haystack.split("-")
best_ratio = 0
for token in haystack_tokens:
the_ratio = difflib.SequenceMatcher(None, self.needle_normalized, token).ratio()
if the_ratio > best_ratio:
best_ratio = the_ratio
return best_ratio
# attempt match over same number of word tokens as are present in the needle, sorted in alpha order (for multi-word searches)
def partial_nword_ratio(self, haystack):
normalized_haystack = haystack.lower().strip()
haystack_token_list = normalized_haystack.split('-')
haystack_word_count = len(haystack_token_list)
if haystack_word_count >= self.needle_word_count:
first = 0
last = self.needle_word_count
best_ratio = 0
for x in range(haystack_word_count - (self.needle_word_count - 1)):
if last <= haystack_word_count:
sub_haystack_list = haystack_token_list[first:last]
sorted_sub_haystack_list = sorted(sub_haystack_list)
sorted_sub_haystack_string = " ".join(sorted_sub_haystack_list)
token_match_ratio = difflib.SequenceMatcher(None, self.needle_alpha, sorted_sub_haystack_string).ratio()
if token_match_ratio > best_ratio:
best_ratio = token_match_ratio
first += 1 # iterate the positions of the test string slice
last += 1
return best_ratio
else:
return difflib.SequenceMatcher(None, self.needle_normalized, normalized_haystack).ratio()
# set intersection between needle and haystack tokens with addition of remaining parts of strings (use > 0.7 as threshold)
def partial_set_ratio(self, haystack):
normalized_haystack = haystack.lower().strip()
haystack_alpha = sorted(normalized_haystack.split('-'))
haystack_set = set(haystack_alpha)
needle_set = set(self.needle_alpha.split(" "))
intersection_set = sorted(needle_set.intersection(haystack_set))
needle_difference = sorted(needle_set.difference(haystack_set))
haystack_difference = sorted(haystack_set.difference(needle_set))
if len(intersection_set) > 0:
string_one = " ".join(intersection_set)
if len(needle_difference) > 0:
string_two = string_one + " " + " ".join(needle_difference)
else:
string_two = string_one # if there were no tokens in difference, it is just the intersection
if len(haystack_difference) > 0:
string_three = string_one + " " + " ".join(haystack_difference)
else:
string_three = string_one # if there were no tokens in the difference, it is just the intersection
token_match_ratio_one = difflib.SequenceMatcher(None, string_one, string_two).ratio()
token_match_ratio_two = difflib.SequenceMatcher(None, string_one, string_three).ratio()
token_match_ratio_three = difflib.SequenceMatcher(None, string_two, string_three).ratio()
# return an evenly weighted average of the match ratios
weighted_average_ratio = (0.333 * token_match_ratio_one) + (0.333 * token_match_ratio_two) + (0.333 * token_match_ratio_three)
return weighted_average_ratio
else:
            return 0  # if there are no intersecting tokens between the needle and haystack, return 0
# Utilities
def _get_alphabetical_string(self, pre_string):
if " " in pre_string:
alpha_list = sorted(pre_string.split(" "))
alpha_string = " ".join(alpha_list)
return alpha_string
else:
return pre_string
```
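The `FuzzySearcher` class above offers several match strategies over dash-delimited haystacks. A short, purely illustrative usage sketch follows; the needle, haystack, and 0.7 threshold are made-up examples (the threshold mirrors the comment on `partial_set_ratio`).
```python
# Illustrative use of the FuzzySearcher defined above; the strings are made up.
searcher = FuzzySearcher("html template")

haystack = "template-html-basic"
print(searcher.full_string_ratio(haystack))          # ratio against the whole string
print(searcher.partial_startslice_ratio(haystack))   # ratio against a needle-length prefix
print(searcher.partial_nword_ratio(haystack))        # best window of needle-word-count tokens
print(searcher.partial_set_ratio(haystack))          # token set intersection ratio

if searcher.partial_set_ratio(haystack) > 0.7:
    print("likely match: " + haystack)
```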
#### File: doxx/tests/test_doxx-remotefiles.py
```python
import os
import shutil
import unittest
from Naked.toolshed.system import file_exists, dir_exists
from doxx.datatypes.remotefiles import pull_textfile_runner, pull_binaryfile_runner, pull_github_repo_runner
class DoxxTextFileKeyBuildSpecPullTests(unittest.TestCase):
def setUp(self):
self.cwd = os.getcwd()
self.test_dir = "pull-tests/remotefiles"
self.test_text_file_dict = {"testfile.txt": "https://raw.githubusercontent.com/bit-store/testfiles/master/doxx/testfile.txt"}
self.test_text_file_two_dict = {"testfile": "https://raw.githubusercontent.com/bit-store/testfiles/master/doxx/testfile"}
self.test_text_file_three_dict = {"existdir/testfile.txt": "https://raw.githubusercontent.com/bit-store/testfiles/master/doxx/testfile.txt"}
self.test_text_file_four_dict = {"nonexistdir/testfile.txt": "https://raw.githubusercontent.com/bit-store/testfiles/master/doxx/testfile.txt"}
self.test_multi_text_files_dict = {"testfile.txt": "https://raw.githubusercontent.com/bit-store/testfiles/master/doxx/testfile.txt", "testfile": "https://raw.githubusercontent.com/bit-store/testfiles/master/doxx/testfile"}
self.test_multi_text_files_two_dict = {"existdir/testfile.txt": "https://raw.githubusercontent.com/bit-store/testfiles/master/doxx/testfile.txt", "nonexistdir/testfile": "https://raw.githubusercontent.com/bit-store/testfiles/master/doxx/testfile"}
self.test_bad_text_file_dict = {"testfile.txt": "https://raw.githubusercontent.com/bit-store/testfiles/master/doxx/nonexistenttextfile.txt"}
# remove test files and directories if they exist from last test
if file_exists("pull-tests/remotefiles/testfile.txt"):
os.remove("pull-tests/remotefiles/testfile.txt")
self.assertFalse(file_exists("pull-tests/remotefiles/testfile.txt"))
if file_exists("pull-tests/remotefiles/existdir/testfile.txt"):
os.remove("pull-tests/remotefiles/existdir/testfile.txt")
self.assertFalse(file_exists("pull-tests/remotefiles/existdir/testfile.txt"))
if dir_exists("pull-tests/nonexistdir"):
shutil.rmtree("pull-tests/nonexistdir")
self.assertFalse(dir_exists("pull-tests/nonexistdir"))
def test_doxx_pull_single_text_file_remotefiles_module(self):
try:
os.chdir(self.test_dir)
local_writepath = "testfile.txt"
pull_textfile_runner(self.test_text_file_dict)
self.assertTrue(file_exists(local_writepath))
os.remove(local_writepath)
os.chdir(self.cwd)
except Exception as e:
os.chdir(self.cwd)
raise e
def test_doxx_pull_single_text_file_noextension_remotefiles_module(self):
try:
os.chdir(self.test_dir)
local_writepath = "testfile"
pull_textfile_runner(self.test_text_file_two_dict)
self.assertTrue(file_exists(local_writepath))
os.remove(local_writepath)
os.chdir(self.cwd)
except Exception as e:
os.chdir(self.cwd)
raise e
def test_doxx_pull_single_text_file_existdirpath_remotefiles_module(self):
try:
os.chdir(self.test_dir)
local_writepath = "existdir/testfile.txt"
pull_textfile_runner(self.test_text_file_three_dict)
self.assertTrue(file_exists(local_writepath))
os.remove(local_writepath)
os.chdir(self.cwd)
except Exception as e:
os.chdir(self.cwd)
raise e
def test_doxx_pull_single_text_file_nonexistdirpath_remotefiles_module(self):
try:
os.chdir(self.test_dir)
local_writepath = "nonexistdir/testfile.txt"
pull_textfile_runner(self.test_text_file_four_dict)
self.assertTrue(file_exists(local_writepath))
shutil.rmtree("nonexistdir")
os.chdir(self.cwd)
except Exception as e:
os.chdir(self.cwd)
raise e
def test_doxx_pull_multiple_text_files_remotefiles_module(self):
try:
os.chdir(self.test_dir)
local_writepath_one = "testfile.txt"
local_writepath_two = "testfile"
pull_textfile_runner(self.test_multi_text_files_dict)
self.assertTrue(file_exists(local_writepath_one))
self.assertTrue(file_exists(local_writepath_two))
os.remove(local_writepath_one)
os.remove(local_writepath_two)
os.chdir(self.cwd)
except Exception as e:
os.chdir(self.cwd)
raise e
def test_doxx_pull_multiple_text_files_subdirs_remotefiles_module(self):
try:
os.chdir(self.test_dir)
local_writepath_one = "existdir/testfile.txt"
local_writepath_two = "nonexistdir/testfile"
pull_textfile_runner(self.test_multi_text_files_two_dict)
self.assertTrue(file_exists(local_writepath_one))
self.assertTrue(file_exists(local_writepath_two))
os.remove(local_writepath_one)
shutil.rmtree("nonexistdir")
os.chdir(self.cwd)
except Exception as e:
os.chdir(self.cwd)
raise e
def test_doxx_pull_badURL_text_file_remotefiles_module(self):
try:
os.chdir(self.test_dir)
local_writepath = "testfile.txt"
with self.assertRaises(SystemExit):
pull_textfile_runner(self.test_bad_text_file_dict)
self.assertFalse(file_exists(local_writepath))
os.chdir(self.cwd)
except Exception as e:
os.chdir(self.cwd)
raise e
class DoxxBinaryFileKeyBuildSpecPullTest(unittest.TestCase):
def setUp(self):
self.test_binary_file = "https://github.com/bit-store/testfiles/raw/master/doxx/pull-tests/packed.tar.gz"
class DoxxGithubRepoKeyBuildSpecPullTest(unittest.TestCase):
def setUp(self):
self.test_gh_repo = "https://github.com/bit-store/testfiles/archive/master.tar.gz"
```
#### File: doxx/tests/test_doxx-utilities.py
```python
import sys
import shutil
import unittest
from doxx.utilities.filesystem import _make_os_dependent_path, _create_dirs
from Naked.toolshed.system import dir_exists
class DoxxPathUtilitiesTests(unittest.TestCase):
def setUp(self):
pass
def test_doxx_rootdir_filepath_returns_filepath(self):
standard_path = "testfile.txt"
test_path = _make_os_dependent_path(standard_path)
self.assertEqual(standard_path, test_path)
def test_doxx_posix_to_posix_paths(self):
standard_path = "this/is/some/path"
test_path = _make_os_dependent_path(standard_path)
if sys.platform == "darwin":
self.assertEqual(standard_path, test_path)
elif sys.platform.startswith("linux"):
self.assertEqual(standard_path, test_path)
elif sys.platform == "win32":
self.assertEqual(test_path, "this\is\some\path")
else:
raise(Exception, "Cannot detect operating system")
def test_doxx_posix_to_posix_paths_with_endslash(self):
standard_path = "this/is/some/path/"
test_path = _make_os_dependent_path(standard_path)
if sys.platform == "darwin":
self.assertEqual(standard_path, test_path)
elif sys.platform.startswith("linux"):
self.assertEqual(standard_path, test_path)
elif sys.platform == "win32":
self.assertEqual(test_path, "this\is\some\path")
else:
raise(Exception, "Cannot detect operating system")
def test_doxx_dos_to_posix_paths(self):
standard_path = "this\is\some\path"
test_path = _make_os_dependent_path(standard_path)
if sys.platform == "darwin":
self.assertEqual(test_path, "this/is/some/path")
elif sys.platform.startswith("linux"):
self.assertEqual(test_path, "this/is/some/path")
elif sys.platform == "win32":
self.assertEqual(test_path, "this\is\some\path")
else:
raise(Exception, "Cannot detect operating system")
class DoxxDirectoryUtilitiesTest(unittest.TestCase):
def setUp(self):
self.testpath1 = "utilities-tests/testdir1" # exists
self.testpath2 = "utilities-tests/testdir2" # does not exist
self.testpath2_missingdir = "utilities-tests/testdir2/testfile.txt"
if dir_exists(self.testpath2):
shutil.rmtree(self.testpath2)
def test_doxx_make_dirs_exists(self):
_create_dirs(self.testpath1)
self.assertTrue(dir_exists(self.testpath1))
def test_doxx_make_dirs_nonexist(self):
self.assertFalse(dir_exists(self.testpath2))
_create_dirs(self.testpath2_missingdir)
self.assertTrue(dir_exists(self.testpath2))
shutil.rmtree(self.testpath2)
``` |
{
"source": "joesaunderson/posthog",
"score": 2
} |
#### File: materialized_columns/test/test_columns.py
```python
from freezegun import freeze_time
from ee.clickhouse.materialized_columns.columns import get_materialized_columns, materialize
from ee.clickhouse.util import ClickhouseTestMixin
from posthog.test.base import BaseTest
class TestMaterializedColumns(ClickhouseTestMixin, BaseTest):
def test_get_columns_default(self):
self.assertCountEqual(get_materialized_columns("events"), [])
self.assertCountEqual(get_materialized_columns("person"), [])
def test_caching_and_materializing(self):
with freeze_time("2020-01-04T13:01:01Z"):
materialize("events", "$foo")
materialize("events", "$bar")
materialize("person", "$zeta")
self.assertCountEqual(get_materialized_columns("events", use_cache=True), ["$foo", "$bar"])
self.assertCountEqual(get_materialized_columns("person", use_cache=True), ["$zeta"])
materialize("events", "abc")
self.assertCountEqual(get_materialized_columns("events", use_cache=True), ["$foo", "$bar"])
with freeze_time("2020-01-04T14:00:01Z"):
self.assertCountEqual(get_materialized_columns("events", use_cache=True), ["$foo", "$bar", "abc"])
```
#### File: queries/test/test_paths.py
```python
from uuid import uuid4
from ee.clickhouse.materialized_columns.columns import materialize
from ee.clickhouse.models.event import create_event
from ee.clickhouse.queries.clickhouse_paths import ClickhousePaths
from ee.clickhouse.util import ClickhouseTestMixin
from posthog.constants import PAGEVIEW_EVENT, SCREEN_EVENT
from posthog.models.filters.path_filter import PathFilter
from posthog.models.person import Person
from posthog.queries.test.test_paths import paths_test_factory
def _create_event(**kwargs):
kwargs.update({"event_uuid": uuid4()})
create_event(**kwargs)
class TestClickhousePaths(ClickhouseTestMixin, paths_test_factory(ClickhousePaths, _create_event, Person.objects.create)): # type: ignore
def test_denormalized_properties(self):
materialize("events", "$current_url")
materialize("events", "$screen_name")
query, _ = ClickhousePaths().get_query(team=self.team, filter=PathFilter(data={"path_type": PAGEVIEW_EVENT}))
self.assertNotIn("json", query.lower())
query, _ = ClickhousePaths().get_query(team=self.team, filter=PathFilter(data={"path_type": SCREEN_EVENT}))
self.assertNotIn("json", query.lower())
self.test_current_url_paths_and_logic()
``` |
{
"source": "joesavage/workload-automation",
"score": 2
} |
#### File: wlauto/commands/record.py
```python
import os
import sys
import signal
from math import ceil
from wlauto import ExtensionLoader, Command, settings
from wlauto.common.resources import Executable
from wlauto.core.resource import NO_ONE
from wlauto.core.resolver import ResourceResolver
from wlauto.core.configuration import RunConfiguration
from wlauto.core.agenda import Agenda
from wlauto.utils.revent import ReventRecording, GAMEPAD_MODE
class ReventCommand(Command):
# Validate command options
def validate_args(self, args):
if args.clear and not args.package:
print "Package must be specified if you want to clear cache\n"
self.parser.print_help()
sys.exit()
# pylint: disable=W0201
def execute(self, args):
self.validate_args(args)
self.logger.info("Connecting to device...")
ext_loader = ExtensionLoader(packages=settings.extension_packages,
paths=settings.extension_paths)
# Setup config
self.config = RunConfiguration(ext_loader)
for filepath in settings.get_config_paths():
self.config.load_config(filepath)
self.config.set_agenda(Agenda())
self.config.finalize()
context = LightContext(self.config)
# Setup device
self.device = ext_loader.get_device(settings.device, **settings.device_config)
self.device.validate()
self.device.dynamic_modules = []
self.device.connect()
self.device.initialize(context)
host_binary = context.resolver.get(Executable(NO_ONE, self.device.abi, 'revent'))
self.target_binary = self.device.install_executable(host_binary)
self.run(args)
def run(self, args):
raise NotImplementedError()
class RecordCommand(ReventCommand):
name = 'record'
description = '''Performs a revent recording
This command helps create revent recordings. It will automatically
deploy revent and even has the option of automatically opening apps.
Revent allows you to record raw inputs such as screen swipes or button presses.
This can be useful for recording inputs for workloads such as games that don't
have XML UI layouts that can be used with UIAutomator. As a drawback from this,
revent recordings are specific to the device type they were recorded on.
WA uses two parts to form the names of revent recordings in the format,
{device_name}.{suffix}.revent
- device_name can either be specified manually with the ``-d`` argument or
else the name of the device will be automatically determined. On an Android device it is obtained
from ``build.prop``, on Linux devices it is obtained from ``/proc/device-tree/model``.
- suffix is used by WA to determine which part of the app execution the
recording is for, currently either ``setup``, ``run`` or ``teardown``. This
should be specified with the ``-s`` argument.
**gamepad recording**
revent supports an alternative recording mode, where it will record events
from a single gamepad device. In this mode, revent will store the
description of this device as a part of the recording. When replaying such
a recording, revent will first create a virtual gamepad using the
description, and will replay the events into it, so a physical controller
does not need to be connected on replay. Unlike standard revent recordings,
recordings generated in this mode should be (to an extent) portable across
different devices.
note:
- The device on which a recording is being made in gamepad mode, must have
exactly one gamepad connected to it.
- The device on which a gamepad recording is being replayed must have
/dev/uinput enabled in the kernel (this interface is necessary to create
virtual gamepad).
'''
def initialize(self, context):
self.context = context
self.parser.add_argument('-d', '--device', help='The name of the device')
self.parser.add_argument('-s', '--suffix', help='The suffix of the revent file, e.g. ``setup``')
self.parser.add_argument('-o', '--output', help='Directory to save the recording in')
self.parser.add_argument('-p', '--package', help='Package to launch before recording')
self.parser.add_argument('-g', '--gamepad', help='Record from a gamepad rather than all devices.',
action="store_true")
self.parser.add_argument('-C', '--clear', help='Clear app cache before launching it',
action="store_true")
self.parser.add_argument('-S', '--capture-screen', help='Record a screen capture after recording',
action="store_true")
def run(self, args):
if args.device:
device_name = args.device
else:
device_name = self.device.get_device_model()
if args.suffix:
args.suffix += "."
revent_file = self.device.path.join(self.device.working_directory,
'{}.{}revent'.format(device_name, args.suffix or ""))
if args.clear:
self.device.execute("pm clear {}".format(args.package))
if args.package:
self.logger.info("Starting {}".format(args.package))
self.device.execute('monkey -p {} -c android.intent.category.LAUNCHER 1'.format(args.package))
self.logger.info("Press Enter when you are ready to record...")
raw_input("")
gamepad_flag = '-g ' if args.gamepad else ''
command = "{} record {}-s {}".format(self.target_binary, gamepad_flag, revent_file)
self.device.kick_off(command)
self.logger.info("Press Enter when you have finished recording...")
raw_input("")
if args.capture_screen:
self.logger.info("Recording screen capture")
self.device.capture_screen(args.output or os.getcwdu())
self.device.killall("revent", signal.SIGINT)
self.logger.info("Waiting for revent to finish")
while self.device.get_pids_of("revent"):
pass
self.logger.info("Pulling files from device")
self.device.pull_file(revent_file, args.output or os.getcwdu())
class ReplayCommand(ReventCommand):
name = 'replay'
description = '''Replay a revent recording
Revent allows you to record raw inputs such as screen swipes or button presses.
    See ``wa show record`` for how to make a revent recording.
'''
def initialize(self, context):
self.context = context
self.parser.add_argument('revent', help='The name of the file to replay')
self.parser.add_argument('-p', '--package', help='Package to launch before recording')
self.parser.add_argument('-C', '--clear', help='Clear app cache before launching it',
action="store_true")
# pylint: disable=W0201
def run(self, args):
self.logger.info("Pushing file to device")
self.device.push_file(args.revent, self.device.working_directory)
revent_file = self.device.path.join(self.device.working_directory, os.path.split(args.revent)[1])
if args.clear:
self.device.execute("pm clear {}".format(args.package))
if args.package:
self.logger.info("Starting {}".format(args.package))
self.device.execute('monkey -p {} -c android.intent.category.LAUNCHER 1'.format(args.package))
self.logger.info("Replaying recording")
command = "{} replay {}".format(self.target_binary, revent_file)
recording = ReventRecording(args.revent)
timeout = ceil(recording.duration) + 30
recording.close()
self.device.execute(command, timeout=timeout,
as_root=(recording.mode == GAMEPAD_MODE))
self.logger.info("Finished replay")
# Used to satisfy the API
class LightContext(object):
def __init__(self, config):
self.resolver = ResourceResolver(config)
self.resolver.load()
```
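The recording filename assembled in `RecordCommand.run()` follows the `{device_name}.{suffix}.revent` convention described in the command's help text. The small sketch below reproduces just that naming logic; the device name and suffixes are hypothetical examples.
```python
# Reproduces only the naming logic from RecordCommand.run(); the device name
# and suffix values below are hypothetical.
def revent_name(device_name, suffix=None):
    if suffix:
        suffix += "."
    return '{}.{}revent'.format(device_name, suffix or "")

print(revent_name("generic_android", "setup"))  # generic_android.setup.revent
print(revent_name("generic_android", "run"))    # generic_android.run.revent
print(revent_name("generic_android"))           # generic_android.revent
```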
#### File: wlauto/commands/show.py
```python
import sys
import subprocess
from cStringIO import StringIO
from wlauto import Command, ExtensionLoader, settings
from wlauto.utils.doc import (get_summary, get_description, get_type_name, format_column, format_body,
format_paragraph, indent, strip_inlined_text)
from wlauto.utils.misc import get_pager
from wlauto.utils.terminalsize import get_terminal_size
class ShowCommand(Command):
name = 'show'
description = """
Display documentation for the specified extension (workload, instrument, etc.).
"""
def initialize(self, context):
self.parser.add_argument('name', metavar='EXTENSION',
help='''The name of the extension for which information will
be shown.''')
def execute(self, args):
# pylint: disable=unpacking-non-sequence
ext_loader = ExtensionLoader(packages=settings.extension_packages, paths=settings.extension_paths)
extension = ext_loader.get_extension_class(args.name)
out = StringIO()
term_width, term_height = get_terminal_size()
format_extension(extension, out, term_width)
text = out.getvalue()
pager = get_pager()
if len(text.split('\n')) > term_height and pager:
try:
sp = subprocess.Popen(pager, stdin=subprocess.PIPE)
sp.communicate(text)
except OSError:
self.logger.warning('Could not use PAGER "{}"'.format(pager))
sys.stdout.write(text)
else:
sys.stdout.write(text)
def format_extension(extension, out, width):
format_extension_name(extension, out)
out.write('\n')
format_extension_summary(extension, out, width)
out.write('\n')
if hasattr(extension, 'supported_platforms'):
format_supported_platforms(extension, out, width)
out.write('\n')
if extension.parameters:
format_extension_parameters(extension, out, width)
out.write('\n')
format_extension_description(extension, out, width)
def format_extension_name(extension, out):
out.write('\n{}\n'.format(extension.name))
def format_extension_summary(extension, out, width):
out.write('{}\n'.format(format_body(strip_inlined_text(get_summary(extension)), width)))
def format_supported_platforms(extension, out, width):
text = 'supported on: {}'.format(', '.join(extension.supported_platforms))
out.write('{}\n'.format(format_body(text, width)))
def format_extension_description(extension, out, width):
# skip the initial paragraph of multi-paragraph description, as already
# listed above.
description = get_description(extension).split('\n\n', 1)[-1]
out.write('{}\n'.format(format_body(strip_inlined_text(description), width)))
def format_extension_parameters(extension, out, width, shift=4):
out.write('parameters:\n\n')
param_texts = []
for param in extension.parameters:
description = format_paragraph(strip_inlined_text(param.description or ''), width - shift)
param_text = '{}'.format(param.name)
if param.mandatory:
param_text += " (MANDATORY)"
param_text += '\n{}\n'.format(description)
param_text += indent('type: {}\n'.format(get_type_name(param.kind)))
if param.allowed_values:
param_text += indent('allowed values: {}\n'.format(', '.join(map(str, param.allowed_values))))
elif param.constraint:
param_text += indent('constraint: {}\n'.format(get_type_name(param.constraint)))
if param.default is not None:
param_text += indent('default: {}\n'.format(param.default))
param_texts.append(indent(param_text, shift))
out.write(format_column('\n'.join(param_texts), width))
```
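`ShowCommand.execute()` only pipes its output through a pager when the rendered text is taller than the terminal. Below is a standalone sketch of that pattern; the pager and terminal-height lookups are simplified stand-ins for the wlauto `get_pager`/`get_terminal_size` utilities, not their actual implementations.
```python
# Simplified, standalone sketch of the "page only if it does not fit" pattern
# used by ShowCommand.execute(); PAGER resolution and the terminal height are
# crude stand-ins, not the wlauto helpers.
import os
import subprocess
import sys

def display(text, term_height=24):
    pager = os.environ.get('PAGER', 'less')
    if len(text.split('\n')) > term_height and pager:
        try:
            proc = subprocess.Popen(pager, stdin=subprocess.PIPE)
            proc.communicate(text.encode('utf-8'))
        except OSError:
            sys.stdout.write(text)
    else:
        sys.stdout.write(text)
```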
#### File: wlauto/core/agenda.py
```python
import os
from copy import copy
from collections import OrderedDict, defaultdict
import yaml
from wlauto.exceptions import ConfigError
from wlauto.utils.misc import load_struct_from_yaml, LoadSyntaxError
from wlauto.utils.types import counter, reset_counter
def get_aliased_param(d, aliases, default=None, pop=True):
alias_map = [i for i, a in enumerate(aliases) if a in d]
if len(alias_map) > 1:
message = 'Only one of {} may be specified in a single entry'
raise ConfigError(message.format(aliases))
elif alias_map:
if pop:
return d.pop(aliases[alias_map[0]])
else:
return d[aliases[alias_map[0]]]
else:
return default
class AgendaEntry(object):
def to_dict(self):
return copy(self.__dict__)
class AgendaWorkloadEntry(AgendaEntry):
"""
Specifies execution of a workload, including things like the number of
iterations, device runtime_parameters configuration, etc.
"""
def __init__(self, **kwargs):
super(AgendaWorkloadEntry, self).__init__()
self.id = kwargs.pop('id')
self.workload_name = get_aliased_param(kwargs, ['workload_name', 'name'])
if not self.workload_name:
raise ConfigError('No workload name specified in entry {}'.format(self.id))
self.label = kwargs.pop('label', self.workload_name)
self.number_of_iterations = kwargs.pop('iterations', None)
self.boot_parameters = get_aliased_param(kwargs,
['boot_parameters', 'boot_params'],
default=OrderedDict())
self.runtime_parameters = get_aliased_param(kwargs,
['runtime_parameters', 'runtime_params'],
default=OrderedDict())
self.workload_parameters = get_aliased_param(kwargs,
['workload_parameters', 'workload_params', 'params'],
default=OrderedDict())
self.instrumentation = kwargs.pop('instrumentation', [])
self.flash = kwargs.pop('flash', OrderedDict())
self.classifiers = kwargs.pop('classifiers', OrderedDict())
if kwargs:
raise ConfigError('Invalid entry(ies) in workload {}: {}'.format(self.id, ', '.join(kwargs.keys())))
class AgendaSectionEntry(AgendaEntry):
"""
    Specifies an agenda section: a group of workload entries that share common
    configuration, such as the number of iterations, device runtime_parameters, etc.
"""
def __init__(self, agenda, **kwargs):
super(AgendaSectionEntry, self).__init__()
self.id = kwargs.pop('id')
self.number_of_iterations = kwargs.pop('iterations', None)
self.boot_parameters = get_aliased_param(kwargs,
['boot_parameters', 'boot_params'],
default=OrderedDict())
self.runtime_parameters = get_aliased_param(kwargs,
['runtime_parameters', 'runtime_params', 'params'],
default=OrderedDict())
self.workload_parameters = get_aliased_param(kwargs,
['workload_parameters', 'workload_params'],
default=OrderedDict())
self.instrumentation = kwargs.pop('instrumentation', [])
self.flash = kwargs.pop('flash', OrderedDict())
self.classifiers = kwargs.pop('classifiers', OrderedDict())
self.workloads = []
for w in kwargs.pop('workloads', []):
self.workloads.append(agenda.get_workload_entry(w))
if kwargs:
raise ConfigError('Invalid entry(ies) in section {}: {}'.format(self.id, ', '.join(kwargs.keys())))
def to_dict(self):
d = copy(self.__dict__)
d['workloads'] = [w.to_dict() for w in self.workloads]
return d
class AgendaGlobalEntry(AgendaEntry):
"""
Workload configuration global to all workloads.
"""
def __init__(self, **kwargs):
super(AgendaGlobalEntry, self).__init__()
self.number_of_iterations = kwargs.pop('iterations', None)
self.boot_parameters = get_aliased_param(kwargs,
['boot_parameters', 'boot_params'],
default=OrderedDict())
self.runtime_parameters = get_aliased_param(kwargs,
['runtime_parameters', 'runtime_params', 'params'],
default=OrderedDict())
self.workload_parameters = get_aliased_param(kwargs,
['workload_parameters', 'workload_params'],
default=OrderedDict())
self.instrumentation = kwargs.pop('instrumentation', [])
self.flash = kwargs.pop('flash', OrderedDict())
self.classifiers = kwargs.pop('classifiers', OrderedDict())
if kwargs:
raise ConfigError('Invalid entries in global section: {}'.format(kwargs))
class Agenda(object):
def __init__(self, source=None):
self.filepath = None
self.config = {}
self.global_ = None
self.sections = []
self.workloads = []
self._seen_ids = defaultdict(set)
if source:
try:
reset_counter('section')
reset_counter('workload')
self._load(source)
except (ConfigError, LoadSyntaxError, SyntaxError), e:
raise ConfigError(str(e))
def add_workload_entry(self, w):
entry = self.get_workload_entry(w)
self.workloads.append(entry)
def get_workload_entry(self, w):
if isinstance(w, basestring):
w = {'name': w}
if not isinstance(w, dict):
raise ConfigError('Invalid workload entry: "{}" in {}'.format(w, self.filepath))
self._assign_id_if_needed(w, 'workload')
return AgendaWorkloadEntry(**w)
def _load(self, source): # pylint: disable=too-many-branches
try:
raw = self._load_raw_from_source(source)
except ValueError as e:
name = getattr(source, 'name', '')
raise ConfigError('Error parsing agenda {}: {}'.format(name, e))
if not isinstance(raw, dict):
message = '{} does not contain a valid agenda structure; top level must be a dict.'
raise ConfigError(message.format(self.filepath))
for k, v in raw.iteritems():
if v is None:
raise ConfigError('Empty "{}" entry in {}'.format(k, self.filepath))
if k == 'config':
if not isinstance(v, dict):
raise ConfigError('Invalid agenda: "config" entry must be a dict')
self.config = v
elif k == 'global':
self.global_ = AgendaGlobalEntry(**v)
elif k == 'sections':
self._collect_existing_ids(v, 'section')
for s in v:
if not isinstance(s, dict):
raise ConfigError('Invalid section entry: "{}" in {}'.format(s, self.filepath))
self._collect_existing_ids(s.get('workloads', []), 'workload')
for s in v:
self._assign_id_if_needed(s, 'section')
self.sections.append(AgendaSectionEntry(self, **s))
elif k == 'workloads':
self._collect_existing_ids(v, 'workload')
for w in v:
self.workloads.append(self.get_workload_entry(w))
else:
raise ConfigError('Unexpected agenda entry "{}" in {}'.format(k, self.filepath))
def _load_raw_from_source(self, source):
if hasattr(source, 'read') and hasattr(source, 'name'): # file-like object
self.filepath = source.name
raw = load_struct_from_yaml(text=source.read())
elif isinstance(source, basestring):
if os.path.isfile(source):
self.filepath = source
raw = load_struct_from_yaml(filepath=self.filepath)
else: # assume YAML text
raw = load_struct_from_yaml(text=source)
else:
raise ConfigError('Unknown agenda source: {}'.format(source))
return raw
def _collect_existing_ids(self, ds, pool):
# Collection needs to take place first so that auto IDs can be
# correctly assigned, e.g. if someone explicitly specified an ID
# of '1' for one of the workloads.
for d in ds:
if isinstance(d, dict) and 'id' in d:
did = str(d['id'])
if did in self._seen_ids[pool]:
raise ConfigError('Duplicate {} ID: {}'.format(pool, did))
self._seen_ids[pool].add(did)
def _assign_id_if_needed(self, d, pool):
# Also enforces string IDs
if d.get('id') is None:
did = str(counter(pool))
while did in self._seen_ids[pool]:
did = str(counter(pool))
d['id'] = did
self._seen_ids[pool].add(did)
else:
d['id'] = str(d['id'])
# Modifying the yaml parser to use an OrderedDict, rather than regular Python
# dict for mappings. This preserves the order in which the items are
# specified. See
# http://stackoverflow.com/a/21048064
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
def dict_representer(dumper, data):
return dumper.represent_mapping(_mapping_tag, data.iteritems())
def dict_constructor(loader, node):
pairs = loader.construct_pairs(node)
seen_keys = set()
for k, _ in pairs:
if k in seen_keys:
raise ValueError('Duplicate entry: {}'.format(k))
seen_keys.add(k)
return OrderedDict(pairs)
yaml.add_representer(OrderedDict, dict_representer)
yaml.add_constructor(_mapping_tag, dict_constructor)
```
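`Agenda._load()` accepts exactly four top-level keys: `config`, `global`, `sections`, and `workloads`. The minimal agenda below is illustrative only; the workload names and parameter values are made up, and it simply exercises the entry classes defined above.
```python
# Minimal illustrative agenda; the workload names and parameter values are
# hypothetical and only exercise the keys handled by Agenda._load().
example_agenda_yaml = """
config:
    device: generic_android
global:
    iterations: 3
sections:
    - id: baseline
      workloads:
          - name: dhrystone
workloads:
    - name: memcpy
      params:
          buffer_size: 1024
"""

agenda = Agenda(example_agenda_yaml)
print([w.workload_name for w in agenda.workloads])  # ['memcpy']
print([s.id for s in agenda.sections])              # ['baseline']
print([w.workload_name for s in agenda.sections for w in s.workloads])  # ['dhrystone']
```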
#### File: wlauto/core/extension_loader.py
```python
import os
import sys
import inspect
import imp
import string
import logging
from functools import partial
from collections import OrderedDict
from wlauto.core.bootstrap import settings
from wlauto.core.extension import Extension
from wlauto.exceptions import NotFoundError, LoaderError
from wlauto.utils.misc import walk_modules, load_class, merge_lists, merge_dicts, get_article
from wlauto.utils.types import identifier
MODNAME_TRANS = string.maketrans(':/\\.', '____')
class ExtensionLoaderItem(object):
def __init__(self, ext_tuple):
self.name = ext_tuple.name
self.default_package = ext_tuple.default_package
self.default_path = ext_tuple.default_path
self.cls = load_class(ext_tuple.cls)
class GlobalParameterAlias(object):
"""
Represents a "global alias" for an extension parameter. A global alias
    is specified at the top level of config rather than being namespaced under
    an extension name.
Multiple extensions may have parameters with the same global_alias if they are
part of the same inheritance hierarchy and one parameter is an override of the
other. This class keeps track of all such cases in its extensions dict.
"""
def __init__(self, name):
self.name = name
self.extensions = {}
def iteritems(self):
for ext in self.extensions.itervalues():
yield (self.get_param(ext), ext)
def get_param(self, ext):
for param in ext.parameters:
if param.global_alias == self.name:
return param
message = 'Extension {} does not have a parameter with global alias {}'
raise ValueError(message.format(ext.name, self.name))
def update(self, other_ext):
self._validate_ext(other_ext)
self.extensions[other_ext.name] = other_ext
def _validate_ext(self, other_ext):
other_param = self.get_param(other_ext)
for param, ext in self.iteritems():
if ((not (issubclass(ext, other_ext) or issubclass(other_ext, ext))) and
other_param.kind != param.kind):
message = 'Duplicate global alias {} declared in {} and {} extensions with different types'
raise LoaderError(message.format(self.name, ext.name, other_ext.name))
if param.kind != other_param.kind:
message = 'Two params {} in {} and {} in {} both declare global alias {}, and are of different kinds'
raise LoaderError(message.format(param.name, ext.name,
other_param.name, other_ext.name, self.name))
def __str__(self):
text = 'GlobalAlias({} => {})'
extlist = ', '.join(['{}.{}'.format(e.name, p.name) for p, e in self.iteritems()])
return text.format(self.name, extlist)
class ExtensionLoader(object):
"""
Discovers, enumerates and loads available devices, configs, etc.
The loader will attempt to discover things on construction by looking
in predetermined set of locations defined by default_paths. Optionally,
additional locations may specified through paths parameter that must
be a list of additional Python module paths (i.e. dot-delimited).
"""
_instance = None
# Singleton
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(ExtensionLoader, cls).__new__(cls, *args, **kwargs)
else:
for k, v in kwargs.iteritems():
if not hasattr(cls._instance, k):
raise ValueError('Invalid parameter for ExtensionLoader: {}'.format(k))
setattr(cls._instance, k, v)
return cls._instance
def set_load_defaults(self, value):
self._load_defaults = value
if value:
self.packages = merge_lists(self.default_packages, self.packages, duplicates='last')
def get_load_defaults(self):
return self._load_defaults
load_defaults = property(get_load_defaults, set_load_defaults)
def __init__(self, packages=None, paths=None, ignore_paths=None, keep_going=False, load_defaults=True):
"""
params::
:packages: List of packages to load extensions from.
:paths: List of paths to be searched for Python modules containing
WA extensions.
            :ignore_paths: List of paths to ignore when searching for WA extensions (these would
                           typically be subdirectories of one or more locations listed in
                           the ``paths`` parameter).
:keep_going: Specifies whether to keep going if an error occurs while loading
extensions.
:load_defaults: Specifies whether extension should be loaded from default locations
(WA package, and user's WA directory) as well as the packages/paths
specified explicitly in ``packages`` and ``paths`` parameters.
"""
self._load_defaults = None
self.logger = logging.getLogger('ExtensionLoader')
self.keep_going = keep_going
self.extension_kinds = {ext_tuple.name: ExtensionLoaderItem(ext_tuple)
for ext_tuple in settings.extensions}
self.default_packages = [ext.default_package for ext in self.extension_kinds.values()]
self.packages = packages or []
self.load_defaults = load_defaults
self.paths = paths or []
self.ignore_paths = ignore_paths or []
self.extensions = {}
self.aliases = {}
self.global_param_aliases = {}
# create an empty dict for each extension type to store discovered
# extensions.
for ext in self.extension_kinds.values():
setattr(self, '_' + ext.name, {})
self._load_from_packages(self.packages)
self._load_from_paths(self.paths, self.ignore_paths)
def update(self, packages=None, paths=None, ignore_paths=None):
""" Load extensions from the specified paths/packages
without clearing or reloading existing extension. """
if packages:
self.packages.extend(packages)
self._load_from_packages(packages)
if paths:
self.paths.extend(paths)
self.ignore_paths.extend(ignore_paths or [])
self._load_from_paths(paths, ignore_paths or [])
def clear(self):
""" Clear all discovered items. """
self.extensions.clear()
for ext in self.extension_kinds.values():
self._get_store(ext).clear()
def reload(self):
""" Clear all discovered items and re-run the discovery. """
self.clear()
self._load_from_packages(self.packages)
self._load_from_paths(self.paths, self.ignore_paths)
def get_extension_class(self, name, kind=None):
"""
Return the class for the specified extension if found or raises ``ValueError``.
"""
name, _ = self.resolve_alias(name)
if kind is None:
return self.extensions[name]
ext = self.extension_kinds.get(kind)
if ext is None:
raise ValueError('Unknown extension type: {}'.format(kind))
store = self._get_store(ext)
if name not in store:
            raise NotFoundError('Extension {} is not {} {}.'.format(name, get_article(kind), kind))
return store[name]
def get_extension(self, name, *args, **kwargs):
"""
Return extension of the specified kind with the specified name. Any additional
parameters will be passed to the extension's __init__.
"""
name, base_kwargs = self.resolve_alias(name)
kind = kwargs.pop('kind', None)
kwargs = merge_dicts(base_kwargs, kwargs, list_duplicates='last', dict_type=OrderedDict)
cls = self.get_extension_class(name, kind)
extension = _instantiate(cls, args, kwargs)
extension.load_modules(self)
return extension
def get_default_config(self, ext_name):
"""
Returns the default configuration for the specified extension name. The name may be an alias,
in which case, the returned config will be augmented with appropriate alias overrides.
"""
real_name, alias_config = self.resolve_alias(ext_name)
base_default_config = self.get_extension_class(real_name).get_default_config()
return merge_dicts(base_default_config, alias_config, list_duplicates='last', dict_type=OrderedDict)
def list_extensions(self, kind=None):
"""
List discovered extension classes. Optionally, only list extensions of a
particular type.
"""
if kind is None:
return self.extensions.values()
if kind not in self.extension_kinds:
raise ValueError('Unknown extension type: {}'.format(kind))
return self._get_store(self.extension_kinds[kind]).values()
def has_extension(self, name, kind=None):
"""
Returns ``True`` if an extensions with the specified ``name`` has been
discovered by the loader. If ``kind`` was specified, only returns ``True``
if the extension has been found, *and* it is of the specified kind.
"""
try:
self.get_extension_class(name, kind)
return True
except NotFoundError:
return False
def resolve_alias(self, alias_name):
"""
Try to resolve the specified name as an extension alias. Returns a
two-tuple, the first value of which is actual extension name, and the
second is a dict of parameter values for this alias. If the name passed
is already an extension name, then the result is ``(alias_name, {})``.
"""
alias_name = identifier(alias_name.lower())
if alias_name in self.extensions:
return (alias_name, {})
if alias_name in self.aliases:
alias = self.aliases[alias_name]
return (alias.extension_name, alias.params)
raise NotFoundError('Could not find extension or alias "{}"'.format(alias_name))
# Internal methods.
def __getattr__(self, name):
"""
        This resolves methods for specific extension types based on the corresponding
generic extension methods. So it's possible to say things like ::
loader.get_device('foo')
instead of ::
loader.get_extension('foo', kind='device')
"""
if name.startswith('get_'):
name = name.replace('get_', '', 1)
if name in self.extension_kinds:
return partial(self.get_extension, kind=name)
if name.startswith('list_'):
name = name.replace('list_', '', 1).rstrip('s')
if name in self.extension_kinds:
return partial(self.list_extensions, kind=name)
if name.startswith('has_'):
name = name.replace('has_', '', 1)
if name in self.extension_kinds:
return partial(self.has_extension, kind=name)
raise AttributeError(name)
def _get_store(self, ext):
name = getattr(ext, 'name', ext)
return getattr(self, '_' + name)
def _load_from_packages(self, packages):
try:
for package in packages:
for module in walk_modules(package):
self._load_module(module)
except ImportError as e:
message = 'Problem loading extensions from package {}: {}'
raise LoaderError(message.format(package, e.message))
def _load_from_paths(self, paths, ignore_paths):
self.logger.debug('Loading from paths.')
for path in paths:
self.logger.debug('Checking path %s', path)
for root, _, files in os.walk(path, followlinks=True):
should_skip = False
for igpath in ignore_paths:
if root.startswith(igpath):
should_skip = True
break
if should_skip:
continue
for fname in files:
if os.path.splitext(fname)[1].lower() != '.py':
continue
filepath = os.path.join(root, fname)
try:
modname = os.path.splitext(filepath[1:])[0].translate(MODNAME_TRANS)
module = imp.load_source(modname, filepath)
self._load_module(module)
except (SystemExit, ImportError), e:
if self.keep_going:
self.logger.warn('Failed to load {}'.format(filepath))
self.logger.warn('Got: {}'.format(e))
else:
raise LoaderError('Failed to load {}'.format(filepath), sys.exc_info())
except Exception as e:
message = 'Problem loading extensions from {}: {}'
raise LoaderError(message.format(filepath, e))
def _load_module(self, module): # NOQA pylint: disable=too-many-branches
self.logger.debug('Checking module %s', module.__name__)
for obj in vars(module).itervalues():
if inspect.isclass(obj):
if not issubclass(obj, Extension) or not hasattr(obj, 'name') or not obj.name:
continue
try:
for ext in self.extension_kinds.values():
if issubclass(obj, ext.cls):
self._add_found_extension(obj, ext)
break
else: # did not find a matching Extension type
message = 'Unknown extension type for {} (type: {})'
raise LoaderError(message.format(obj.name, obj.__class__.__name__))
except LoaderError as e:
if self.keep_going:
self.logger.warning(e)
else:
raise e
def _add_found_extension(self, obj, ext):
"""
:obj: Found extension class
:ext: matching extension item.
"""
self.logger.debug('\tAdding %s %s', ext.name, obj.name)
key = identifier(obj.name.lower())
obj.kind = ext.name
if key in self.extensions or key in self.aliases:
raise LoaderError('{} {} already exists.'.format(ext.name, obj.name))
        # Extensions are tracked both in a common extensions
        # dict and in a per-extension-kind dict (as retrieving
        # extensions by kind is a common use case).
self.extensions[key] = obj
store = self._get_store(ext)
store[key] = obj
for alias in obj.aliases:
alias_id = identifier(alias.name)
if alias_id in self.extensions or alias_id in self.aliases:
raise LoaderError('{} {} already exists.'.format(ext.name, obj.name))
self.aliases[alias_id] = alias
# Update global aliases list. If a global alias is already in the list,
# then make sure this extension is in the same parent/child hierarchy
# as the one already found.
for param in obj.parameters:
if param.global_alias:
if param.global_alias not in self.global_param_aliases:
ga = GlobalParameterAlias(param.global_alias)
ga.update(obj)
self.global_param_aliases[ga.name] = ga
else: # global alias already exists.
self.global_param_aliases[param.global_alias].update(obj)
# Utility functions.
def _instantiate(cls, args=None, kwargs=None):
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
try:
return cls(*args, **kwargs)
except Exception:
raise LoaderError('Could not load {}'.format(cls), sys.exc_info())
```
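`ExtensionLoader.__getattr__()` turns the generic `get_extension`, `list_extensions`, and `has_extension` methods into per-kind shortcuts for every kind registered in `settings.extensions`. The sketch below is illustrative only: the extension names are hypothetical examples, and which `get_*`/`list_*`/`has_*` shortcuts exist depends on the configured kinds.
```python
# Illustrative only: the extension names ('juno', 'trace-cmd') are examples;
# the available shortcuts depend on the kinds registered in settings.extensions.
loader = ExtensionLoader(packages=['wlauto'], load_defaults=True)

# The next two calls are equivalent; the first is resolved by __getattr__:
device = loader.get_device('juno')
device = loader.get_extension('juno', kind='device')

workload_classes = loader.list_workloads()   # -> list_extensions(kind='workload')
if loader.has_instrument('trace-cmd'):
    print('trace-cmd instrument is available')

# ExtensionLoader is a singleton (see __new__ above), so constructing it again
# returns the same instance with the same discovered extensions.
```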
#### File: wlauto/core/instrumentation.py
```python
import logging
import inspect
from collections import OrderedDict
import wlauto.core.signal as signal
from wlauto.core.extension import Extension
from wlauto.exceptions import WAError, DeviceNotRespondingError, TimeoutError
from wlauto.utils.misc import get_traceback, isiterable
from wlauto.utils.types import identifier
logger = logging.getLogger('instrumentation')
# Maps method names onto signals they should be registered to.
# Note: the begin/end signals are paired -- if a begin_ signal is sent,
# then the corresponding end_ signal is guaranteed to also be sent.
# Note: using OrderedDict to preserve logical ordering for the table generated
# in the documentation
SIGNAL_MAP = OrderedDict([
# Below are "aliases" for some of the more common signals to allow
# instrumentation to have similar structure to workloads
('initialize', signal.RUN_INIT),
('setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
('start', signal.BEFORE_WORKLOAD_EXECUTION),
('stop', signal.AFTER_WORKLOAD_EXECUTION),
('process_workload_result', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
('update_result', signal.AFTER_WORKLOAD_RESULT_UPDATE),
('teardown', signal.AFTER_WORKLOAD_TEARDOWN),
('finalize', signal.RUN_FIN),
('on_run_start', signal.RUN_START),
('on_run_end', signal.RUN_END),
('on_workload_spec_start', signal.WORKLOAD_SPEC_START),
('on_workload_spec_end', signal.WORKLOAD_SPEC_END),
('on_iteration_start', signal.ITERATION_START),
('on_iteration_end', signal.ITERATION_END),
('before_initial_boot', signal.BEFORE_INITIAL_BOOT),
('on_successful_initial_boot', signal.SUCCESSFUL_INITIAL_BOOT),
('after_initial_boot', signal.AFTER_INITIAL_BOOT),
('before_first_iteration_boot', signal.BEFORE_FIRST_ITERATION_BOOT),
('on_successful_first_iteration_boot', signal.SUCCESSFUL_FIRST_ITERATION_BOOT),
('after_first_iteration_boot', signal.AFTER_FIRST_ITERATION_BOOT),
('before_boot', signal.BEFORE_BOOT),
('on_successful_boot', signal.SUCCESSFUL_BOOT),
('after_boot', signal.AFTER_BOOT),
('on_spec_init', signal.SPEC_INIT),
('on_run_init', signal.RUN_INIT),
('on_iteration_init', signal.ITERATION_INIT),
('before_workload_setup', signal.BEFORE_WORKLOAD_SETUP),
('on_successful_workload_setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
('after_workload_setup', signal.AFTER_WORKLOAD_SETUP),
('before_workload_execution', signal.BEFORE_WORKLOAD_EXECUTION),
('on_successful_workload_execution', signal.SUCCESSFUL_WORKLOAD_EXECUTION),
('after_workload_execution', signal.AFTER_WORKLOAD_EXECUTION),
('before_workload_result_update', signal.BEFORE_WORKLOAD_RESULT_UPDATE),
('on_successful_workload_result_update', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
('after_workload_result_update', signal.AFTER_WORKLOAD_RESULT_UPDATE),
('before_workload_teardown', signal.BEFORE_WORKLOAD_TEARDOWN),
('on_successful_workload_teardown', signal.SUCCESSFUL_WORKLOAD_TEARDOWN),
('after_workload_teardown', signal.AFTER_WORKLOAD_TEARDOWN),
('before_overall_results_processing', signal.BEFORE_OVERALL_RESULTS_PROCESSING),
('on_successful_overall_results_processing', signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING),
('after_overall_results_processing', signal.AFTER_OVERALL_RESULTS_PROCESSING),
('on_error', signal.ERROR_LOGGED),
('on_warning', signal.WARNING_LOGGED),
])
PRIORITY_MAP = OrderedDict([
('very_fast_', 20),
('fast_', 10),
('normal_', 0),
('slow_', -10),
('very_slow_', -20),
])
installed = []
def is_installed(instrument):
if isinstance(instrument, Instrument):
if instrument in installed:
return True
if instrument.name in [i.name for i in installed]:
return True
elif isinstance(instrument, type):
if instrument in [i.__class__ for i in installed]:
return True
else: # assume string
if identifier(instrument) in [identifier(i.name) for i in installed]:
return True
return False
def is_enabled(instrument):
if isinstance(instrument, Instrument) or isinstance(instrument, type):
name = instrument.name
else: # assume string
name = instrument
try:
installed_instrument = get_instrument(name)
return installed_instrument.is_enabled
except ValueError:
return False
failures_detected = False
def reset_failures():
global failures_detected # pylint: disable=W0603
failures_detected = False
def check_failures():
result = failures_detected
reset_failures()
return result
class ManagedCallback(object):
"""
    This wraps instruments' callbacks to ensure that errors do not interfere
    with run execution.
"""
def __init__(self, instrument, callback):
self.instrument = instrument
self.callback = callback
def __call__(self, context):
if self.instrument.is_enabled:
try:
self.callback(context)
except (KeyboardInterrupt, DeviceNotRespondingError, TimeoutError): # pylint: disable=W0703
raise
except Exception as e: # pylint: disable=W0703
logger.error('Error in instrument {}'.format(self.instrument.name))
global failures_detected # pylint: disable=W0603
failures_detected = True
if isinstance(e, WAError):
logger.error(e)
else:
tb = get_traceback()
logger.error(tb)
logger.error('{}({})'.format(e.__class__.__name__, e))
if not context.current_iteration:
                    # Error occurred outside of an iteration (most likely
                    # during initial setup or teardown). Since this would affect
                    # the rest of the run, mark the instrument as broken so that
# it doesn't get re-enabled for subsequent iterations.
self.instrument.is_broken = True
disable(self.instrument)
# Need this to keep track of callbacks, because the dispatcher only keeps
# weak references, so if the callbacks aren't referenced elsewhere, they will
# be deallocated before they've had a chance to be invoked.
_callbacks = []
def install(instrument):
"""
This will look for methods (or any callable members) with specific names
in the instrument and hook them up to the corresponding signals.
:param instrument: Instrument instance to install.
"""
logger.debug('Installing instrument %s.', instrument)
if is_installed(instrument):
raise ValueError('Instrument {} is already installed.'.format(instrument.name))
for attr_name in dir(instrument):
priority = 0
stripped_attr_name = attr_name
for key, value in PRIORITY_MAP.iteritems():
if attr_name.startswith(key):
stripped_attr_name = attr_name[len(key):]
priority = value
break
if stripped_attr_name in SIGNAL_MAP:
attr = getattr(instrument, attr_name)
if not callable(attr):
raise ValueError('Attribute {} not callable in {}.'.format(attr_name, instrument))
argspec = inspect.getargspec(attr)
arg_num = len(argspec.args)
# Instrument callbacks will be passed exactly two arguments: self
# (the instrument instance to which the callback is bound) and
# context. However, we also allow callbacks to capture the context
# in variable arguments (declared as "*args" in the definition).
if arg_num > 2 or (arg_num < 2 and argspec.varargs is None):
message = '{} must take exactly 2 positional arguments; {} given.'
raise ValueError(message.format(attr_name, arg_num))
logger.debug('\tConnecting %s to %s', attr.__name__, SIGNAL_MAP[stripped_attr_name])
mc = ManagedCallback(instrument, attr)
_callbacks.append(mc)
signal.connect(mc, SIGNAL_MAP[stripped_attr_name], priority=priority)
installed.append(instrument)
def uninstall(instrument):
instrument = get_instrument(instrument)
installed.remove(instrument)
def validate():
for instrument in installed:
instrument.validate()
def get_instrument(inst):
if isinstance(inst, Instrument):
return inst
for installed_inst in installed:
if identifier(installed_inst.name) == identifier(inst):
return installed_inst
raise ValueError('Instrument {} is not installed'.format(inst))
def disable_all():
for instrument in installed:
_disable_instrument(instrument)
def enable_all():
for instrument in installed:
_enable_instrument(instrument)
def enable(to_enable):
if isiterable(to_enable):
for inst in to_enable:
_enable_instrument(inst)
else:
_enable_instrument(to_enable)
def disable(to_disable):
if isiterable(to_disable):
for inst in to_disable:
_disable_instrument(inst)
else:
_disable_instrument(to_disable)
def _enable_instrument(inst):
inst = get_instrument(inst)
if not inst.is_broken:
logger.debug('Enabling instrument {}'.format(inst.name))
inst.is_enabled = True
else:
logger.debug('Not enabling broken instrument {}'.format(inst.name))
def _disable_instrument(inst):
inst = get_instrument(inst)
if inst.is_enabled:
logger.debug('Disabling instrument {}'.format(inst.name))
inst.is_enabled = False
def get_enabled():
return [i for i in installed if i.is_enabled]
def get_disabled():
return [i for i in installed if not i.is_enabled]
class Instrument(Extension):
"""
Base class for instrumentation implementations.
"""
def __init__(self, device, **kwargs):
super(Instrument, self).__init__(**kwargs)
self.device = device
self.is_enabled = True
self.is_broken = False
def initialize(self, context):
pass
def finalize(self, context):
pass
def __str__(self):
return self.name
def __repr__(self):
return 'Instrument({})'.format(self.name)
```
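`install()` above connects an instrument's methods to signals by name: the plain names come from `SIGNAL_MAP`, and an optional `very_fast_`/`fast_`/`slow_`/`very_slow_` prefix from `PRIORITY_MAP` orders callbacks on the same signal. Below is a hedged sketch of a hypothetical instrument that relies on this convention; it is not an instrument shipped with WA.
```python
# Hypothetical instrument illustrating the naming convention handled by
# install(): SIGNAL_MAP method names plus optional PRIORITY_MAP prefixes.
import time

class ExampleTimerInstrument(Instrument):

    name = 'example_timer'

    def initialize(self, context):       # connected to signal.RUN_INIT
        self.start_time = None

    def fast_start(self, context):       # BEFORE_WORKLOAD_EXECUTION, priority 10
        self.start_time = time.time()

    def very_slow_stop(self, context):   # AFTER_WORKLOAD_EXECUTION, priority -20
        elapsed = time.time() - self.start_time
        logger.debug('workload ran for {:.2f} seconds'.format(elapsed))

# install(ExampleTimerInstrument(device)) would hook each method above up to
# its signal; enable()/disable() then toggle it without uninstalling.
```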
#### File: android/gem5/__init__.py
```python
import logging
import os
import time
from wlauto import AndroidDevice, Parameter
from wlauto.common.gem5.device import BaseGem5Device
from wlauto.exceptions import DeviceError
class Gem5AndroidDevice(BaseGem5Device, AndroidDevice):
"""
Implements gem5 Android device.
This class allows a user to connect WA to a simulation using gem5. The
connection to the device is made using the telnet connection of the
simulator, and is used for all commands. The simulator does not have ADB
support, and therefore we need to fall back to using standard shell
commands.
Files are copied into the simulation using a VirtIO 9P device in gem5. Files
are copied out of the simulated environment using the m5 writefile command
within the simulated system.
When starting the workload run, the simulator is automatically started by
Workload Automation, and a connection to the simulator is established. WA
will then wait for Android to boot on the simulated system (which can take
hours), prior to executing any other commands on the device. It is also
possible to resume from a checkpoint when starting the simulation. To do
this, please append the relevant checkpoint commands from the gem5
simulation script to the gem5_discription argument in the agenda.
Host system requirements:
* VirtIO support. We rely on diod on the host system. This can be
installed on ubuntu using the following command:
sudo apt-get install diod
Guest requirements:
* VirtIO support. We rely on VirtIO to move files into the simulation.
Please make sure that the following are set in the kernel
configuration:
CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y
CONFIG_9P_FS=y
CONFIG_9P_FS_POSIX_ACL=y
CONFIG_9P_FS_SECURITY=y
CONFIG_VIRTIO_BLK=y
* m5 binary. Please make sure that the m5 binary is on the device and
          can be found in the path.
"""
name = 'gem5_android'
platform = 'android'
parameters = [
Parameter('core_names', default=[], override=True),
Parameter('core_clusters', default=[], override=True),
]
    # Overridden from Device. For documentation, see the corresponding method in
    # Device.
def __init__(self, **kwargs):
self.logger = logging.getLogger('Gem5AndroidDevice')
AndroidDevice.__init__(self, **kwargs)
BaseGem5Device.__init__(self)
def login_to_device(self):
pass
def wait_for_boot(self):
"""
Wait for the system to boot
We monitor the sys.boot_completed and service.bootanim.exit system
properties to determine when the system has finished booting. In the
event that we cannot coerce the result of service.bootanim.exit to an
integer, we assume that the boot animation was disabled and do not wait
for it to finish.
"""
self.logger.info("Waiting for Android to boot...")
while True:
booted = False
anim_finished = True  # assume the boot animation was disabled if the value cannot be parsed
try:
booted = (int('0' + self.gem5_shell('getprop sys.boot_completed', check_exit_code=False).strip()) == 1)
anim_finished = (int(self.gem5_shell('getprop service.bootanim.exit', check_exit_code=False).strip()) == 1)
except ValueError:
pass
if booted and anim_finished:
break
time.sleep(60)
self.logger.info("Android booted")
def install(self, filepath, timeout=3 * 3600): # pylint: disable=W0221
""" Install an APK or a normal executable """
ext = os.path.splitext(filepath)[1].lower()
if ext == '.apk':
return self.install_apk(filepath, timeout)
else:
return self.install_executable(filepath)
def install_apk(self, filepath, timeout=3 * 3600): # pylint: disable=W0221
"""
Install an APK on the gem5 device
The APK is pushed to the device. Then the file and folder permissions
are changed to ensure that the APK can be correctly installed. The APK
is then installed on the device using 'pm'.
"""
self._check_ready()
self.logger.info("Installing {}".format(filepath))
ext = os.path.splitext(filepath)[1].lower()
if ext == '.apk':
filename = os.path.basename(filepath)
on_device_path = os.path.join('/data/local/tmp', filename)
self.push_file(filepath, on_device_path)
# We need to make sure that the folder permissions are set
# correctly, else the APK does not install correctly.
self.gem5_shell('chmod 775 /data/local/tmp')
self.gem5_shell('chmod 774 {}'.format(on_device_path))
self.logger.debug("Actually installing the APK: {}".format(on_device_path))
return self.gem5_shell("pm install {}".format(on_device_path))
else:
raise DeviceError('Can\'t install {}: unsupported format.'.format(filepath))
def install_executable(self, filepath, with_name=None):
""" Install an executable """
executable_name = os.path.basename(filepath)
on_device_file = self.path.join(self.working_directory, executable_name)
on_device_executable = self.path.join(self.binaries_directory, executable_name)
self.push_file(filepath, on_device_file)
if self.busybox:
self.execute('{} cp {} {}'.format(self.busybox, on_device_file, on_device_executable))
else:
self.execute('cat {} > {}'.format(on_device_file, on_device_executable))
self.execute('chmod 0777 {}'.format(on_device_executable))
return on_device_executable
def uninstall(self, package):
self._check_ready()
self.gem5_shell("pm uninstall {}".format(package))
def dump_logcat(self, outfile, filter_spec=None):
""" Extract logcat from simulation """
self.logger.info("Extracting logcat from the simulated system")
filename = outfile.split('/')[-1]
command = 'logcat -d > {}'.format(filename)
self.gem5_shell(command)
self.pull_file("{}".format(filename), outfile)
def clear_logcat(self):
"""Clear (flush) logcat log."""
if self._logcat_poller:
return self._logcat_poller.clear_buffer()
else:
return self.gem5_shell('logcat -c')
def disable_selinux(self):
""" Disable SELinux. Overridden as parent implementation uses ADB """
api_level = int(self.gem5_shell('getprop ro.build.version.sdk').strip())
# SELinux was added in Android 4.3 (API level 18). Trying to
# 'getenforce' in earlier versions will produce an error.
if api_level >= 18:
se_status = self.execute('getenforce', as_root=True).strip()
if se_status == 'Enforcing':
self.execute('setenforce 0', as_root=True)
def get_properties(self, context): # pylint: disable=R0801
""" Get the property files from the device """
BaseGem5Device.get_properties(self, context)
props = self._get_android_properties(context)
return props
def capture_screen(self, filepath):
if BaseGem5Device.capture_screen(self, filepath):
return
# If we didn't manage to do the above, call the parent class.
self.logger.warning("capture_screen: falling back to parent class implementation")
AndroidDevice.capture_screen(self, filepath)
def initialize(self, context):
self.resize_shell()
self.deploy_m5(context, force=False)
```
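`wait_for_boot()` above relies on a small coercion trick: `getprop` may return an empty string before a property is set, so prefixing the stripped output with `'0'` lets `int()` evaluate to 0 instead of raising `ValueError`. A stand-alone illustration:
```python
# The coercion used in wait_for_boot(): an empty getprop result parses as 0.
for raw in ['', '0\n', '1\n']:
    booted = (int('0' + raw.strip()) == 1)
    print('{!r} -> {}'.format(raw, booted))
# '' -> False, '0\n' -> False, '1\n' -> True
```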
#### File: android/nexus10/__init__.py
```python
import time
from wlauto import AndroidDevice, Parameter
class Nexus10Device(AndroidDevice):
name = 'Nexus10'
description = """
Nexus10 is a 10-inch tablet device with a dual-core A15.
To be able to use Nexus10 in WA, the following must be true:
- USB Debugging Mode is enabled.
- USB debugging authorisation has been generated for the host machine.
"""
default_working_directory = '/sdcard/working'
has_gpu = True
max_cores = 2
parameters = [
Parameter('core_names', default=['A15', 'A15'], override=True),
Parameter('core_clusters', default=[0, 0], override=True),
]
def initialize(self, context):
time.sleep(self.long_delay)
self.execute('svc power stayon true', check_exit_code=False)
time.sleep(self.long_delay)
self.execute('input keyevent 82')
```
#### File: external/louie/sender.py
```python
class _SENDER(type):
"""Base metaclass for sender classes."""
def __str__(cls):
return '<Sender: %s>' % (cls.__name__, )
class Any(object):
"""Used to represent either 'any sender'.
The Any class can be used with connect, disconnect, send, or
sendExact to denote that the sender paramater should react to any
sender, not just a particular sender.
"""
__metaclass__ = _SENDER
class Anonymous(object):
"""Singleton used to signal 'anonymous sender'.
The Anonymous class is used to signal that the sender of a message
is not specified (as distinct from being 'any sender').
Registering callbacks for Anonymous will only receive messages
sent without senders. Sending with anonymous will only send
messages to those receivers registered for Any or Anonymous.
Note: The default sender for connect is Any, while the default
sender for send is Anonymous. This has the effect that if you do
not specify any senders in either function then all messages are
routed as though there was a single sender (Anonymous) being used
everywhere.
"""
__metaclass__ = _SENDER
```
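The note in the `Anonymous` docstring above (connect defaults to `Any`, send defaults to `Anonymous`) is easiest to see with a small example, using only the `connect`/`send` calls exercised by the test suite that follows. This is an illustrative sketch, not part of the bundled tests.
```python
# Sketch of the default-sender behaviour described in the docstrings above.
import louie

def receiver(a):
    return a

# No sender passed to connect() -> the receiver is registered for louie.Any,
# so it reacts both to anonymous sends and to sends from any concrete object.
louie.connect(receiver, 'ping')

print(louie.send('ping', None, a=42))      # [(receiver, 42)]
print(louie.send('ping', object(), a=42))  # [(receiver, 42)]
```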
#### File: louie/test/test_dispatcher.py
```python
import unittest
import louie
from louie import dispatcher
def x(a):
return a
class Dummy(object):
pass
class Callable(object):
def __call__(self, a):
return a
def a(self, a):
return a
class TestDispatcher(unittest.TestCase):
def setUp(self):
louie.reset()
def _isclean(self):
"""Assert that everything has been cleaned up automatically"""
assert len(dispatcher.senders_back) == 0, dispatcher.senders_back
assert len(dispatcher.connections) == 0, dispatcher.connections
assert len(dispatcher.senders) == 0, dispatcher.senders
def test_Exact(self):
a = Dummy()
signal = 'this'
louie.connect(x, signal, a)
expected = [(x, a)]
result = louie.send('this', a, a=a)
assert result == expected, (
"Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
% (expected, result))
louie.disconnect(x, signal, a)
assert len(list(louie.get_all_receivers(a, signal))) == 0
self._isclean()
def test_AnonymousSend(self):
a = Dummy()
signal = 'this'
louie.connect(x, signal)
expected = [(x, a)]
result = louie.send(signal, None, a=a)
assert result == expected, (
"Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
% (expected, result))
louie.disconnect(x, signal)
assert len(list(louie.get_all_receivers(None, signal))) == 0
self._isclean()
def test_AnyRegistration(self):
a = Dummy()
signal = 'this'
louie.connect(x, signal, louie.Any)
expected = [(x, a)]
result = louie.send('this', object(), a=a)
assert result == expected, (
"Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
% (expected, result))
louie.disconnect(x, signal, louie.Any)
expected = []
result = louie.send('this', object(), a=a)
assert result == expected, (
"Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
% (expected, result))
assert len(list(louie.get_all_receivers(louie.Any, signal))) == 0
self._isclean()
def test_AllRegistration(self):
a = Dummy()
signal = 'this'
louie.connect(x, louie.All, a)
expected = [(x, a)]
result = louie.send('this', a, a=a)
assert result == expected, (
"Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
% (expected, result))
louie.disconnect(x, louie.All, a)
assert len(list(louie.get_all_receivers(a, louie.All))) == 0
self._isclean()
def test_GarbageCollected(self):
a = Callable()
b = Dummy()
signal = 'this'
louie.connect(a.a, signal, b)
expected = []
del a
result = louie.send('this', b, a=b)
assert result == expected, (
"Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
% (expected, result))
assert len(list(louie.get_all_receivers(b, signal))) == 0, (
"Remaining handlers: %s" % (louie.get_all_receivers(b, signal),))
self._isclean()
def test_GarbageCollectedObj(self):
class x:
def __call__(self, a):
return a
a = Callable()
b = Dummy()
signal = 'this'
louie.connect(a, signal, b)
expected = []
del a
result = louie.send('this', b, a=b)
assert result == expected, (
"Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
% (expected, result))
assert len(list(louie.get_all_receivers(b, signal))) == 0, (
"Remaining handlers: %s" % (louie.get_all_receivers(b, signal),))
self._isclean()
def test_MultipleRegistration(self):
a = Callable()
b = Dummy()
signal = 'this'
louie.connect(a, signal, b)
louie.connect(a, signal, b)
louie.connect(a, signal, b)
louie.connect(a, signal, b)
louie.connect(a, signal, b)
louie.connect(a, signal, b)
result = louie.send('this', b, a=b)
assert len(result) == 1, result
assert len(list(louie.get_all_receivers(b, signal))) == 1, (
"Remaining handlers: %s" % (louie.get_all_receivers(b, signal),))
del a
del b
del result
self._isclean()
def test_robust(self):
"""Test the sendRobust function."""
def fails():
raise ValueError('this')
a = object()
signal = 'this'
louie.connect(fails, louie.All, a)
result = louie.send_robust('this', a, a=a)
err = result[0][1]
assert isinstance(err, ValueError)
assert err.args == ('this', )
```
#### File: instrumentation/acmecape/__init__.py
```python
from __future__ import division
import csv
import os
import signal
import time
from fcntl import fcntl, F_GETFL, F_SETFL
from string import Template
from subprocess import Popen, PIPE, STDOUT
from wlauto import Instrument, Parameter
from wlauto.exceptions import HostError
from wlauto.utils.misc import which
IIOCAP_CMD_TEMPLATE = Template("""
${iio_capture} -n ${host} -b ${buffer_size} -c -f ${outfile} ${iio_device}
""")
def _read_nonblock(pipe, size=1024):
fd = pipe.fileno()
flags = fcntl(fd, F_GETFL)
flags |= os.O_NONBLOCK
fcntl(fd, F_SETFL, flags)
output = ''
try:
while True:
output += pipe.read(size)
except IOError:
pass
return output
class AcmeCapeInstrument(Instrument):
name = 'acmecape'
description = """
Instrumentation for the BayLibre ACME cape for power/energy measurement.
"""
parameters = [
Parameter('iio-capture', default=which('iio-capture'),
description="""
Path to the iio-capture binary. If not specified, it will be
taken from the environment.
"""),
Parameter('host', default='baylibre-acme.local',
description="""
Host name (or IP address) of the ACME cape board.
"""),
Parameter('iio-device', default='iio:device0',
description="""
"""),
Parameter('buffer-size', kind=int, default=256,
description="""
Size of the capture buffer (in KB).
"""),
]
def initialize(self, context):
if self.iio_capture is None:
raise HostError('Missing iio-capture binary')
self.command = None
self.subprocess = None
def setup(self, context):
self.outfile = os.path.join(context.output_directory, 'acme-capture.csv')
params = dict(
iio_capture=self.iio_capture,
host=self.host,
buffer_size=self.buffer_size,
iio_device=self.iio_device,
outfile=self.outfile,
)
self.command = IIOCAP_CMD_TEMPLATE.substitute(**params)
self.logger.debug('ACME cape command: {}'.format(self.command))
def very_fast_start(self, context): # pylint: disable=unused-argument
self.subprocess = Popen(self.command.split(), stdout=PIPE, stderr=STDOUT)
def very_fast_stop(self, context): # pylint: disable=unused-argument
self.subprocess.terminate()
def update_result(self, context):
timeout_secs = 10
for _ in xrange(timeout_secs):
if self.subprocess.poll() is not None:
break
time.sleep(1)
else:
output = _read_nonblock(self.subprocess.stdout)
self.subprocess.kill()
self.logger.error('iio-capture did not terminate gracefully')
if self.subprocess.poll() is None:
msg = 'Could not terminate iio-capture:\n{}'
raise HostError(msg.format(output))
if not os.path.isfile(self.outfile):
raise HostError('Output CSV not generated.')
context.add_iteration_artifact('iio-capture', self.outfile, 'data')
if os.stat(self.outfile).st_size == 0:
self.logger.warning('"{}" appears to be empty'.format(self.outfile))
return
self._compute_stats(context)
def _compute_stats(self, context):
with open(self.outfile, 'rb') as fh:
reader = csv.reader(fh, skipinitialspace=True)
header = reader.next()
power_index = header.index('power mW')
ts_index = header.index('timestamp ms')
last_ts = 0.0
energy_uj = 0
ave_power_mw = 0.0
for i, row in enumerate(reader):
row_power_mw = float(row[power_index])
row_ts = float(row[ts_index])
if i == 0:
ave_power_mw = row_power_mw
else:
ave_power_mw = ave_power_mw + (row_power_mw - ave_power_mw) / i
energy_uj += row_power_mw * (row_ts - last_ts)
last_ts = row_ts
context.add_metric('power', ave_power_mw, 'milliwatts')
context.add_metric('energy', energy_uj / 1000000, 'joules')
```
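`_compute_stats()` above derives the average power as a running mean over the CSV rows and the energy by rectangle-integrating power over the sample timestamps; since the columns are in mW and ms, the accumulated value is in microjoules, hence the division by 1,000,000 when reporting joules. The stand-alone sketch below reproduces the arithmetic on a few made-up samples so the unit handling is easy to check.
```python
# Stand-alone illustration of the acmecape arithmetic on made-up samples
# (timestamp in ms, power in mW), mirroring _compute_stats() above.
rows = [(0.0, 100.0), (10.0, 120.0), (20.0, 110.0)]

last_ts = 0.0
energy_uj = 0.0
ave_power_mw = 0.0
for i, (row_ts, row_power_mw) in enumerate(rows):
    if i == 0:
        ave_power_mw = row_power_mw          # i == 0 initialises the running mean
    else:
        ave_power_mw += (row_power_mw - ave_power_mw) / i
    energy_uj += row_power_mw * (row_ts - last_ts)   # mW * ms == microjoules
    last_ts = row_ts

print('average power: {} mW'.format(ave_power_mw))   # 115.0 mW
print('energy: {} J'.format(energy_uj / 1000000.0))  # 0.0023 J
```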
#### File: instrumentation/misc/__init__.py
```python
import os
import re
import logging
import time
import tarfile
from itertools import izip, izip_longest
from subprocess import CalledProcessError
from wlauto import Instrument, Parameter
from wlauto.core import signal
from wlauto.exceptions import DeviceError, ConfigError
from wlauto.utils.misc import diff_tokens, write_table, check_output, as_relative
from wlauto.utils.misc import ensure_file_directory_exists as _f
from wlauto.utils.misc import ensure_directory_exists as _d
from wlauto.utils.android import ApkInfo
from wlauto.utils.types import list_of_strings
logger = logging.getLogger(__name__)
class FsExtractor(Instrument):
mount_command = 'mount -t tmpfs -o size={} tmpfs {}'
extract_timeout = 30
tarname = 'sysfs.tar'
DEVICE_PATH = 0
BEFORE_PATH = 1
AFTER_PATH = 2
DIFF_PATH = 3
parameters = [
Parameter('paths', kind=list_of_strings, mandatory=True,
description="""A list of paths to be pulled from the device. These could be directories
as well as files.""",
global_alias='sysfs_extract_dirs'),
Parameter('use_tmpfs', kind=bool, default=None,
description="""
Specifies whether tmpfs should be used to cache sysfile trees and then pull them down
as a tarball. This is significantly faster than just copying the directory trees from
the device directly, but requires root and may not work on all devices. Defaults to
``True`` if the device is rooted and ``False`` if it is not.
"""),
Parameter('tmpfs_mount_point', default=None,
description="""Mount point for tmpfs partition used to store snapshots of paths."""),
Parameter('tmpfs_size', default='32m',
description="""Size of the tempfs partition."""),
]
def initialize_tmpfs(self, context):
if not self.device.is_rooted and self.use_tmpfs: # pylint: disable=access-member-before-definition
raise ConfigError('use_tmpfs must be False for an unrooted device.')
elif self.use_tmpfs is None: # pylint: disable=access-member-before-definition
self.use_tmpfs = self.device.is_rooted
if self.use_tmpfs:
self.on_device_before = self.device.path.join(self.tmpfs_mount_point, 'before')
self.on_device_after = self.device.path.join(self.tmpfs_mount_point, 'after')
if not self.device.file_exists(self.tmpfs_mount_point):
self.device.execute('mkdir -p {}'.format(self.tmpfs_mount_point), as_root=True)
self.device.execute(self.mount_command.format(self.tmpfs_size, self.tmpfs_mount_point),
as_root=True)
def setup(self, context):
before_dirs = [
_d(os.path.join(context.output_directory, 'before', self._local_dir(d)))
for d in self.paths
]
after_dirs = [
_d(os.path.join(context.output_directory, 'after', self._local_dir(d)))
for d in self.paths
]
diff_dirs = [
_d(os.path.join(context.output_directory, 'diff', self._local_dir(d)))
for d in self.paths
]
self.device_and_host_paths = zip(self.paths, before_dirs, after_dirs, diff_dirs)
if self.use_tmpfs:
for d in self.paths:
before_dir = self.device.path.join(self.on_device_before,
self.device.path.dirname(as_relative(d)))
after_dir = self.device.path.join(self.on_device_after,
self.device.path.dirname(as_relative(d)))
if self.device.file_exists(before_dir):
self.device.execute('rm -rf {}'.format(before_dir), as_root=True)
self.device.execute('mkdir -p {}'.format(before_dir), as_root=True)
if self.device.file_exists(after_dir):
self.device.execute('rm -rf {}'.format(after_dir), as_root=True)
self.device.execute('mkdir -p {}'.format(after_dir), as_root=True)
def slow_start(self, context):
if self.use_tmpfs:
for d in self.paths:
dest_dir = self.device.path.join(self.on_device_before, as_relative(d))
if '*' in dest_dir:
dest_dir = self.device.path.dirname(dest_dir)
self.device.execute('{} cp -Hr {} {}'.format(self.device.busybox, d, dest_dir),
as_root=True, check_exit_code=False)
else: # not rooted
for dev_dir, before_dir, _, _ in self.device_and_host_paths:
self.device.pull_file(dev_dir, before_dir)
def slow_stop(self, context):
if self.use_tmpfs:
for d in self.paths:
dest_dir = self.device.path.join(self.on_device_after, as_relative(d))
if '*' in dest_dir:
dest_dir = self.device.path.dirname(dest_dir)
self.device.execute('{} cp -Hr {} {}'.format(self.device.busybox, d, dest_dir),
as_root=True, check_exit_code=False)
else: # not using tmpfs
for dev_dir, _, after_dir, _ in self.device_and_host_paths:
self.device.pull_file(dev_dir, after_dir)
def update_result(self, context):
if self.use_tmpfs:
on_device_tarball = self.device.path.join(self.device.working_directory, self.tarname)
on_host_tarball = self.device.path.join(context.output_directory, self.tarname + ".gz")
self.device.execute('{} tar cf {} -C {} .'.format(self.device.busybox,
on_device_tarball,
self.tmpfs_mount_point),
as_root=True)
self.device.execute('chmod 0777 {}'.format(on_device_tarball), as_root=True)
self.device.execute('{} gzip -f {}'.format(self.device.busybox,
on_device_tarball))
self.device.pull_file(on_device_tarball + ".gz", on_host_tarball)
with tarfile.open(on_host_tarball, 'r:gz') as tf:
tf.extractall(context.output_directory)
self.device.delete_file(on_device_tarball + ".gz")
os.remove(on_host_tarball)
for paths in self.device_and_host_paths:
after_dir = paths[self.AFTER_PATH]
dev_dir = paths[self.DEVICE_PATH].strip('*') # remove potential trailing '*'
if (not os.listdir(after_dir) and
self.device.file_exists(dev_dir) and
self.device.listdir(dev_dir)):
self.logger.error('sysfs files were not pulled from the device.')
self.device_and_host_paths.remove(paths) # Path is removed to skip diffing it
for _, before_dir, after_dir, diff_dir in self.device_and_host_paths:
_diff_sysfs_dirs(before_dir, after_dir, diff_dir)
def teardown(self, context):
self._one_time_setup_done = []
def finalize(self, context):
if self.use_tmpfs:
try:
self.device.execute('umount {}'.format(self.tmpfs_mount_point), as_root=True)
except (DeviceError, CalledProcessError):
# assume a directory but not mount point
pass
self.device.execute('rm -rf {}'.format(self.tmpfs_mount_point),
as_root=True, check_exit_code=False)
def validate(self):
if not self.tmpfs_mount_point: # pylint: disable=access-member-before-definition
self.tmpfs_mount_point = self.device.path.join(self.device.working_directory, 'temp-fs')
def _local_dir(self, directory):
return os.path.dirname(as_relative(directory).replace(self.device.path.sep, os.sep))
class SysfsExtractor(FsExtractor):
name = 'sysfs_extractor'
description = """
Collects the contents of a set of directories before and after workload execution
and diffs the result.
"""
def initialize(self, context):
self.initialize_tmpfs(context)
class ExecutionTimeInstrument(Instrument):
name = 'execution_time'
description = """
Measure how long it took to execute the run() methods of a Workload.
"""
priority = 15
def __init__(self, device, **kwargs):
super(ExecutionTimeInstrument, self).__init__(device, **kwargs)
self.start_time = None
self.end_time = None
def on_run_start(self, context):
signal.connect(self.get_start_time, signal.BEFORE_WORKLOAD_EXECUTION, priority=self.priority)
signal.connect(self.get_stop_time, signal.AFTER_WORKLOAD_EXECUTION, priority=self.priority)
def get_start_time(self, context):
self.start_time = time.time()
def get_stop_time(self, context):
self.end_time = time.time()
def update_result(self, context):
execution_time = self.end_time - self.start_time
context.result.add_metric('execution_time', execution_time, 'seconds')
class InterruptStatsInstrument(Instrument):
name = 'interrupts'
description = """
Pulls the ``/proc/interrupts`` file before and after workload execution and diffs them
to show what interrupts occurred during that time.
"""
def __init__(self, device, **kwargs):
super(InterruptStatsInstrument, self).__init__(device, **kwargs)
self.before_file = None
self.after_file = None
self.diff_file = None
def setup(self, context):
self.before_file = os.path.join(context.output_directory, 'before', 'proc', 'interrupts')
self.after_file = os.path.join(context.output_directory, 'after', 'proc', 'interrupts')
self.diff_file = os.path.join(context.output_directory, 'diff', 'proc', 'interrupts')
def start(self, context):
with open(_f(self.before_file), 'w') as wfh:
wfh.write(self.device.execute('cat /proc/interrupts'))
def stop(self, context):
with open(_f(self.after_file), 'w') as wfh:
wfh.write(self.device.execute('cat /proc/interrupts'))
def update_result(self, context):
# If workload execution failed, the after_file may not have been created.
if os.path.isfile(self.after_file):
_diff_interrupt_files(self.before_file, self.after_file, _f(self.diff_file))
class DynamicFrequencyInstrument(FsExtractor):
name = 'cpufreq'
description = """
Collects dynamic frequency (DVFS) settings before and after workload execution.
"""
tarname = 'cpufreq.tar'
parameters = [
Parameter('paths', mandatory=False, override=True),
]
def initialize(self, context):
self.initialize_tmpfs(context)
def setup(self, context):
self.paths = ['/sys/devices/system/cpu']
if self.use_tmpfs:
self.paths.append('/sys/class/devfreq/*') # the '*' would cause problems for adb pull.
super(DynamicFrequencyInstrument, self).setup(context)
def validate(self):
# temp-fs would have been set in super's validate, if not explicitly specified.
if not self.tmpfs_mount_point.endswith('-cpufreq'): # pylint: disable=access-member-before-definition
self.tmpfs_mount_point += '-cpufreq'
def _diff_interrupt_files(before, after, result): # pylint: disable=R0914
output_lines = []
with open(before) as bfh:
with open(after) as ofh:
for bline, aline in izip(bfh, ofh):
bchunks = bline.strip().split()
while True:
achunks = aline.strip().split()
if achunks[0] == bchunks[0]:
diffchunks = ['']
diffchunks.append(achunks[0])
diffchunks.extend([diff_tokens(b, a) for b, a
in zip(bchunks[1:], achunks[1:])])
output_lines.append(diffchunks)
break
else: # new category appeared in the after file
diffchunks = ['>'] + achunks
output_lines.append(diffchunks)
try:
aline = ofh.next()
except StopIteration:
break
# Offset heading columns by one to allow for row labels on subsequent
# lines.
output_lines[0].insert(0, '')
# Any "columns" that do not have headings in the first row are not actually
# columns -- they are a single column where space-separated words got
# split. Merge them back together to prevent them from being
# column-aligned by write_table.
table_rows = [output_lines[0]]
num_cols = len(output_lines[0])
for row in output_lines[1:]:
table_row = row[:num_cols]
table_row.append(' '.join(row[num_cols:]))
table_rows.append(table_row)
with open(result, 'w') as wfh:
write_table(table_rows, wfh)
def _diff_sysfs_dirs(before, after, result): # pylint: disable=R0914
before_files = []
os.path.walk(before,
lambda arg, dirname, names: arg.extend([os.path.join(dirname, f) for f in names]),
before_files
)
before_files = filter(os.path.isfile, before_files)
files = [os.path.relpath(f, before) for f in before_files]
after_files = [os.path.join(after, f) for f in files]
diff_files = [os.path.join(result, f) for f in files]
for bfile, afile, dfile in zip(before_files, after_files, diff_files):
if not os.path.isfile(afile):
logger.debug('sysfs_diff: {} does not exist or is not a file'.format(afile))
continue
with open(bfile) as bfh, open(afile) as afh: # pylint: disable=C0321
with open(_f(dfile), 'w') as dfh:
for i, (bline, aline) in enumerate(izip_longest(bfh, afh), 1):
if aline is None:
logger.debug('Lines missing from {}'.format(afile))
break
bchunks = re.split(r'(\W+)', bline)
achunks = re.split(r'(\W+)', aline)
if len(bchunks) != len(achunks):
logger.debug('Token length mismatch in {} on line {}'.format(bfile, i))
dfh.write('xxx ' + bline)
continue
if ((len([c for c in bchunks if c.strip()]) == len([c for c in achunks if c.strip()]) == 2) and
(bchunks[0] == achunks[0])):
# if there are only two columns and the first column is the
# same, assume it's a "header" column and do not diff it.
dchunks = [bchunks[0]] + [diff_tokens(b, a) for b, a in zip(bchunks[1:], achunks[1:])]
else:
dchunks = [diff_tokens(b, a) for b, a in zip(bchunks, achunks)]
dfh.write(''.join(dchunks))
```
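The sysfs diff above works line by line: each before/after pair is tokenised with `re.split(r'(\W+)', ...)`, which keeps the separators, and corresponding tokens are passed to `diff_tokens()` from `wlauto.utils.misc` (reproduced later in this document), which returns the numeric delta for numeric tokens and `[before -> after]` otherwise. A small illustration of one such line diff, using made-up sysfs-like content:
```python
# Illustration of the per-line diff used by _diff_sysfs_dirs() above.
import re

from wlauto.utils.misc import diff_tokens

before = 'cpu0 online: 1 freq: 1000000'
after = 'cpu0 online: 1 freq: 1200000'

bchunks = re.split(r'(\W+)', before)
achunks = re.split(r'(\W+)', after)
print(''.join(diff_tokens(b, a) for b, a in zip(bchunks, achunks)))
# -> 'cpu0 online: 0 freq: 200000' (numeric tokens become their delta)
```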
#### File: wlauto/modules/reset.py
```python
import time
from wlauto import Module, Parameter
from wlauto.exceptions import DeviceError
from wlauto.utils.netio import KshellConnection
class NetioSwitchReset(Module):
#pylint: disable=E1101
name = 'netio_switch'
description = """
Enables hard reset of devices connected to a Netio ethernet power switch
"""
capabilities = ['reset_power']
parameters = [
Parameter('host', default='ippowerbar',
description='IP address or DNS name of the Netio power switch.'),
Parameter('port', kind=int, default=1234,
description='Port on which KSHELL is listening.'),
Parameter('username', default='admin',
description='User name for the administrator on the Netio.'),
Parameter('password', default='<PASSWORD>',
description='Password for the administrator on the Netio.'),
Parameter('psu', kind=int, default=1,
description='The device port number on the Netio, i.e. which '
'PSU port the device is connected to.'),
]
def hard_reset(self):
try:
conn = KshellConnection(host=self.host, port=self.port)
conn.login(self.username, self.password)
conn.disable_port(self.psu)
time.sleep(2)
conn.enable_port(self.psu)
conn.close()
except Exception as e:
raise DeviceError('Could not reset power: {}'.format(e))
```
#### File: wlauto/result_processors/mongodb.py
```python
import os
import re
import string
import tarfile
try:
import pymongo
from bson.objectid import ObjectId
from gridfs import GridFS
except ImportError:
pymongo = None
from wlauto import ResultProcessor, Parameter, Artifact
from wlauto.exceptions import ResultProcessorError
from wlauto.utils.misc import as_relative
__bad_chars = '$.'
KEY_TRANS_TABLE = string.maketrans(__bad_chars, '_' * len(__bad_chars))
BUNDLE_NAME = 'files.tar.gz'
class MongodbUploader(ResultProcessor):
name = 'mongodb'
description = """
Uploads run results to a MongoDB instance.
MongoDB is a popular document-based data store (NoSQL database).
"""
parameters = [
Parameter('uri', kind=str, default=None,
description="""Connection URI. If specified, this will be used for connecting
to the backend, and host/port parameters will be ignored."""),
Parameter('host', kind=str, default='localhost', mandatory=True,
description='IP address/name of the machine hosting the MongoDB server.'),
Parameter('port', kind=int, default=27017, mandatory=True,
description='Port on which the MongoDB server is listening.'),
Parameter('db', kind=str, default='wa', mandatory=True,
description='Database on the server used to store WA results.'),
Parameter('extra_params', kind=dict, default={},
description='''Additional connection parameters may be specified using this (see
the pymongo documentation).'''),
Parameter('authentication', kind=dict, default={},
description='''If specified, this will be passed to db.authenticate() upon connection;
please see the pymongo documentation's authentication examples for details.'''),
]
def initialize(self, context):
if pymongo is None:
raise ResultProcessorError('mongodb result processor requires the pymongo package to be installed.')
try:
self.client = pymongo.MongoClient(self.host, self.port, **self.extra_params)
except pymongo.errors.PyMongoError, e:
raise ResultProcessorError('Error connecting to mongod: {}'.format(e))
self.dbc = self.client[self.db]
self.fs = GridFS(self.dbc)
if self.authentication:
if not self.dbc.authenticate(**self.authentication):
raise ResultProcessorError('Authentication to database {} failed.'.format(self.db))
self.run_result_dbid = ObjectId()
run_doc = context.run_info.to_dict()
wa_adapter = run_doc['device']
devprops = dict((k.translate(KEY_TRANS_TABLE), v)
for k, v in run_doc['device_properties'].iteritems())
run_doc['device'] = devprops
run_doc['device']['wa_adapter'] = wa_adapter
del run_doc['device_properties']
run_doc['output_directory'] = os.path.abspath(context.output_directory)
run_doc['artifacts'] = []
run_doc['workloads'] = context.config.to_dict()['workload_specs']
for workload in run_doc['workloads']:
workload['name'] = workload['workload_name']
del workload['workload_name']
workload['results'] = []
self.run_dbid = self.dbc.runs.insert(run_doc)
prefix = context.run_info.project if context.run_info.project else '[NOPROJECT]'
run_part = context.run_info.run_name or context.run_info.uuid.hex
self.gridfs_dir = os.path.join(prefix, run_part)
i = 0
while self.gridfs_directory_exists(self.gridfs_dir):
if self.gridfs_dir.endswith('-{}'.format(i)):
self.gridfs_dir = self.gridfs_dir[:-2]
i += 1
self.gridfs_dir += '-{}'.format(i)
# Keep track of all generated artifacts, so that we know what to
# include in the tarball. The tarball will contain raw artifacts
# (other kinds would have been uploaded directly or do not contain
# new data) and all files in the results dir that have not been marked
# as artifacts.
self.artifacts = []
def export_iteration_result(self, result, context):
r = {}
r['iteration'] = context.current_iteration
r['status'] = result.status
r['events'] = [e.to_dict() for e in result.events]
r['metrics'] = []
for m in result.metrics:
md = m.to_dict()
md['is_summary'] = m.name in context.workload.summary_metrics
r['metrics'].append(md)
iteration_artefacts = [self.upload_artifact(context, a) for a in context.iteration_artifacts]
r['artifacts'] = [e for e in iteration_artefacts if e is not None]
self.dbc.runs.update({'_id': self.run_dbid, 'workloads.id': context.spec.id},
{'$push': {'workloads.$.results': r}})
def export_run_result(self, result, context):
run_artifacts = [self.upload_artifact(context, a) for a in context.run_artifacts]
self.logger.debug('Generating results bundle...')
bundle = self.generate_bundle(context)
if bundle:
run_artifacts.append(self.upload_artifact(context, bundle))
else:
self.logger.debug('No untracked files found.')
run_stats = {
'status': result.status,
'events': [e.to_dict() for e in result.events],
'end_time': context.run_info.end_time,
'duration': context.run_info.duration.total_seconds(),
'artifacts': [e for e in run_artifacts if e is not None],
}
self.dbc.runs.update({'_id': self.run_dbid}, {'$set': run_stats})
def finalize(self, context):
self.client.close()
def validate(self):
if self.uri:
has_warned = False
if self.host != self.parameters['host'].default:
self.logger.warning('both uri and host specified; host will be ignored')
has_warned = True
if self.port != self.parameters['port'].default:
self.logger.warning('both uri and port specified; port will be ignored')
has_warned = True
if has_warned:
self.logger.warning('To suppress this warning, please remove either uri or '
'host/port from your config.')
def upload_artifact(self, context, artifact):
artifact_path = os.path.join(context.output_directory, artifact.path)
self.artifacts.append((artifact_path, artifact))
if not os.path.exists(artifact_path):
self.logger.debug('Artifact {} has not been generated'.format(artifact_path))
return
elif artifact.kind in ['raw', 'export']:
self.logger.debug('Ignoring {} artifact {}'.format(artifact.kind, artifact_path))
return
else:
self.logger.debug('Uploading artifact {}'.format(artifact_path))
entry = artifact.to_dict()
path = entry['path']
del entry['path']
del entry['name']
del entry['level']
del entry['mandatory']
if context.workload is None:
entry['filename'] = os.path.join(self.gridfs_dir, as_relative(path))
else:
entry['filename'] = os.path.join(self.gridfs_dir,
'{}-{}-{}'.format(context.spec.id,
context.spec.label,
context.current_iteration),
as_relative(path))
with open(artifact_path, 'rb') as fh:
fsid = self.fs.put(fh, **entry)
entry['gridfs_id'] = fsid
return entry
def gridfs_directory_exists(self, path):
regex = re.compile('^{}'.format(path))
return self.fs.exists({'filename': regex})
def generate_bundle(self, context): # pylint: disable=R0914
"""
The bundle will contain files generated during the run that have not
already been processed. This includes all files for which there isn't an
explicit artifact as well as "raw" artifacts that aren't uploaded individually.
Basically, this ensures that everything that is not explicitly marked as an
"export" (which means it's guaranteed not to contain information not accessible
from other artifacts/scores) is available in the DB. The bundle is compressed,
so it shouldn't take up too much space; however, it also means that it's not
easy to query for or retrieve individual files (a trade-off between space and convenience).
"""
to_upload = []
artpaths = []
outdir = context.output_directory
for artpath, artifact in self.artifacts:
artpaths.append(os.path.relpath(artpath, outdir))
if artifact.kind == 'raw':
to_upload.append((artpath, os.path.relpath(artpath, outdir)))
for root, _, files in os.walk(outdir):
for f in files:
path = os.path.relpath(os.path.join(root, f), outdir)
if path not in artpaths:
to_upload.append((os.path.join(outdir, path), path))
if not to_upload:
# Nothing unexpected/unprocessed has been generated during the run.
return None
else:
archive_path = os.path.join(outdir, BUNDLE_NAME)
with tarfile.open(archive_path, 'w:gz') as tf:
for fpath, arcpath in to_upload:
tf.add(fpath, arcpath)
return Artifact('mongo_bundle', BUNDLE_NAME, 'data',
description='bundle to be uploaded to mongodb.')
```
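The `KEY_TRANS_TABLE` defined at the top of this module exists because MongoDB (at least in the versions this processor targets) does not accept field names containing `.` or `$`; device property names are therefore passed through `str.translate` so those characters become underscores before the run document is inserted. For example (hypothetical property name):
```python
# Why device property keys are translated before insertion: '.' and '$' are
# not valid in MongoDB field names.
import string

KEY_TRANS_TABLE = string.maketrans('$.', '__')
print('android.os.Build$VERSION'.translate(KEY_TRANS_TABLE))
# -> 'android_os_Build_VERSION'
```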
#### File: wlauto/result_processors/notify.py
```python
import collections
import sys
try:
import notify2
except ImportError:
notify2 = None
from wlauto import ResultProcessor
from wlauto.core.result import IterationResult
from wlauto.exceptions import ResultProcessorError
class NotifyProcessor(ResultProcessor):
name = 'notify'
description = '''Display a desktop notification when the run finishes
Notifications only work on Linux systems. It uses the generic
freedesktop notification specification. For this result processor
to work, you need to have the notify2 package installed on your system.
'''
def initialize(self, context):
if sys.platform != 'linux2':
raise ResultProcessorError('Notifications are only supported in linux')
if not notify2:
raise ResultProcessorError('notify2 not installed. Please install the notify2 package')
notify2.init("Workload Automation")
def process_run_result(self, result, context):
num_iterations = sum(context.job_iteration_counts.values())
counter = collections.Counter()
for result in result.iteration_results:
counter[result.status] += 1
score_board = []
for status in IterationResult.values:
if status in counter:
score_board.append('{} {}'.format(counter[status], status))
summary = 'Workload Automation run finished'
body = 'Ran a total of {} iterations: '.format(num_iterations)
body += ', '.join(score_board)
notification = notify2.Notification(summary, body)
if not notification.show():
self.logger.warning('Notification failed to show')
```
#### File: wlauto/result_processors/uxperf.py
```python
import os
from distutils.version import LooseVersion
from wlauto import ResultProcessor, Parameter
from wlauto.instrumentation import instrument_is_enabled
from wlauto.exceptions import ResultProcessorError, ConfigError
from wlauto.utils.types import numeric, boolean
from wlauto.utils.uxperf import UxPerfParser
try:
import pandas as pd
except ImportError:
pd = None
class UxPerfResultProcessor(ResultProcessor):
name = 'uxperf'
description = '''
Parse logcat for UX_PERF markers to produce performance metrics for
workload actions using specified instrumentation.
An action represents a series of UI interactions to capture.
NOTE: The UX_PERF markers are turned off by default and must be enabled in
an agenda file by setting ``markers_enabled`` for the workload to ``True``.
'''
parameters = [
Parameter('add_timings', kind=boolean, default=True,
description='''
If set to ``True``, add per-action timings to result metrics.
'''),
Parameter('add_frames', kind=boolean, default=False,
description='''
If set to ``True``, add per-action frame statistics to result
metrics. i.e. fps, frame_count, jank and not_at_vsync.
NOTE: This option requires the fps instrument to be enabled.
'''),
Parameter('drop_threshold', kind=numeric, default=5,
description='''
Data points below this FPS will be dropped as they do not
constitute "real" gameplay. The assumption being that while
actually running, the FPS in the game will not drop below X
frames per second, except on loading screens, menus, etc,
which should not contribute to FPS calculation.
'''),
Parameter('generate_csv', kind=boolean, default=True,
description='''
If set to ``True``, this will produce temporal per-action fps
data in the results directory, in a file named <action>_fps.csv.
Note: per-action fps data will appear as discrete step-like
values; in order to produce a more meaningful representation,
a rolling mean can be applied.
'''),
]
def initialize(self, context):
# needed for uxperf parser
if not pd or LooseVersion(pd.__version__) < LooseVersion('0.13.1'):
message = ('uxperf result processor requires pandas Python package '
'(version 0.13.1 or higher) to be installed.\n'
'You can install it with pip, e.g. "sudo pip install pandas"')
raise ResultProcessorError(message)
if self.add_frames and not instrument_is_enabled('fps'):
raise ConfigError('fps instrument must be enabled in order to add frames.')
def export_iteration_result(self, result, context):
parser = UxPerfParser(context)
logfile = os.path.join(context.output_directory, 'logcat.log')
framelog = os.path.join(context.output_directory, 'frames.csv')
self.logger.debug('Parsing logcat.log for UX_PERF markers')
parser.parse(logfile)
if self.add_timings:
self.logger.debug('Adding per-action timings')
parser.add_action_timings()
if self.add_frames:
self.logger.debug('Adding per-action frame metrics')
parser.add_action_frames(framelog, self.drop_threshold, self.generate_csv)
```
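As noted in the `generate_csv` description above, the per-action fps traces are step-like, and a rolling mean gives a more readable curve. A minimal sketch of smoothing one of the generated `<action>_fps.csv` files (the file and column names here are hypothetical, since the exact CSV layout is produced by `UxPerfParser`):
```python
# Smoothing a per-action fps trace with a rolling mean (hypothetical names).
import pandas as pd

df = pd.read_csv('browse_content_fps.csv')
# Series.rolling() needs pandas >= 0.18; with the 0.13.x minimum required
# above, the equivalent call is pd.rolling_mean(df['fps'], 10).
df['fps_smoothed'] = df['fps'].rolling(window=10, min_periods=1).mean()
df.to_csv('browse_content_fps_smoothed.csv', index=False)
```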
#### File: wlauto/tests/test_agenda.py
```python
import os
from StringIO import StringIO
from unittest import TestCase
from nose.tools import assert_equal, assert_in, raises
from wlauto.core.agenda import Agenda
from wlauto.exceptions import ConfigError
YAML_TEST_FILE = os.path.join(os.path.dirname(__file__), 'data', 'test-agenda.yaml')
invalid_agenda_text = """
workloads:
- id: 1
workload_parameters:
test: 1
"""
invalid_agenda = StringIO(invalid_agenda_text)
invalid_agenda.name = 'invalid1'
duplicate_agenda_text = """
global:
iterations: 1
workloads:
- id: 1
workload_name: antutu
workload_parameters:
test: 1
- id: 1
workload_name: andebench
"""
duplicate_agenda = StringIO(duplicate_agenda_text)
duplicate_agenda.name = 'invalid2'
short_agenda_text = """
workloads: [antutu, linpack, andebench]
"""
short_agenda = StringIO(short_agenda_text)
short_agenda.name = 'short'
default_ids_agenda_text = """
workloads:
- antutu
- id: 1
name: linpack
- id: test
name: andebench
params:
number_of_threads: 1
- vellamo
"""
default_ids_agenda = StringIO(default_ids_agenda_text)
default_ids_agenda.name = 'default_ids'
sectioned_agenda_text = """
sections:
- id: sec1
runtime_params:
dp: one
workloads:
- antutu
- andebench
- name: linpack
runtime_params:
dp: two
- id: sec2
runtime_params:
dp: three
workloads:
- antutu
workloads:
- nenamark
"""
sectioned_agenda = StringIO(sectioned_agenda_text)
sectioned_agenda.name = 'sectioned'
dup_sectioned_agenda_text = """
sections:
- id: sec1
workloads:
- antutu
- id: sec1
workloads:
- andebench
workloads:
- nenamark
"""
dup_sectioned_agenda = StringIO(dup_sectioned_agenda_text)
dup_sectioned_agenda.name = 'dup-sectioned'
caps_agenda_text = """
config:
device: TC2
global:
runtime_parameters:
sysfile_values:
/sys/test/MyFile: 1
/sys/test/other file: 2
workloads:
- id: 1
name: linpack
"""
caps_agenda = StringIO(caps_agenda_text)
caps_agenda.name = 'caps'
bad_syntax_agenda_text = """
config:
# tab on the following line
reboot_policy: never
workloads:
- antutu
"""
bad_syntax_agenda = StringIO(bad_syntax_agenda_text)
bad_syntax_agenda.name = 'bad_syntax'
section_ids_test_text = """
config:
device: TC2
reboot_policy: never
workloads:
- name: bbench
id: bbench
- name: audio
sections:
- id: foo
- id: bar
"""
section_ids_agenda = StringIO(section_ids_test_text)
section_ids_agenda.name = 'section_ids'
class AgendaTest(TestCase):
def test_yaml_load(self):
agenda = Agenda(YAML_TEST_FILE)
assert_equal(len(agenda.workloads), 4)
def test_duplicate_id(self):
try:
Agenda(duplicate_agenda)
except ConfigError, e:
assert_in('duplicate', e.message.lower()) # pylint: disable=E1101
else:
raise Exception('ConfigError was not raised for an agenda with duplicate ids.')
def test_yaml_missing_field(self):
try:
Agenda(invalid_agenda_text)
except ConfigError, e:
assert_in('workload name', e.message)
else:
raise Exception('ConfigError was not raised for an invalid agenda.')
def test_defaults(self):
agenda = Agenda(short_agenda)
assert_equal(len(agenda.workloads), 3)
assert_equal(agenda.workloads[0].workload_name, 'antutu')
assert_equal(agenda.workloads[0].id, '1')
def test_default_id_assignment(self):
agenda = Agenda(default_ids_agenda)
assert_equal(agenda.workloads[0].id, '2')
assert_equal(agenda.workloads[3].id, '3')
def test_sections(self):
agenda = Agenda(sectioned_agenda)
assert_equal(agenda.sections[0].workloads[0].workload_name, 'antutu')
assert_equal(agenda.sections[1].runtime_parameters['dp'], 'three')
@raises(ConfigError)
def test_dup_sections(self):
Agenda(dup_sectioned_agenda)
@raises(ConfigError)
def test_bad_syntax(self):
Agenda(bad_syntax_agenda)
```
#### File: wlauto/utils/log.py
```python
import logging
import string
import threading
import colorama
from wlauto.core.bootstrap import settings
import wlauto.core.signal as signal
COLOR_MAP = {
logging.DEBUG: colorama.Fore.BLUE,
logging.INFO: colorama.Fore.GREEN,
logging.WARNING: colorama.Fore.YELLOW,
logging.ERROR: colorama.Fore.RED,
logging.CRITICAL: colorama.Style.BRIGHT + colorama.Fore.RED,
}
RESET_COLOR = colorama.Style.RESET_ALL
def init_logging(verbosity):
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
error_handler = ErrorSignalHandler(logging.DEBUG)
root_logger.addHandler(error_handler)
console_handler = logging.StreamHandler()
if verbosity == 1:
console_handler.setLevel(logging.DEBUG)
if 'colour_enabled' in settings.logging and not settings.logging['colour_enabled']:
console_handler.setFormatter(LineFormatter(settings.logging['verbose_format']))
else:
console_handler.setFormatter(ColorFormatter(settings.logging['verbose_format']))
else:
console_handler.setLevel(logging.INFO)
if 'colour_enabled' in settings.logging and not settings.logging['colour_enabled']:
console_handler.setFormatter(LineFormatter(settings.logging['regular_format']))
else:
console_handler.setFormatter(ColorFormatter(settings.logging['regular_format']))
root_logger.addHandler(console_handler)
logging.basicConfig(level=logging.DEBUG)
def add_log_file(filepath, level=logging.DEBUG):
root_logger = logging.getLogger()
file_handler = logging.FileHandler(filepath)
file_handler.setLevel(level)
file_handler.setFormatter(LineFormatter(settings.logging['file_format']))
root_logger.addHandler(file_handler)
class ErrorSignalHandler(logging.Handler):
"""
Emits signals for ERROR and WARNING level traces.
"""
def emit(self, record):
if record.levelno == logging.ERROR:
signal.send(signal.ERROR_LOGGED, self)
elif record.levelno == logging.WARNING:
signal.send(signal.WARNING_LOGGED, self)
class ColorFormatter(logging.Formatter):
"""
Formats logging records with color and prepends record info
to each line of the message.
BLUE for DEBUG logging level
GREEN for INFO logging level
YELLOW for WARNING logging level
RED for ERROR logging level
BOLD RED for CRITICAL logging level
"""
def __init__(self, fmt=None, datefmt=None):
super(ColorFormatter, self).__init__(fmt, datefmt)
template_text = self._fmt.replace('%(message)s', RESET_COLOR + '%(message)s${color}')
template_text = '${color}' + template_text + RESET_COLOR
self.fmt_template = string.Template(template_text)
def format(self, record):
self._set_color(COLOR_MAP[record.levelno])
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
d = record.__dict__
parts = []
for line in record.message.split('\n'):
d.update({'message': line.strip('\r')})
parts.append(self._fmt % d)
return '\n'.join(parts)
def _set_color(self, color):
self._fmt = self.fmt_template.substitute(color=color)
class LineFormatter(logging.Formatter):
"""
Logs each line of the message separately.
"""
def __init__(self, fmt=None, datefmt=None):
super(LineFormatter, self).__init__(fmt, datefmt)
def format(self, record):
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
d = record.__dict__
parts = []
for line in record.message.split('\n'):
d.update({'message': line.strip('\r')})
parts.append(self._fmt % d)
return '\n'.join(parts)
class BaseLogWriter(object):
def __init__(self, name, level=logging.DEBUG):
"""
File-like object class designed to be used for logging from streams.
Each complete line (terminated by a newline character) gets logged at
the given level (DEBUG by default). Incomplete lines are buffered until the next newline.
:param name: The name of the logger that will be used.
"""
self.logger = logging.getLogger(name)
self.buffer = ''
if level == logging.DEBUG:
self.do_write = self.logger.debug
elif level == logging.INFO:
self.do_write = self.logger.info
elif level == logging.WARNING:
self.do_write = self.logger.warning
elif level == logging.ERROR:
self.do_write = self.logger.error
else:
raise Exception('Unknown logging level: {}'.format(level))
def flush(self):
# Defined to match the interface expected by pexpect.
return self
def close(self):
if self.buffer:
self.logger.debug(self.buffer)
self.buffer = ''
return self
def __del__(self):
# Ensure we don't lose buffered output
self.close()
class LogWriter(BaseLogWriter):
def write(self, data):
data = data.replace('\r\n', '\n').replace('\r', '\n')
if '\n' in data:
parts = data.split('\n')
parts[0] = self.buffer + parts[0]
for part in parts[:-1]:
self.do_write(part)
self.buffer = parts[-1]
else:
self.buffer += data
return self
class LineLogWriter(BaseLogWriter):
def write(self, data):
self.do_write(data)
class StreamLogger(threading.Thread):
"""
Logs output from a stream in a thread.
"""
def __init__(self, name, stream, level=logging.DEBUG, klass=LogWriter):
super(StreamLogger, self).__init__()
self.writer = klass(name, level)
self.stream = stream
self.daemon = True
def run(self):
line = self.stream.readline()
while line:
self.writer.write(line.rstrip('\n'))
line = self.stream.readline()
self.writer.close()
```
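`StreamLogger` above drains a stream into the logging system from a daemon thread, using `LogWriter` to split the data into lines. A typical (sketched) use is attaching it to a child process's stdout so the output shows up in the log:
```python
# Sketch: forwarding a child process's output into the log via StreamLogger.
import logging
import subprocess

from wlauto.utils.log import StreamLogger

logging.basicConfig(level=logging.DEBUG)

proc = subprocess.Popen(['echo', 'hello from the child process'],
                        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
logger_thread = StreamLogger('child', proc.stdout)
logger_thread.start()
proc.wait()
logger_thread.join()  # run() exits when readline() returns '' at EOF
```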
#### File: wlauto/utils/misc.py
```python
from __future__ import division
import os
import sys
import re
import math
import imp
import string
import threading
import signal
import pkgutil
import traceback
import logging
import random
import hashlib
import subprocess
from subprocess import CalledProcessError
from datetime import datetime, timedelta
from operator import mul, itemgetter
from StringIO import StringIO
from itertools import cycle, groupby
from functools import partial
from distutils.spawn import find_executable
import yaml
from dateutil import tz
# ABI --> architectures list
ABI_MAP = {
'armeabi': ['armeabi', 'armv7', 'armv7l', 'armv7el', 'armv7lh', 'armeabi-v7a'],
'arm64': ['arm64', 'armv8', 'arm64-v8a', 'aarch64'],
}
def preexec_function():
# Ignore the SIGINT signal by setting the handler to the standard
# signal handler SIG_IGN.
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Change process group in case we have to kill the subprocess and all of
# its children later.
# TODO: this is Unix-specific; would be good to find an OS-agnostic way
# to do this in case we wanna port WA to Windows.
os.setpgrp()
check_output_logger = logging.getLogger('check_output')
# Defined here rather than in wlauto.exceptions due to module load dependencies
class TimeoutError(Exception):
"""Raised when a subprocess command times out. This is basically a ``WAError``-derived version
of ``subprocess.CalledProcessError``, the thinking being that while a timeout could be due to
programming error (e.g. not setting long enough timers), it is often due to some failure in the
environment, and therefore should be classed as a "user error"."""
def __init__(self, command, output):
super(TimeoutError, self).__init__('Timed out: {}'.format(command))
self.command = command
self.output = output
def __str__(self):
return '\n'.join([self.message, 'OUTPUT:', self.output or ''])
class CalledProcessErrorWithStderr(CalledProcessError):
def __init__(self, *args, **kwargs):
self.output = kwargs.pop("output")
self.error = kwargs.pop("error")
super(CalledProcessErrorWithStderr, self).__init__(*args, **kwargs)
def __str__(self):
return '{}\nSTDOUT: {}\nSTDERR:{}'.format(CalledProcessError.__str__(self),
self.output, self.error)
__repr__ = __str__
def check_output(command, timeout=None, ignore=None, **kwargs):
"""This is a version of subprocess.check_output that adds a timeout parameter to kill
the subprocess if it does not return within the specified time."""
# pylint: disable=too-many-branches
if ignore is None:
ignore = []
elif isinstance(ignore, int):
ignore = [ignore]
elif not isinstance(ignore, list) and ignore != 'all':
message = 'Invalid value for ignore parameter: "{}"; must be an int or a list'
raise ValueError(message.format(ignore))
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
def callback(pid):
try:
check_output_logger.debug('{} timed out; sending SIGKILL'.format(pid))
os.killpg(pid, signal.SIGKILL)
except OSError:
pass # process may have already terminated.
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
preexec_fn=preexec_function, **kwargs)
if timeout:
timer = threading.Timer(timeout, callback, [process.pid, ])
timer.start()
try:
output, error = process.communicate()
finally:
if timeout:
timer.cancel()
retcode = process.poll()
if retcode:
if retcode == -9: # killed, assume due to timeout callback
raise TimeoutError(command, output='\n'.join([output, error]))
elif ignore != 'all' and retcode not in ignore:
raise CalledProcessErrorWithStderr(retcode, command, output=output, error=error)
return output, error
def walk_modules(path):
"""
Given a package name, return a list of all modules (including submodules, etc.)
in that package.
"""
root_mod = __import__(path, {}, {}, [''])
mods = [root_mod]
for _, name, ispkg in pkgutil.iter_modules(root_mod.__path__):
submod_path = '.'.join([path, name])
if ispkg:
mods.extend(walk_modules(submod_path))
else:
submod = __import__(submod_path, {}, {}, [''])
mods.append(submod)
return mods
def ensure_directory_exists(dirpath):
"""A filter for directory paths to ensure they exist."""
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
return dirpath
def ensure_file_directory_exists(filepath):
"""
A filter for file paths to ensure the directory of the
file exists and the file can be created there. The file
itself is *not* going to be created if it doesn't already
exist.
"""
ensure_directory_exists(os.path.dirname(filepath))
return filepath
def diff_tokens(before_token, after_token):
"""
Creates a diff of two tokens.
If the two tokens are the same, it just returns the token
(whitespace tokens are considered the same irrespective of type/number
of whitespace characters in the token).
If the tokens are numeric, the difference between the two values
is returned.
Otherwise, a string in the form [before -> after] is returned.
"""
if before_token.isspace() and after_token.isspace():
return after_token
elif before_token.isdigit() and after_token.isdigit():
try:
diff = int(after_token) - int(before_token)
return str(diff)
except ValueError:
return "[%s -> %s]" % (before_token, after_token)
elif before_token == after_token:
return after_token
else:
return "[%s -> %s]" % (before_token, after_token)
def prepare_table_rows(rows):
"""Given a list of lists, make sure they are prepared to be formatted into a table
by making sure each row has the same number of columns and stringifying all values."""
rows = [map(str, r) for r in rows]
max_cols = max(map(len, rows))
for row in rows:
pad = max_cols - len(row)
for _ in xrange(pad):
row.append('')
return rows
def write_table(rows, wfh, align='>', headers=None): # pylint: disable=R0914
"""Write a column-aligned table to the specified file object."""
if not rows:
return
rows = prepare_table_rows(rows)
num_cols = len(rows[0])
# cycle specified alignments until we have max_cols of them. This is
# consistent with how such cases are handled in R, pandas, etc.
it = cycle(align)
align = [it.next() for _ in xrange(num_cols)]
cols = zip(*rows)
col_widths = [max(map(len, c)) for c in cols]
row_format = ' '.join(['{:%s%s}' % (align[i], w) for i, w in enumerate(col_widths)])
row_format += '\n'
if headers:
wfh.write(row_format.format(*headers))
underlines = ['-' * len(h) for h in headers]
wfh.write(row_format.format(*underlines))
for row in rows:
wfh.write(row_format.format(*row))
def get_null():
"""Returns the correct null sink based on the OS."""
return 'NUL' if os.name == 'nt' else '/dev/null'
def get_traceback(exc=None):
"""
Returns the string with the traceback for the specified exc
object, or for the current exception if exc is not specified.
"""
if exc is None:
exc = sys.exc_info()
if not exc:
return None
tb = exc[2]
sio = StringIO()
traceback.print_tb(tb, file=sio)
del tb # needs to be done explicitly see: http://docs.python.org/2/library/sys.html#sys.exc_info
return sio.getvalue()
def merge_dicts(*args, **kwargs):
if len(args) < 2:
raise ValueError('Must specify at least two dicts to merge.')
func = partial(_merge_two_dicts, **kwargs)
return reduce(func, args)
def _merge_two_dicts(base, other, list_duplicates='all', match_types=False, # pylint: disable=R0912,R0914
dict_type=dict, should_normalize=True, should_merge_lists=True):
"""Merge dicts normalizing their keys."""
merged = dict_type()
base_keys = base.keys()
other_keys = other.keys()
norm = normalize if should_normalize else lambda x, y: x
base_only = []
other_only = []
both = []
union = []
for k in base_keys:
if k in other_keys:
both.append(k)
else:
base_only.append(k)
union.append(k)
for k in other_keys:
if k in base_keys:
union.append(k)
else:
union.append(k)
other_only.append(k)
for k in union:
if k in base_only:
merged[k] = norm(base[k], dict_type)
elif k in other_only:
merged[k] = norm(other[k], dict_type)
elif k in both:
base_value = base[k]
other_value = other[k]
base_type = type(base_value)
other_type = type(other_value)
if (match_types and (base_type != other_type) and
(base_value is not None) and (other_value is not None)):
raise ValueError('Type mismatch for {} got {} ({}) and {} ({})'.format(k, base_value, base_type,
other_value, other_type))
if isinstance(base_value, dict):
merged[k] = _merge_two_dicts(base_value, other_value, list_duplicates, match_types, dict_type)
elif isinstance(base_value, list):
if should_merge_lists:
merged[k] = _merge_two_lists(base_value, other_value, list_duplicates, dict_type)
else:
merged[k] = _merge_two_lists([], other_value, list_duplicates, dict_type)
elif isinstance(base_value, set):
merged[k] = norm(base_value.union(other_value), dict_type)
else:
merged[k] = norm(other_value, dict_type)
else: # Should never get here
raise AssertionError('Unexpected merge key: {}'.format(k))
return merged
def merge_lists(*args, **kwargs):
if len(args) < 2:
raise ValueError('Must specify at least two lists to merge.')
func = partial(_merge_two_lists, **kwargs)
return reduce(func, args)
def _merge_two_lists(base, other, duplicates='all', dict_type=dict): # pylint: disable=R0912
"""
Merge lists, normalizing their entries.
parameters:
:base, other: the two lists to be merged. ``other`` will be merged on
top of base.
:duplicates: Indicates the strategy of handling entries that appear
in both lists. ``all`` will keep occurrences from both
lists; ``first`` will only keep occurrences from
``base``; ``last`` will only keep occurrences from
``other``;
.. note:: duplicate entries that appear in the *same* list
will never be removed.
"""
if not isiterable(base):
base = [base]
if not isiterable(other):
other = [other]
if duplicates == 'all':
merged_list = []
for v in normalize(base, dict_type) + normalize(other, dict_type):
if not _check_remove_item(merged_list, v):
merged_list.append(v)
return merged_list
elif duplicates == 'first':
base_norm = normalize(base, dict_type)
merged_list = normalize(base, dict_type)
for v in base_norm:
_check_remove_item(merged_list, v)
for v in normalize(other, dict_type):
if not _check_remove_item(merged_list, v):
if v not in base_norm:
merged_list.append(v) # pylint: disable=no-member
return merged_list
elif duplicates == 'last':
other_norm = normalize(other, dict_type)
merged_list = []
for v in normalize(base, dict_type):
if not _check_remove_item(merged_list, v):
if v not in other_norm:
merged_list.append(v)
for v in other_norm:
if not _check_remove_item(merged_list, v):
merged_list.append(v)
return merged_list
else:
raise ValueError('Unexpected value for list duplicates argument: {}. '.format(duplicates) +
'Must be in {"all", "first", "last"}.')
def _check_remove_item(the_list, item):
"""Helper function for merge_lists that implements checking wether an items
should be removed from the list and doing so if needed. Returns ``True`` if
the item has been removed and ``False`` otherwise."""
if not isinstance(item, basestring):
return False
if not item.startswith('~'):
return False
actual_item = item[1:]
if actual_item in the_list:
del the_list[the_list.index(actual_item)]
return True
def normalize(value, dict_type=dict):
"""Normalize values. Recursively normalizes dict keys to be lower case,
no surrounding whitespace, underscore-delimited strings."""
if isinstance(value, dict):
normalized = dict_type()
for k, v in value.iteritems():
if isinstance(k, basestring):
k = k.strip().lower().replace(' ', '_')
normalized[k] = normalize(v, dict_type)
return normalized
elif isinstance(value, list):
return [normalize(v, dict_type) for v in value]
elif isinstance(value, tuple):
return tuple([normalize(v, dict_type) for v in value])
else:
return value
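# Illustrative sketch (not part of the original module): what normalize() does to a
# typical config mapping -- keys are stripped, lower-cased and underscore-delimited,
# and values are normalized recursively. The example values are made up.
#
#   normalize({' Device Name ': 'Nexus', 'CPU Cores': [1, 2]})
#   # -> {'device_name': 'Nexus', 'cpu_cores': [1, 2]}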
VALUE_REGEX = re.compile(r'(\d+(?:\.\d+)?)\s*(\w*)')
UNITS_MAP = {
's': 'seconds',
'ms': 'milliseconds',
'us': 'microseconds',
'ns': 'nanoseconds',
'V': 'volts',
'A': 'amps',
'mA': 'milliamps',
'J': 'joules',
}
def parse_value(value_string):
"""parses a string representing a numerical value and returns
a tuple (value, units), where value will be either int or float,
and units will be a string representing the units or None."""
match = VALUE_REGEX.search(value_string)
if match:
vs = match.group(1)
value = float(vs) if '.' in vs else int(vs)
us = match.group(2)
units = UNITS_MAP.get(us, us)
return (value, units)
else:
return (value_string, None)
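# Illustrative sketch (not part of the original module): example inputs and the
# (value, units) tuples parse_value() returns for them. The inputs are made up.
#
#   parse_value('10 ms')   # -> (10, 'milliseconds')
#   parse_value('2.5V')    # -> (2.5, 'volts')
#   parse_value('fast')    # -> ('fast', None)  (no leading number to match)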
def get_meansd(values):
"""Returns mean and standard deviation of the specified values."""
if not values:
return float('nan'), float('nan')
mean = sum(values) / len(values)
sd = math.sqrt(sum([(v - mean) ** 2 for v in values]) / len(values))
return mean, sd
def geomean(values):
"""Returns the geometric mean of the values."""
return reduce(mul, values) ** (1.0 / len(values))
def capitalize(text):
"""Capitalises the specified text: first letter upper case,
all subsequent letters lower case."""
if not text:
return ''
return text[0].upper() + text[1:].lower()
def convert_new_lines(text):
""" Convert new lines to a common format. """
return text.replace('\r\n', '\n').replace('\r', '\n')
def escape_quotes(text):
"""Escape quotes, and escaped quotes, in the specified text."""
return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\'', '\\\'').replace('\"', '\\\"')
def escape_single_quotes(text):
"""Escape single quotes, and escaped single quotes, in the specified text."""
return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\'', '\'\\\'\'')
def escape_double_quotes(text):
"""Escape double quotes, and escaped double quotes, in the specified text."""
return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\"', '\\\"')
def getch(count=1):
"""Read ``count`` characters from standard input."""
if os.name == 'nt':
import msvcrt # pylint: disable=F0401
return ''.join([msvcrt.getch() for _ in xrange(count)])
else: # assume Unix
import tty # NOQA
import termios # NOQA
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(count)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
def isiterable(obj):
"""Returns ``True`` if the specified object is iterable and
*is not a string type*, ``False`` otherwise."""
return hasattr(obj, '__iter__') and not isinstance(obj, basestring)
def utc_to_local(dt):
"""Convert naive datetime to local time zone, assuming UTC."""
return dt.replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal())
def local_to_utc(dt):
"""Convert naive datetime to UTC, assuming local time zone."""
return dt.replace(tzinfo=tz.tzlocal()).astimezone(tz.tzutc())
def as_relative(path):
"""Convert path to relative by stripping away the leading '/' on UNIX or
the equivant on other platforms."""
path = os.path.splitdrive(path)[1]
return path.lstrip(os.sep)
def get_cpu_mask(cores):
"""Return a string with the hex for the cpu mask for the specified core numbers."""
mask = 0
for i in cores:
mask |= 1 << i
return '0x{0:x}'.format(mask)
def load_class(classpath):
"""Loads the specified Python class. ``classpath`` must be a fully-qualified
class name (i.e. namspaced under module/package)."""
modname, clsname = classpath.rsplit('.', 1)
return getattr(__import__(modname), clsname)
def get_pager():
"""Returns the name of the system pager program."""
pager = os.getenv('PAGER')
if pager is None:
pager = find_executable('less')
if pager is None:
pager = find_executable('more')
return pager
def enum_metaclass(enum_param, return_name=False, start=0):
"""
Returns a ``type`` subclass that may be used as a metaclass for
an enum.
Parameters:
:enum_param: the name of class attribute that defines enum values.
The metaclass will add a class attribute for each value in
``enum_param``. The value of the attribute depends on the type
of ``enum_param`` and on the values of ``return_name``. If
``return_name`` is ``True``, then the value of the new attribute is
the name of that attribute; otherwise, if ``enum_param`` is a ``list``
or a ``tuple``, the value will be the index of that param in
``enum_param``, optionally offset by ``start``, otherwise, it will
be assumed that ``enum_param`` implements a dict-like interface and
the value will be ``enum_param[attr_name]``.
:return_name: If ``True``, the enum values will be the names of enum attributes. If
``False``, the default, the values will depend on the type of
``enum_param`` (see above).
:start: If ``enum_param`` is a list or a tuple, and ``return_name`` is ``False``,
this specifies an "offset" that will be added to the index of the attribute
within ``enum_param`` to form the value.
"""
class __EnumMeta(type):
def __new__(mcs, clsname, bases, attrs):
cls = type.__new__(mcs, clsname, bases, attrs)
values = getattr(cls, enum_param, [])
if return_name:
for name in values:
setattr(cls, name, name)
else:
if isinstance(values, list) or isinstance(values, tuple):
for i, name in enumerate(values):
setattr(cls, name, i + start)
else: # assume dict-like
for name in values:
setattr(cls, name, values[name])
return cls
return __EnumMeta
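# Illustrative sketch (not part of the original module): a minimal enum built with
# enum_metaclass. The class and attribute names below are made up.
#
#   class Colour(object):
#       __metaclass__ = enum_metaclass('values', return_name=False, start=0)
#       values = ['red', 'green', 'blue']
#
#   # Colour.red == 0, Colour.green == 1, Colour.blue == 2
#   # With return_name=True, Colour.red would instead be the string 'red'.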
def which(name):
"""Platform-independent version of UNIX which utility."""
if os.name == 'nt':
paths = os.getenv('PATH').split(os.pathsep)
exts = os.getenv('PATHEXT').split(os.pathsep)
for path in paths:
testpath = os.path.join(path, name)
if os.path.isfile(testpath):
return testpath
for ext in exts:
testpathext = testpath + ext
if os.path.isfile(testpathext):
return testpathext
return None
else: # assume UNIX-like
try:
return check_output(['which', name])[0].strip()
except subprocess.CalledProcessError:
return None
_bash_color_regex = re.compile('\x1b\[[0-9;]+m')
def strip_bash_colors(text):
return _bash_color_regex.sub('', text)
def format_duration(seconds, sep=' ', order=['day', 'hour', 'minute', 'second']): # pylint: disable=dangerous-default-value
"""
Formats the specified number of seconds into human-readable duration.
"""
if isinstance(seconds, timedelta):
td = seconds
else:
td = timedelta(seconds=seconds)
dt = datetime(1, 1, 1) + td
result = []
for item in order:
value = getattr(dt, item, None)
if item == 'day':
value -= 1
if not value:
continue
suffix = '' if value == 1 else 's'
result.append('{} {}{}'.format(value, item, suffix))
return sep.join(result)
def get_article(word):
"""
Returns the appropriate indefinite article for the word (ish).
.. note:: Indefinite article assignment in English is based on
sound rather than spelling, so this will not work correctly
in all cases; e.g. this will return ``"a hour"``.
"""
return 'an' if word[0] in 'aoeiu' else 'a'
def get_random_string(length):
"""Returns a random ASCII string of the specified length)."""
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in xrange(length))
class LoadSyntaxError(Exception):
def __init__(self, message, filepath, lineno):
super(LoadSyntaxError, self).__init__(message)
self.filepath = filepath
self.lineno = lineno
def __str__(self):
message = 'Syntax Error in {}, line {}:\n\t{}'
return message.format(self.filepath, self.lineno, self.message)
RAND_MOD_NAME_LEN = 30
BAD_CHARS = string.punctuation + string.whitespace
TRANS_TABLE = string.maketrans(BAD_CHARS, '_' * len(BAD_CHARS))
def to_identifier(text):
"""Converts text to a valid Python identifier by replacing all
whitespace and punctuation."""
return re.sub('_+', '_', text.translate(TRANS_TABLE))
def load_struct_from_python(filepath=None, text=None):
"""Parses a config structure from a .py file. The structure should be composed
of basic Python types (strings, ints, lists, dicts, etc.)."""
if not (filepath or text) or (filepath and text):
raise ValueError('Exactly one of filepath or text must be specified.')
try:
if filepath:
modname = to_identifier(filepath)
mod = imp.load_source(modname, filepath)
else:
modname = get_random_string(RAND_MOD_NAME_LEN)
while modname in sys.modules: # highly unlikely, but...
modname = get_random_string(RAND_MOD_NAME_LEN)
mod = imp.new_module(modname)
exec text in mod.__dict__ # pylint: disable=exec-used
return dict((k, v)
for k, v in mod.__dict__.iteritems()
if not k.startswith('_'))
except SyntaxError as e:
raise LoadSyntaxError(e.message, filepath, e.lineno)
def load_struct_from_yaml(filepath=None, text=None):
"""Parses a config structure from a .yaml file. The structure should be composed
of basic Python types (strings, ints, lists, dicts, etc.)."""
if not (filepath or text) or (filepath and text):
raise ValueError('Exactly one of filepath or text must be specified.')
try:
if filepath:
with open(filepath) as fh:
return yaml.load(fh)
else:
return yaml.load(text)
except yaml.YAMLError as e:
lineno = None
if hasattr(e, 'problem_mark'):
lineno = e.problem_mark.line # pylint: disable=no-member
raise LoadSyntaxError(e.message, filepath=filepath, lineno=lineno)
def load_struct_from_file(filepath):
"""
Attempts to parse a Python structure consisting of basic types from the specified file.
Raises a ``ValueError`` if the specified file is of unknown format; ``LoadSyntaxError`` if
there is an issue parsing the file.
"""
extn = os.path.splitext(filepath)[1].lower()
if (extn == '.py') or (extn == '.pyc') or (extn == '.pyo'):
return load_struct_from_python(filepath)
elif extn == '.yaml':
return load_struct_from_yaml(filepath)
else:
raise ValueError('Unknown format "{}": {}'.format(extn, filepath))
def unique(alist):
"""
Returns a list containing only unique elements from the input list (but preserves
order, unlike sets).
"""
result = []
for item in alist:
if item not in result:
result.append(item)
return result
def open_file(filepath):
"""
Open the specified file path with the associated launcher in an OS-agnostic way.
"""
if os.name == 'nt': # Windows
return os.startfile(filepath) # pylint: disable=no-member
elif sys.platform == 'darwin': # Mac OSX
return subprocess.call(['open', filepath])
else: # assume Linux or similar running a freedesktop-compliant GUI
return subprocess.call(['xdg-open', filepath])
def ranges_to_list(ranges_string):
"""Converts a sysfs-style ranges string, e.g. ``"0,2-4"``, into a list ,e.g ``[0,2,3,4]``"""
values = []
for rg in ranges_string.split(','):
if '-' in rg:
first, last = map(int, rg.split('-'))
values.extend(xrange(first, last + 1))
else:
values.append(int(rg))
return values
def list_to_ranges(values):
"""Converts a list, e.g ``[0,2,3,4]``, into a sysfs-style ranges string, e.g. ``"0,2-4"``"""
range_groups = []
for _, g in groupby(enumerate(values), lambda (i, x): i - x):
range_groups.append(map(itemgetter(1), g))
range_strings = []
for group in range_groups:
if len(group) == 1:
range_strings.append(str(group[0]))
else:
range_strings.append('{}-{}'.format(group[0], group[-1]))
return ','.join(range_strings)
def list_to_mask(values, base=0x0):
"""Converts the specified list of integer values into
a bit mask for those values. Optionally, the list can be
applied to an existing mask."""
for v in values:
base |= (1 << v)
return base
def mask_to_list(mask):
"""Converts the specfied integer bitmask into a list of
indexes of bits that are set in the mask."""
size = len(bin(mask)) - 2 # because of "0b"
return [size - i - 1 for i in xrange(size)
if mask & (1 << size - i - 1)]
def sha256(path, chunk=2048):
"""Calculates SHA256 hexdigest of the file at the specified path."""
h = hashlib.sha256()
with open(path, 'rb') as fh:
buf = fh.read(chunk)
while buf:
h.update(buf)
buf = fh.read(chunk)
return h.hexdigest()
def urljoin(*parts):
return '/'.join(p.rstrip('/') for p in parts)
__memo_cache = {}
def memoized(func):
"""A decorator for memoizing functions and methods."""
func_id = repr(func)
def memoize_wrapper(*args, **kwargs):
id_string = func_id + ','.join([str(id(a)) for a in args])
id_string += ','.join('{}={}'.format(k, v)
for k, v in kwargs.iteritems())
if id_string not in __memo_cache:
__memo_cache[id_string] = func(*args, **kwargs)
return __memo_cache[id_string]
return memoize_wrapper
def commonprefix(file_list, sep=os.sep):
"""
Find the lowest common base folder of a passed list of files.
"""
common_path = os.path.commonprefix(file_list)
cp_split = common_path.split(sep)
other_split = file_list[0].split(sep)
last = len(cp_split) - 1
if cp_split[last] != other_split[last]:
cp_split = cp_split[:-1]
return sep.join(cp_split)
```
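The list-merging, range, and mask helpers in the module above are easiest to follow with a few concrete calls. The sketch below is illustrative only (it is not part of the original sources); it assumes the module is importable as ``wlauto.utils.misc`` and the example values are made up.

```python
from wlauto.utils.misc import (merge_lists, ranges_to_list, list_to_ranges,
                               get_cpu_mask, mask_to_list)

# 'all' keeps entries from both lists; 'first' keeps duplicates in their base-list
# position; 'last' keeps them in their other-list position.
print(merge_lists([1, 3, 2], [3, 4], duplicates='all'))    # [1, 3, 2, 3, 4]
print(merge_lists([1, 3, 2], [3, 4], duplicates='first'))  # [1, 3, 2, 4]
print(merge_lists([1, 3, 2], [3, 4], duplicates='last'))   # [1, 2, 3, 4]

# sysfs-style range strings round-trip through the two converters.
print(ranges_to_list('0,2-4'))        # [0, 2, 3, 4]
print(list_to_ranges([0, 2, 3, 4]))   # '0,2-4'

# CPU masks: cores 0, 1 and 4 -> bits 0, 1 and 4 set.
print(get_cpu_mask([0, 1, 4]))        # '0x13'
print(mask_to_list(0x13))             # [4, 1, 0]
```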
#### File: wlauto/utils/statedetect.py
```python
import os
import yaml
try:
import numpy as np
except ImportError:
np = None
try:
import cv2
except ImportError:
cv2 = None
try:
import imutils
except ImportError:
imutils = None
from wlauto.exceptions import HostError
class StateDefinitionError(RuntimeError):
pass
def auto_canny(image, sigma=0.33):
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
def check_match_state_dependencies():
if np is None or cv2 is None or imutils is None:
raise HostError("State detection requires numpy, opencv (cv2) and imutils.")
def match_state(screenshot_file, defpath, state_definitions): # pylint: disable=too-many-locals
# check dependencies
check_match_state_dependencies()
# check if file exists, then load screenshot into opencv and create edge map
if not os.path.isfile(screenshot_file):
raise StateDefinitionError("Screenshot file not found")
img_rgb = cv2.imread(screenshot_file)
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
img_edge = auto_canny(img_gray)
# make a list of all templates defined in the state definitions
template_list = []
for state in state_definitions["workload_states"]:
template_list.extend(state["templates"])
# check all template PNGs exist
for template_png in template_list:
if not os.path.isfile(os.path.join(defpath, 'templates', template_png + '.png')):
raise StateDefinitionError("Missing template PNG file: " + template_png + ".png")
# try to match each PNG
matched_templates = []
for template_png in template_list:
template = cv2.imread(os.path.join(defpath, 'templates', template_png + '.png'), 0)
template_edge = auto_canny(template)
template_height, template_width = template_edge.shape[:2]
# loop over the scales of the image
for scale in np.linspace(1.4, 0.6, 61):
resized = imutils.resize(img_edge, width=int(img_edge.shape[1] * scale))
# stop scaling down once the resized image is smaller than the template
if resized.shape[0] < template_height or resized.shape[1] < template_width:
break
res = cv2.matchTemplate(resized, template_edge, cv2.TM_CCOEFF_NORMED)
threshold = 0.4
loc = np.where(res >= threshold)
zipped = zip(*loc[::-1])
if len(zipped) > 0:
matched_templates.append(template_png)
break
# determine the state according to the matched templates
matched_state = "none"
for state in state_definitions["workload_states"]:
# look in the matched templates list for each template of this state
match_count = 0
for template in state["templates"]:
if template in matched_templates:
match_count += 1
if match_count >= state["matches"]:
# we have a match
matched_state = state["state_name"]
break
return matched_state
def verify_state(screenshot_file, state_defs_path, workload_phase):
# load and parse state definition file
statedefs_file = os.path.join(state_defs_path, 'definition.yaml')
if not os.path.isfile(statedefs_file):
raise StateDefinitionError("Missing state definitions yaml file: " + statedefs_file)
with open(statedefs_file) as fh:
state_definitions = yaml.load(fh)
# find what the expected state is for the given workload phase
expected_state = None
for phase in state_definitions["workload_phases"]:
if phase["phase_name"] == workload_phase:
expected_state = phase["expected_state"]
if expected_state is None:
raise StateDefinitionError("Phase not defined")
# run a match on the screenshot
matched_state = match_state(screenshot_file, state_defs_path, state_definitions)
return expected_state == matched_state
```
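The template matching above hinges on ``auto_canny`` deriving the Canny thresholds from the median pixel intensity. A self-contained sketch of that step (illustrative only; the synthetic image and the sigma value are made up) could look like this:

```python
import numpy as np
import cv2

# Synthetic grayscale image: mid-grey background with a brighter square.
img = np.full((64, 64), 100, dtype=np.uint8)
img[16:48, 16:48] = 220

sigma = 0.33
v = np.median(img)                        # median pixel intensity (100 here)
lower = int(max(0, (1.0 - sigma) * v))    # lower Canny threshold (67)
upper = int(min(255, (1.0 + sigma) * v))  # upper Canny threshold (133)
edges = cv2.Canny(img, lower, upper)      # edge map outlining the square

print(edges.shape)  # (64, 64); edge pixels are set to 255
```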
#### File: wlauto/utils/trace_cmd.py
```python
import re
import logging
from itertools import chain
from wlauto.utils.misc import isiterable, memoized
from wlauto.utils.types import numeric
logger = logging.getLogger('trace-cmd')
# These markers can be injected into trace to identify the "interesting"
# portion.
TRACE_MARKER_START = 'TRACE_MARKER_START'
TRACE_MARKER_STOP = 'TRACE_MARKER_STOP'
class TraceCmdEvent(object):
"""
A single trace-cmd event. This will appear in the trace cmd report in the format ::
<idle>-0     [000]   3284.126993:   sched_rq_runnable_load:   cpu=0 load=54
   |           |           |                   |               |___________|
   |           |           |                   |                     |
 thread       cpu      timestamp             name                  body
"""
__slots__ = ['thread', 'reporting_cpu_id', 'timestamp', 'name', 'text', '_fields', '_parser']
@property
def fields(self):
if self._fields is not None:
return self._fields
self._fields = {}
if self._parser:
try:
self._parser(self, self.text)
except Exception: # pylint: disable=broad-except
# unknown format assume user does not care or know how to
# parse self.text
pass
return self._fields
def __init__(self, thread, cpu_id, ts, name, body, parser=None):
"""
parameters:
:thread: thread which generated the event
:cpu_id: id of the cpu on which the event has occurred
:ts: timestamp of the event
:name: the name of the event
:body: a string with the rest of the event text
:parser: optionally, a function that will parse the body text to populate
this event's attributes
The parser can be any callable that can be invoked with
parser(event, text)
where ``event`` is this TraceCmdEvent instance, and ``text`` is the body text to be
parsed. The parser should update the passed event instance and not return anything
(the return value will be ignored). Any exceptions raised by the parser will be silently
ignored (note that this means that the event's attributes may be partially initialized).
"""
self.thread = thread
self.reporting_cpu_id = int(cpu_id)
self.timestamp = numeric(ts)
self.name = name
self.text = body
self._fields = None
self._parser = parser
def __getattr__(self, name):
try:
return self.fields[name]
except KeyError:
raise AttributeError(name)
def __str__(self):
return 'TE({} @ {})'.format(self.name, self.timestamp)
__repr__ = __str__
class DroppedEventsEvent(object):
__slots__ = ['thread', 'reporting_cpu_id', 'timestamp', 'name', 'text', 'fields']
def __init__(self, cpu_id):
self.thread = None
self.reporting_cpu_id = None
self.timestamp = None
self.name = 'DROPPED EVENTS DETECTED'
self.text = None
self.fields = {'cpu_id': int(cpu_id)}
def __getattr__(self, name):
try:
return self.fields[name]
except KeyError:
raise AttributeError(name)
def __str__(self):
return 'DROPPED_EVENTS_ON_CPU{}'.format(self.cpu_id)
__repr__ = __str__
def try_convert_to_numeric(v):
try:
if isiterable(v):
return map(numeric, v)
else:
return numeric(v)
except ValueError:
return v
def default_body_parser(event, text):
"""
Default parser used to parse the body text of an event (i.e. the part after
the "header" common to all events has been parsed). This assumes that the body is
a whitespace-separated list of key=value pairs. The parser will attempt to convert
each value into an ``int`` and, failing that, keep it as a string.
"""
parts = [e.rsplit(' ', 1) for e in text.strip().split('=')]
parts = [p.strip() for p in chain.from_iterable(parts)]
if not len(parts) % 2:
i = iter(parts)
for k, v in zip(i, i):
try:
v = int(v)
except ValueError:
pass
event._fields[k] = v
def regex_body_parser(regex, flags=0):
"""
Creates an event body parser from the specified regular expression (could be an
``re.RegexObject``, or a string). The regular expression should contain some named
groups, as those will be extracted as the event attributes (unnamed groups and the
rest of the match will be ignored).
If the specified regex is a string, it will be compiled, in which case ``flags`` may
be provided for the resulting regex object (see ``re`` standard module documentation).
If regex is a pre-compiled object, flags will be ignored.
"""
if isinstance(regex, basestring):
regex = re.compile(regex, flags)
def regex_parser_func(event, text):
match = regex.search(text)
if match:
for k, v in match.groupdict().iteritems():
try:
event._fields[k] = int(v)
except ValueError:
event._fields[k] = v
return regex_parser_func
def sched_switch_parser(event, text):
"""
Sched switch output may be presented in a couple of different formats. One is handled
by a regex. The other format can *almost* be handled by the default parser, if it
weren't for the ``==>`` that appears in the middle.
"""
if text.count('=') == 2: # old format
regex = re.compile(
r'(?P<prev_comm>\S.*):(?P<prev_pid>\d+) \[(?P<prev_prio>\d+)\] (?P<status>\S+)'
r' ==> '
r'(?P<next_comm>\S.*):(?P<next_pid>\d+) \[(?P<next_prio>\d+)\]'
)
parser_func = regex_body_parser(regex)
return parser_func(event, text)
else: # there are more than two "=" -- new format
return default_body_parser(event, text.replace('==>', ''))
def sched_stat_parser(event, text):
"""
sched_stat_* events include the units, "[ns]", in an otherwise
regular key=value sequence; so the units need to be stripped out first.
"""
return default_body_parser(event, text.replace(' [ns]', ''))
def sched_wakeup_parser(event, text):
regex = re.compile(r'(?P<comm>\S+):(?P<pid>\d+) \[(?P<prio>\d+)\] success=(?P<success>\d) CPU:(?P<cpu>\d+)')
parse_func = regex_body_parser(regex)
return parse_func(event, text)
# Maps event onto the corresponding parser for its body text. A parser may be
# a callable with signature
#
# parser(event, bodytext)
#
# a re.RegexObject, or a string (in which case it will be compiled into a
# regex). In case of a string/regex, its named groups will be used to populate
# the event's attributes.
EVENT_PARSER_MAP = {
'sched_stat_blocked': sched_stat_parser,
'sched_stat_iowait': sched_stat_parser,
'sched_stat_runtime': sched_stat_parser,
'sched_stat_sleep': sched_stat_parser,
'sched_stat_wait': sched_stat_parser,
'sched_switch': sched_switch_parser,
'sched_wakeup': sched_wakeup_parser,
'sched_wakeup_new': sched_wakeup_parser,
}
HEADER_REGEX = re.compile(r'^\s*(?:version|cpus)\s*=\s*([\d.]+)\s*$')
DROPPED_EVENTS_REGEX = re.compile(r'CPU:(?P<cpu_id>\d+) \[\d*\s*EVENTS DROPPED\]')
EMPTY_CPU_REGEX = re.compile(r'CPU \d+ is empty')
def split_trace_event_line(line):
"""
Split a trace-cmd event line into the preamble (containing the task, cpu id
and timestamp), the event name, and the event body. Each of these is
delimited by a ': ' (optionally followed by more whitespace); however, ': '
may also appear in the body of the event and in the thread name. This
attempts to identify the correct split by ensuring that there is a '['
(used to mark the cpu id and not a valid character for a task name) in the
preamble.
"""
parts = line.split(': ')
if len(parts) <= 3:
return parts
preamble = parts.pop(0)
while '[' not in preamble:
preamble += ': ' + parts.pop(0)
event_name = parts.pop(0)
return (preamble, event_name, ': '.join(parts))
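# Illustrative sketch (not part of the original module): splitting a made-up line in
# the trace-cmd report format. With exactly three ': '-separated parts the split is
# returned as-is.
#
#   split_trace_event_line('adbd-1073  [001]  3284.126993: sched_wakeup: comm=foo pid=42 prio=120 success=1 CPU:001')
#   # -> ['adbd-1073  [001]  3284.126993', 'sched_wakeup', 'comm=foo pid=42 prio=120 success=1 CPU:001']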
class TraceCmdTrace(object):
@property
@memoized
def has_start_marker(self):
with open(self.file_path) as fh:
for line in fh:
if TRACE_MARKER_START in line:
return True
return False
def __init__(self, file_path, names=None, filter_markers=True):
self.filter_markers = filter_markers
self.file_path = file_path
self.names = names or []
def parse(self): # pylint: disable=too-many-branches,too-many-locals
"""
This is a generator for the trace event stream.
"""
inside_marked_region = False
filters = [re.compile('^{}$'.format(n)) for n in self.names or []]
with open(self.file_path) as fh:
for line in fh:
# if processing trace markers, skip marker lines as well as all
# lines outside marked region
if self.filter_markers:
if not inside_marked_region:
if TRACE_MARKER_START in line:
inside_marked_region = True
continue
elif TRACE_MARKER_STOP in line:
break
if 'EVENTS DROPPED' in line:
match = DROPPED_EVENTS_REGEX.search(line)
if match:
yield DroppedEventsEvent(match.group('cpu_id'))
continue
if line.startswith('version') or line.startswith('cpus') or\
line.startswith('CPU:'):
matched = False
for rx in [HEADER_REGEX, EMPTY_CPU_REGEX]:
match = rx.search(line)
if match:
logger.debug(line.strip())
matched = True
break
if matched:
continue
# <thread/cpu/timestamp>: <event name>: <event body>
parts = split_trace_event_line(line)
if len(parts) != 3:
continue
event_name = parts[1].strip()
if filters:
found = False
for f in filters:
if f.search(event_name):
found = True
break
if not found:
continue
thread_string, rest = parts[0].rsplit(' [', 1)
cpu_id, ts_string = rest.split('] ')
body = parts[2].strip()
body_parser = EVENT_PARSER_MAP.get(event_name, default_body_parser)
if isinstance(body_parser, basestring) or isinstance(body_parser, re._pattern_type): # pylint: disable=protected-access
body_parser = regex_body_parser(body_parser)
yield TraceCmdEvent(
thread=thread_string.strip(),
cpu_id=cpu_id,
ts=ts_string.strip(),
name=event_name,
body=body,
parser=body_parser,
)
else:
if self.filter_markers and inside_marked_region:
logger.warning('Did not encounter a stop marker in trace')
```
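To make the event-parsing pipeline above concrete, the sketch below builds a single ``TraceCmdEvent`` by hand and lets the default body parser populate its fields. It is illustrative only (the event values are invented) and assumes the module is importable as ``wlauto.utils.trace_cmd``.

```python
from wlauto.utils.trace_cmd import TraceCmdEvent, default_body_parser

# A hand-built event, similar to what TraceCmdTrace.parse() constructs per line.
event = TraceCmdEvent(
    thread='<idle>-0',
    cpu_id='0',
    ts='3284.126993',
    name='sched_rq_runnable_load',
    body='cpu=0 load=54',
    parser=default_body_parser,
)

# Accessing .fields (or an unknown attribute) triggers the lazy body parse.
print(event.fields)  # {'cpu': 0, 'load': 54}  (key order may vary)
print(event.load)    # 54
print(event)         # TE(sched_rq_runnable_load @ 3284.126993)
```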
#### File: wlauto/utils/types.py
```python
import os
import re
import math
import shlex
from collections import defaultdict
from urllib import quote, unquote
from wlauto.utils.misc import isiterable, to_identifier
def identifier(text):
"""Converts text to a valid Python identifier by replacing all
whitespace and punctuation."""
return to_identifier(text)
def boolean(value):
"""
Returns bool represented by the value. This is different from
calling the builtin bool() in that it will interpret string representations.
e.g. boolean('0') and boolean('false') will both yield False.
"""
false_strings = ['', '0', 'n', 'no', 'off']
if isinstance(value, basestring):
value = value.lower()
if value in false_strings or 'false'.startswith(value):
return False
return bool(value)
def integer(value):
"""Handles conversions for string respresentations of binary, octal and hex."""
if isinstance(value, basestring):
return int(value, 0)
else:
return int(value)
def numeric(value):
"""
Returns the value as number (int if possible, or float otherwise), or
raises ``ValueError`` if the specified ``value`` does not have a
straightforward numeric conversion.
"""
if isinstance(value, int):
return value
try:
fvalue = float(value)
except ValueError:
raise ValueError('Not numeric: {}'.format(value))
if not math.isnan(fvalue) and not math.isinf(fvalue):
ivalue = int(fvalue)
if ivalue == fvalue: # yeah, yeah, I know. Whatever. This is best-effort.
return ivalue
return fvalue
def file_path(value):
"""Handles expansion of paths containing '~'"""
return os.path.expanduser(value)
def list_of_strs(value):
"""
Value must be iterable. All elements will be converted to strings.
"""
if not isiterable(value):
raise ValueError(value)
return map(str, value)
list_of_strings = list_of_strs
def list_of_ints(value):
"""
Value must be iterable. All elements will be converted to ``int``\ s.
"""
if not isiterable(value):
raise ValueError(value)
return map(int, value)
list_of_integers = list_of_ints
def list_of_numbers(value):
"""
Value must be iterable. All elements will be converted to numbers (either ``ints`` or
``float``\ s depending on the elements).
"""
if not isiterable(value):
raise ValueError(value)
return map(numeric, value)
def list_of_bools(value, interpret_strings=True):
"""
Value must be iterable. All elements will be converted to ``bool``\ s.
.. note:: By default, ``boolean()`` conversion function will be used, which means that
strings like ``"0"`` or ``"false"`` will be interpreted as ``False``. If this
is undesirable, set ``interpret_strings`` to ``False``.
"""
if not isiterable(value):
raise ValueError(value)
if interpret_strings:
return map(boolean, value)
else:
return map(bool, value)
def list_of(type_):
"""Generates a "list of" callable for the specified type. The callable
attempts to convert all elements in the passed value to the specified
``type_``, raising ``ValueError`` on error."""
def __init__(self, values):
list.__init__(self, map(type_, values))
def append(self, value):
list.append(self, type_(value))
def extend(self, other):
list.extend(self, map(type_, other))
def __setitem__(self, idx, value):
list.__setitem__(self, idx, type_(value))
return type('list_of_{}s'.format(type_.__name__),
(list, ), {
"__init__": __init__,
"__setitem__": __setitem__,
"append": append,
"extend": extend,
})
def list_or_string(value):
"""
Converts the value into a list of strings. If the value is not iterable,
a one-element list with stringified value will be returned.
"""
if isinstance(value, basestring):
return [value]
else:
try:
return map(str, value)
except ValueError:
return [str(value)]
def list_or_caseless_string(value):
"""
Converts the value into a list of ``caseless_string``'s. If the value is not iterable
a one-element list with stringified value will be returned.
"""
if isinstance(value, basestring):
return [caseless_string(value)]
else:
try:
return map(caseless_string, value)
except ValueError:
return [caseless_string(value)]
def list_or(type_):
"""
Generator for "list or" types. These take either a single value or a list values
and return a list of the specfied ``type_`` performing the conversion on the value
(if a single value is specified) or each of the elemented of the specified list.
"""
list_type = list_of(type_)
class list_or_type(list_type):
def __init__(self, value):
# pylint: disable=non-parent-init-called,super-init-not-called
if isiterable(value):
list_type.__init__(self, value)
else:
list_type.__init__(self, [value])
return list_or_type
list_or_integer = list_or(integer)
list_or_number = list_or(numeric)
list_or_bool = list_or(boolean)
regex_type = type(re.compile(''))
def regex(value):
"""
Regular expression. If value is a string, it will be compiled with no flags. If you
want to specify flags, value must be precompiled.
"""
if isinstance(value, regex_type):
return value
else:
return re.compile(value)
__counters = defaultdict(int)
def reset_counter(name=None):
__counters[name] = 0
def counter(name=None):
"""
An auto-incrementing value (kind of like an AUTO INCREMENT field in SQL).
Optionally, the name of the counter to be used is specified (each counter
increments separately).
Counts start at 1, not 0.
"""
__counters[name] += 1
value = __counters[name]
return value
class caseless_string(str):
"""
Just like built-in Python string except case-insensitive on comparisons. However, the
case is preserved otherwise.
"""
def __eq__(self, other):
if isinstance(other, basestring):
other = other.lower()
return self.lower() == other
def __ne__(self, other):
return not self.__eq__(other)
def __cmp__(self, other):
if isinstance(other, basestring):
other = other.lower()
return cmp(self.lower(), other)
def format(self, *args, **kwargs):
return caseless_string(super(caseless_string, self).format(*args, **kwargs))
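# Illustrative sketch (not part of the original module):
#   caseless_string('Foo') == 'fOO'   # True  -- comparisons ignore case
#   str(caseless_string('Foo'))       # 'Foo' -- the original case is preserved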
class arguments(list):
"""
Represents command line arguments to be passed to a program.
"""
def __init__(self, value=None):
if isiterable(value):
super(arguments, self).__init__(map(str, value))
elif isinstance(value, basestring):
posix = os.name != 'nt'
super(arguments, self).__init__(shlex.split(value, posix=posix))
elif value is None:
super(arguments, self).__init__()
else:
super(arguments, self).__init__([str(value)])
def append(self, value):
return super(arguments, self).append(str(value))
def extend(self, values):
return super(arguments, self).extend(map(str, values))
def __str__(self):
return ' '.join(self)
class range_dict(dict):
"""
This dict allows you to specify mappings with a range.
If a key is not in the dict, it will search downward until
the next lower key and return its value. E.g.:
If:
a[5] = "Hello"
a[10] = "There"
Then:
a[2] => raises KeyError (there is no key at or below 2)
a[7] == "Hello"
a[999] == "There"
"""
def __getitem__(self, i):
key = int(i)
while key not in self and key > 0:
key -= 1
if key <= 0:
raise KeyError(i)
return dict.__getitem__(self, key)
def __setitem__(self, i, v):
i = int(i)
super(range_dict, self).__setitem__(i, v)
class ParameterDict(dict):
"""
A dict-like object that automatically encodes various types into a url safe string,
and enforces a single type for the contents in a list.
Each value is first prefixed with 2 letters to preserve type when encoding to a string.
The format used is "value_type, value_dimension" e.g. a 'list of floats' would become 'fl'.
"""
# Function to determine the appropriate prefix based on the parameters type
@staticmethod
def _get_prefix(obj):
if isinstance(obj, basestring):
prefix = 's'
elif isinstance(obj, float):
prefix = 'f'
elif isinstance(obj, long):
prefix = 'd'
elif isinstance(obj, bool):
prefix = 'b'
elif isinstance(obj, int):
prefix = 'i'
elif obj is None:
prefix = 'n'
else:
raise ValueError('Unable to encode {} {}'.format(obj, type(obj)))
return prefix
# Function to add prefix and urlencode a provided parameter.
@staticmethod
def _encode(obj):
if isinstance(obj, list):
t = type(obj[0])
prefix = ParameterDict._get_prefix(obj[0]) + 'l'
for item in obj:
if not isinstance(item, t):
msg = 'Lists must only contain a single type, contains {} and {}'
raise ValueError(msg.format(t, type(item)))
obj = '0newelement0'.join(str(x) for x in obj)
else:
prefix = ParameterDict._get_prefix(obj) + 's'
return quote(prefix + str(obj))
# Function to decode a string and return a value of the original parameter type.
# pylint: disable=too-many-return-statements
@staticmethod
def _decode(string):
value_type = string[:1]
value_dimension = string[1:2]
value = unquote(string[2:])
if value_dimension == 's':
if value_type == 's':
return str(value)
elif value_type == 'b':
return boolean(value)
elif value_type == 'd':
return long(value)
elif value_type == 'f':
return float(value)
elif value_type == 'i':
return int(value)
elif value_type == 'n':
return None
elif value_dimension == 'l':
return [ParameterDict._decode(value_type + 's' + x)
for x in value.split('0newelement0')]
else:
raise ValueError('Unknown {} {}'.format(type(string), string))
def __init__(self, *args, **kwargs):
for k, v in kwargs.iteritems():
self.__setitem__(k, v)
dict.__init__(self, *args)
def __setitem__(self, name, value):
dict.__setitem__(self, name, self._encode(value))
def __getitem__(self, name):
return self._decode(dict.__getitem__(self, name))
def __contains__(self, item):
return dict.__contains__(self, self._encode(item))
def __iter__(self):
return iter((k, self._decode(v)) for (k, v) in self.items())
def iteritems(self):
return self.__iter__()
def get(self, name):
return self._decode(dict.get(self, name))
def pop(self, key):
return self._decode(dict.pop(self, key))
def popitem(self):
key, value = dict.popitem(self)
return (key, self._decode(value))
def iter_encoded_items(self):
return dict.iteritems(self)
def get_encoded_value(self, name):
return dict.__getitem__(self, name)
def values(self):
return [self[k] for k in dict.keys(self)]
def update(self, *args, **kwargs):
for d in list(args) + [kwargs]:
if isinstance(d, ParameterDict):
dict.update(self, d)
else:
for k, v in d.iteritems():
self[k] = v
```
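A short sketch of how the two dict subclasses above behave in practice. It is illustrative only (the keys and values are made up) and assumes the module is importable as ``wlauto.utils.types``.

```python
from wlauto.utils.types import range_dict, ParameterDict

# range_dict: lookups fall back to the nearest lower key.
api_packages = range_dict()
api_packages[1] = 'com.google.android.gallery3d'
api_packages[23] = 'com.google.android.GoogleCamera'
print(api_packages[19])  # 'com.google.android.gallery3d'
print(api_packages[23])  # 'com.google.android.GoogleCamera'

# ParameterDict: values are stored URL-encoded with a two-character type prefix
# and transparently decoded on access.
params = ParameterDict()
params['iterations'] = 5
params['rates'] = [1.5, 2.5]
print(params.get_encoded_value('iterations'))  # 'is5'
print(params['iterations'])                    # 5
print(params['rates'])                         # [1.5, 2.5]
```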
#### File: workloads/cameracapture/__init__.py
```python
from wlauto import UiAutomatorWorkload, Parameter
from wlauto.utils.types import range_dict
class Cameracapture(UiAutomatorWorkload):
name = 'cameracapture'
description = """
Uses in-built Android camera app to take photos.
"""
package = 'com.google.android.gallery3d'
activity = 'com.android.camera.CameraActivity'
api_packages = range_dict()
api_packages[1] = 'com.google.android.gallery3d'
api_packages[23] = 'com.google.android.GoogleCamera'
parameters = [
Parameter('no_of_captures', kind=int, default=5,
description='Number of photos to be taken.'),
Parameter('time_between_captures', kind=int, default=5,
description='Time, in seconds, between two consecutive camera clicks.'),
]
def initialize(self, context):
api = self.device.get_sdk_version()
self.uiauto_params['no_of_captures'] = self.no_of_captures
self.uiauto_params['time_between_captures'] = self.time_between_captures
self.uiauto_params['api_level'] = api
self.package = self.api_packages[api]
version = self.device.get_installed_package_version(self.package) or ''
version = version.replace(' ', '_')
self.uiauto_params['version'] = version
def setup(self, context):
super(Cameracapture, self).setup(context)
self.device.execute('am start -n {}/{}'.format(self.package, self.activity))
def teardown(self, context):
self.device.execute('am force-stop {}'.format(self.package))
super(Cameracapture, self).teardown(context)
```
#### File: workloads/glbcorp/__init__.py
```python
from __future__ import division
import os
import re
import select
import json
import threading
import subprocess
from wlauto import ApkWorkload, Parameter, Alias
from wlauto.exceptions import WorkloadError
DELAY = 2
OLD_RESULT_START_REGEX = re.compile(r'I/TfwActivity\s*\(\s*\d+\):\s+\S+\s+result: {')
NEW_RESULT_START_REGEX = re.compile(r'[\d\s:.-]+I\sTfwActivity(\s*\(\s*\d+\))?:\s+\S+\s+result: {')
OLD_PREAMBLE_REGEX = re.compile(r'I/TfwActivity\s*\(\s*\d+\):\s+')
NEW_PREAMBLE_REGEX = re.compile(r'[\d\s:.-]+I\sTfwActivity(\s*\(\s*\d+\))?:')
class GlbCorp(ApkWorkload):
name = 'glb_corporate'
description = """
GFXBench GL (a.k.a. GLBench) v3.0 Corporate version.
This is a version of GLBench available through a corporate license (distinct
from the version available in Google Play store).
"""
package = 'net.kishonti.gfxbench'
activity = 'net.kishonti.benchui.TestActivity'
result_start_regex = None
preamble_regex = None
valid_test_ids = [
'gl_alu',
'gl_alu_off',
'gl_blending',
'gl_blending_off',
'gl_driver',
'gl_driver_off',
'gl_fill',
'gl_fill_off',
'gl_manhattan',
'gl_manhattan_off',
'gl_trex',
'gl_trex_battery',
'gl_trex_off',
'gl_trex_qmatch',
'gl_trex_qmatch_highp',
]
supported_resolutions = {
'720p': {
'-ei -w': 1280,
'-ei -h': 720,
},
'1080p': {
'-ei -w': 1920,
'-ei -h': 1080,
}
}
parameters = [
Parameter('times', kind=int, default=1, constraint=lambda x: x > 0,
description=('Specifies the number of times the benchmark will be run in a "tight '
'loop", i.e. without performaing setup/teardown inbetween.')),
Parameter('resolution', default=None, allowed_values=['720p', '1080p', '720', '1080'],
description=('Explicitly specifies the resolution under which the benchmark will '
'be run. If not specified, device\'s native resolution will be used.')),
Parameter('test_id', default='gl_manhattan_off', allowed_values=valid_test_ids,
description='ID of the GFXBench test to be run.'),
Parameter('run_timeout', kind=int, default=10 * 60,
description="""
Timeout for workload execution. The workload will be killed if it hasn't completed
within this period.
"""),
]
aliases = [
Alias('manhattan', test_id='gl_manhattan'),
Alias('manhattan_off', test_id='gl_manhattan_off'),
Alias('manhattan_offscreen', test_id='gl_manhattan_off'),
]
def setup(self, context):
super(GlbCorp, self).setup(context)
self.command = self._build_command()
self.monitor = GlbRunMonitor(self.device)
self.monitor.start()
def launch_package(self):
# Unlike with most other APK workloads, we're invoking the use case
# directly by starting the activity with appropriate parameters on the
# command line during execution, so we don't need to start the activity
# during setup.
pass
def run(self, context):
for _ in xrange(self.times):
result = self.device.execute(self.command, timeout=self.run_timeout)
if 'FAILURE' in result:
raise WorkloadError(result)
else:
self.logger.debug(result)
self.device.sleep(DELAY)
self.monitor.wait_for_run_end(self.run_timeout)
def update_result(self, context): # NOQA
super(GlbCorp, self).update_result(context)
self.monitor.stop()
iteration = 0
results = []
with open(self.logcat_log) as fh:
try:
line = fh.next()
result_lines = []
while True:
if OLD_RESULT_START_REGEX.search(line):
self.preamble_regex = OLD_PREAMBLE_REGEX
self.result_start_regex = OLD_RESULT_START_REGEX
elif NEW_RESULT_START_REGEX.search(line):
self.preamble_regex = NEW_PREAMBLE_REGEX
self.result_start_regex = NEW_RESULT_START_REGEX
if self.result_start_regex and self.result_start_regex.search(line):
result_lines.append('{')
line = fh.next()
while self.preamble_regex.search(line):
result_lines.append(self.preamble_regex.sub('', line))
line = fh.next()
try:
result = json.loads(''.join(result_lines))
results.append(result)
if iteration:
suffix = '_{}'.format(iteration)
else:
suffix = ''
for sub_result in result['results']:
frames = sub_result['score']
elapsed_time = sub_result['elapsed_time'] / 1000
fps = frames / elapsed_time
context.result.add_metric('score' + suffix, frames, 'frames')
context.result.add_metric('fps' + suffix, fps)
except ValueError:
self.logger.warning('Could not parse result for iteration {}'.format(iteration))
result_lines = []
iteration += 1
line = fh.next()
except StopIteration:
pass # EOF
if results:
outfile = os.path.join(context.output_directory, 'glb-results.json')
with open(outfile, 'wb') as wfh:
json.dump(results, wfh, indent=4)
def _build_command(self):
command_params = []
command_params.append('-e test_ids "{}"'.format(self.test_id))
if self.resolution:
if not self.resolution.endswith('p'):
self.resolution += 'p'
for k, v in self.supported_resolutions[self.resolution].iteritems():
command_params.append('{} {}'.format(k, v))
return 'am start -W -S -n {}/{} {}'.format(self.package,
self.activity,
' '.join(command_params))
class GlbRunMonitor(threading.Thread):
old_regex = re.compile(r'I/Runner\s+\(\s*\d+\): finished:')
new_regex = re.compile(r'I Runner\s*:\s*finished:')
def __init__(self, device):
super(GlbRunMonitor, self).__init__()
self.device = device
self.daemon = True
self.run_ended = threading.Event()
self.stop_event = threading.Event()
# Not using clear_logcat() because command collects directly, i.e. will
# ignore poller.
self.device.execute('logcat -c')
if self.device.adb_name:
self.command = ['adb', '-s', self.device.adb_name, 'logcat']
else:
self.command = ['adb', 'logcat']
def run(self):
proc = subprocess.Popen(self.command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
while not self.stop_event.is_set():
if self.run_ended.is_set():
self.device.sleep(DELAY)
else:
ready, _, _ = select.select([proc.stdout, proc.stderr], [], [], 2)
if ready:
line = ready[0].readline()
if self.new_regex.search(line) or self.old_regex.search(line):
self.run_ended.set()
def stop(self):
self.stop_event.set()
self.join()
def wait_for_run_end(self, timeout):
self.run_ended.wait(timeout)
self.run_ended.clear()
```
#### File: workloads/linpack_cli/__init__.py
```python
import os
from wlauto import Workload, Parameter, Executable
class LinpackCliWorkload(Workload):
name = 'linpack-cli'
description = """
linpack benchmark with a command line interface
Benchmarks FLOPS (floating point operations per second).
This is the old-school version of the benchmark. Source may be viewed here:
http://www.netlib.org/benchmark/linpackc.new
"""
parameters = [
Parameter('array_size', kind=int, default=200,
description='size of arrays to be used by the benchmark.'),
]
binary = None # set during initialization
def initialize(self, context):
host_exe = context.resolver.get(Executable(self, self.device.abi, 'linpack'))
LinpackCliWorkload.binary = self.device.install(host_exe)
def setup(self, context):
self.command = '(echo {}; echo q) | {}'.format(self.array_size, self.binary)
def run(self, context):
self.raw_output = self.device.execute(self.command,
timeout=(self.array_size / 10) ** 2,
check_exit_code=False)
def update_result(self, context):
raw_outfile = os.path.join(context.output_directory, 'linpack-raw.txt')
with open(raw_outfile, 'w') as wfh:
wfh.write(self.raw_output)
context.add_artifact('linpack-raw', raw_outfile, kind='raw')
marker = '--------------------'
lines = iter(self.raw_output.split('\n'))
for line in lines:
if marker in line:
break
for line in lines:
line = line.strip()
if not line:
break
parts = line.split()
classifiers = {'reps': int(parts[0])}
context.add_metric('time', float(parts[1]), 'seconds',
lower_is_better=True, classifiers=classifiers)
context.add_metric('KFLOPS', float(parts[5]), 'KFLOPS',
lower_is_better=True, classifiers=classifiers)
def finalize(self, context):
self.device.uninstall(self.binary)
```
#### File: workloads/linpack/__init__.py
```python
import os
import re
from wlauto import AndroidUiAutoBenchmark, Parameter
class Linpack(AndroidUiAutoBenchmark):
name = 'linpack'
description = """
The LINPACK Benchmarks are a measure of a system's floating point computing
power.
http://en.wikipedia.org/wiki/LINPACK_benchmarks
From the article:
Introduced by <NAME>, they measure how fast a computer solves
a dense n by n system of linear equations Ax = b, which is a common task in
engineering.
"""
package = 'com.greenecomputing.linpackpro'
activity = '.Linpack'
summary_metrics = ['Linpack ST', 'Linpack MT']
regex = re.compile(r'LINPACK RESULT: (?P<type>\w+) (?P<value>\S+)')
parameters = [
Parameter('output_file', default=None,
description='On-device output file path.'),
]
def __init__(self, device, **kwargs):
super(Linpack, self).__init__(device, **kwargs)
if self.output_file is None:
self.output_file = os.path.join(self.device.working_directory, 'linpack.txt')
self.uiauto_params['output_file'] = self.output_file
def update_result(self, context):
super(Linpack, self).update_result(context)
with open(self.logcat_log) as fh:
for line in fh:
match = self.regex.search(line)
if match:
metric = 'Linpack ' + match.group('type')
value = float(match.group('value'))
context.result.add_metric(metric, value, 'MFLOPS')
```
#### File: workloads/octaned8/__init__.py
```python
import os
import re
from wlauto import Workload, Parameter, Executable
from wlauto.common.resources import File
from wlauto.exceptions import ConfigError
regex_map = {
"Richards": (re.compile(r'Richards: (\d+.*)')),
"DeltaBlue": (re.compile(r'DeltaBlue: (\d+.*)')),
"Crypto": (re.compile(r'Crypto: (\d+.*)')),
"RayTrace": (re.compile(r'RayTrace: (\d+.*)')),
"EarleyBoyer": (re.compile(r'EarleyBoyer: (\d+.*)')),
"RegExp": (re.compile(r'RegExp: (\d+.*)')),
"Splay": (re.compile(r'Splay: (\d+.*)')),
"SplayLatency": (re.compile(r'SplayLatency: (\d+.*)')),
"NavierStokes": (re.compile(r'NavierStokes: (\d+.*)')),
"PdfJS": (re.compile(r'PdfJS: (\d+.*)')),
"Mandreel": (re.compile(r'Mandreel: (\d+.*)')),
"MandreelLatency": (re.compile(r'MandreelLatency: (\d+.*)')),
"Gameboy": (re.compile(r'Gameboy: (\d+.*)')),
"CodeLoad": (re.compile(r'CodeLoad: (\d+.*)')),
"Box2D": (re.compile(r'Box2D: (\d+.*)')),
"zlib": (re.compile(r'zlib: (\d+.*)')),
"Score": (re.compile(r'Score .*: (\d+.*)'))
}
class Octaned8(Workload):
name = 'octaned8'
description = """
Runs the Octane d8 benchmark.
This workload runs d8 binaries built from source and placed in the dependencies folder along
with test assets from https://github.com/chromium/octane which also need to be placed in an
assets folder within the dependencies folder.
Original source from::
https://github.com/v8/v8/wiki/D8%20on%20Android
"""
parameters = [
Parameter('run_timeout', kind=int, default=180,
description='Timeout, in seconds, for the test execution.'),
]
supported_platforms = ['android']
executables = ['d8', 'natives_blob.bin', 'snapshot_blob.bin']
def initialize(self, context): # pylint: disable=no-self-use
assets_dir = self.device.path.join(self.device.working_directory, 'assets')
self.device.execute('mkdir -p {}'.format(assets_dir))
assets_tar = 'octaned8-assets.tar'
fpath = context.resolver.get(File(self, assets_tar))
self.device.push_file(fpath, assets_dir, timeout=300)
self.command = 'cd {}; {} busybox tar -x -f {}'.format(assets_dir, self.device.busybox, assets_tar)
self.output = self.device.execute(self.command, timeout=self.run_timeout)
for f in self.executables:
binFile = context.resolver.get(Executable(self, self.device.abi, f))
self.device_exe = self.device.install(binFile)
def setup(self, context):
self.logger.info('Copying d8 binaries to device')
assets_dir = self.device.path.join(self.device.working_directory, 'assets')
device_file = self.device.path.join(self.device.working_directory, 'octaned8.output')
self.command = 'cd {}; {}/d8 ./run.js >> {} 2>&1'.format(assets_dir, self.device.binaries_directory, device_file)
def run(self, context):
self.logger.info('Starting d8 tests')
self.output = self.device.execute(self.command, timeout=self.run_timeout)
def update_result(self, context):
host_file = os.path.join(context.output_directory, 'octaned8.output')
device_file = self.device.path.join(self.device.working_directory, 'octaned8.output')
self.device.pull_file(device_file, host_file)
with open(os.path.join(host_file)) as octaned8_file:
for line in octaned8_file:
for label, regex in regex_map.iteritems():
match = regex.search(line)
if match:
context.result.add_metric(label, float(match.group(1)))
self.device.execute('rm {}'.format(device_file))
def finalize(self, context):
for f in self.executables:
self.device.uninstall_executable(f)
self.device.execute('rm {}'.format(self.device.path.join(self.device.working_directory, f)))
assets_dir = self.device.path.join(self.device.working_directory, 'assets')
self.device.execute('rm -rf {}'.format(assets_dir))
```
#### File: workloads/smartbench/__init__.py
```python
import os
import re
import time
from wlauto import AndroidUiAutoBenchmark
class Smartbench(AndroidUiAutoBenchmark):
name = 'smartbench'
description = """
Smartbench is a multi-core friendly benchmark application that measures the
overall performance of an android device. It reports both Productivity and
Gaming Index.
https://play.google.com/store/apps/details?id=com.smartbench.twelve&hl=en
From the website:
It will be better prepared for the quad-core world. Unfortunately this also
means it will run slower on older devices. It will also run slower on
high-resolution tablet devices. All 3D tests are now rendered in full native
resolutions so naturally it will stress hardware harder on these devices.
This also applies to higher resolution hand-held devices.
"""
package = 'com.smartbench.twelve'
activity = '.Smartbench2012'
summary_metrics = ['Smartbench: valueGame', 'Smartbench: valueProd']
run_timeout = 10 * 60
prod_regex = re.compile('valueProd=(\d+)')
game_regex = re.compile('valueGame=(\d+)')
def update_result(self, context):
super(Smartbench, self).update_result(context)
with open(self.logcat_log) as fh:
text = fh.read()
match = self.prod_regex.search(text)
prod = int(match.group(1))
match = self.game_regex.search(text)
game = int(match.group(1))
context.result.add_metric('Smartbench: valueProd', prod)
context.result.add_metric('Smartbench: valueGame', game)
```
#### File: workloads/telemetry/__init__.py
```python
import os
import re
import csv
import shutil
import json
import urllib
import stat
from zipfile import is_zipfile, ZipFile
try:
import pandas as pd
except ImportError:
pd = None
from wlauto import Workload, Parameter
from wlauto.exceptions import WorkloadError, ConfigError
from wlauto.utils.misc import check_output, get_null, get_meansd
from wlauto.utils.types import numeric
RESULT_REGEX = re.compile(r'RESULT ([^:]+): ([^=]+)\s*=\s*' # preamble and test/metric name
r'(\[([^\]]+)\]|(\S+))' # value
r'\s*(\S+)') # units
TRACE_REGEX = re.compile(r'Trace saved as ([^\n]+)')
# Trace event that signifies rendition of a Frame
FRAME_EVENT = 'SwapBuffersLatency'
TELEMETRY_ARCHIVE_URL = 'http://storage.googleapis.com/chromium-telemetry/snapshots/telemetry.zip'
class Telemetry(Workload):
name = 'telemetry'
description = """
Executes Google's Telemetry benchmarking framework
Url: https://www.chromium.org/developers/telemetry
From the web site:
Telemetry is Chrome's performance testing framework. It allows you to
perform arbitrary actions on a set of web pages and report metrics about
it. The framework abstracts:
- Launching a browser with arbitrary flags on any platform.
- Opening a tab and navigating to the page under test.
- Fetching data via the Inspector timeline and traces.
- Using Web Page Replay to cache real-world websites so they don't
change when used in benchmarks.
Design Principles
- Write one performance test that runs on all platforms - Windows, Mac,
Linux, Chrome OS, and Android for both Chrome and ContentShell.
- Runs on browser binaries, without a full Chromium checkout, and without
having to build the browser yourself.
- Use WebPageReplay to get repeatable test results.
- Clean architecture for writing benchmarks that keeps measurements and
use cases separate.
- Run on non-Chrome browsers for comparative studies.
This workload runs Telemetry via its ``run_benchmark`` script (which
must be in PATH or specified using the ``run_benchmark_path`` parameter) and
parses metrics from the resulting output.
**device setup**
The device setup will depend on whether you're running a test image (in
which case little or no setup should be necessary)
"""
supported_platforms = ['android', 'chromeos']
parameters = [
Parameter('run_benchmark_path', default=None,
description="""
This is the path to the run_benchmark script which runs a
Telemetry benchmark. If not specified, WA will look for Telemetry in its
dependencies; if not found there, Telemetry will be downloaded.
"""),
Parameter('test', default='page_cycler.top_10_mobile',
description="""
Specifies the telemetry test to run.
"""),
Parameter('run_benchmark_params', default='',
description="""
Additional parameters to be passed to ``run_benchmark``.
"""),
Parameter('run_timeout', kind=int, default=900,
description="""
Timeout for execution of the test.
"""),
Parameter('extract_fps', kind=bool, default=False,
description="""
If ``True``, FPS for the run will be computed from the trace (the 'trace' profiler must be enabled).
"""),
Parameter('target_config', kind=str, default=None,
description="""
Manually specify target configuration for telemetry. This must contain
--browser option plus any addition options Telemetry requires for a particular
target (e.g. --device or --remote)
"""),
]
def validate(self):
ret = os.system('{} > {} 2>&1'.format(self.run_benchmark_path, get_null()))
if ret > 255:
pass # telemetry found and appears to be installed properly.
elif ret == 127:
raise WorkloadError('run_benchmark not found (did you specify correct run_benchmark_path?)')
else:
raise WorkloadError('Unexpected error from run_benchmark: {}'.format(ret))
if self.extract_fps and 'trace' not in self.run_benchmark_params:
raise ConfigError('"trace" profiler must be enabled in order to extract FPS for Telemetry')
self._resolve_run_benchmark_path()
def setup(self, context):
self.raw_output = None
self.error_output = None
self.command = self.build_command()
def run(self, context):
self.logger.debug(self.command)
self.raw_output, self.error_output = check_output(self.command, shell=True, timeout=self.run_timeout, ignore='all')
def update_result(self, context): # pylint: disable=too-many-locals
if self.error_output:
self.logger.error('run_benchmark output contained errors:\n' + self.error_output)
elif not self.raw_output:
self.logger.warning('Did not get run_benchmark output.')
return
raw_outfile = os.path.join(context.output_directory, 'telemetry_raw.out')
with open(raw_outfile, 'w') as wfh:
wfh.write(self.raw_output)
context.add_artifact('telemetry-raw', raw_outfile, kind='raw')
results, artifacts = parse_telemetry_results(raw_outfile)
csv_outfile = os.path.join(context.output_directory, 'telemetry.csv')
with open(csv_outfile, 'wb') as wfh:
writer = csv.writer(wfh)
writer.writerow(['kind', 'url', 'iteration', 'value', 'units'])
for result in results:
writer.writerows(result.rows)
for i, value in enumerate(result.values, 1):
context.add_metric(result.kind, value, units=result.units,
classifiers={'url': result.url, 'time': i})
context.add_artifact('telemetry', csv_outfile, kind='data')
for idx, artifact in enumerate(artifacts):
if is_zipfile(artifact):
zf = ZipFile(artifact)
for item in zf.infolist():
zf.extract(item, context.output_directory)
zf.close()
context.add_artifact('telemetry_trace_{}'.format(idx), path=item.filename, kind='data')
else: # not a zip archive
wa_path = os.path.join(context.output_directory,
os.path.basename(artifact))
shutil.copy(artifact, wa_path)
context.add_artifact('telemetry_artifact_{}'.format(idx), path=wa_path, kind='data')
if self.extract_fps:
self.logger.debug('Extracting FPS...')
_extract_fps(context)
def build_command(self):
device_opts = ''
if self.target_config:
device_opts = self.target_config
else:
if self.device.platform == 'chromeos':
if '--remote' not in self.run_benchmark_params:
device_opts += '--remote={} '.format(self.device.host)
if '--browser' not in self.run_benchmark_params:
device_opts += '--browser=cros-chrome '
elif self.device.platform == 'android':
if '--device' not in self.run_benchmark_params and self.device.adb_name:
device_opts += '--device={} '.format(self.device.adb_name)
if '--browser' not in self.run_benchmark_params:
device_opts += '--browser=android-webview-shell '
else:
raise WorkloadError('Unless you\'re running Telemetry on a ChromeOS or Android device, '
'you must specify the target_config option')
return '{} {} {} {}'.format(self.run_benchmark_path,
self.test,
device_opts,
self.run_benchmark_params)
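# Illustrative result (hypothetical adb name): for an Android target this
# produces something along the lines of
#   <run_benchmark_path> page_cycler.top_10_mobile --device=0123456789ABCDEF --browser=android-webview-shell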
def _resolve_run_benchmark_path(self):
# pylint: disable=access-member-before-definition
if self.run_benchmark_path:
if not os.path.exists(self.run_benchmark_path):
raise ConfigError('run_benchmark path "{}" does not exist'.format(self.run_benchmark_path))
else:
self.run_benchmark_path = os.path.join(self.dependencies_directory, 'telemetry', 'run_benchmark')
self.logger.debug('run_benchmark_path not specified; using {}'.format(self.run_benchmark_path))
if not os.path.exists(self.run_benchmark_path):
self.logger.debug('Telemetry not found locally; downloading...')
local_archive = os.path.join(self.dependencies_directory, 'telemetry.zip')
urllib.urlretrieve(TELEMETRY_ARCHIVE_URL, local_archive)
zf = ZipFile(local_archive)
zf.extractall(self.dependencies_directory)
if not os.path.exists(self.run_benchmark_path):
raise WorkloadError('Could not download and extract Telemetry')
old_mode = os.stat(self.run_benchmark_path).st_mode
os.chmod(self.run_benchmark_path, old_mode | stat.S_IXUSR)
def _extract_fps(context):
trace_files = [a.path for a in context.iteration_artifacts
if a.name.startswith('telemetry_trace_')]
for tf in trace_files:
name = os.path.splitext(os.path.basename(tf))[0]
fps_file = os.path.join(context.output_directory, name + '-fps.csv')
with open(tf) as fh:
data = json.load(fh)
events = pd.Series([e['ts'] for e in data['traceEvents'] if
FRAME_EVENT == e['name']])
fps = (1000000 / (events - events.shift(1)))
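# e.g. frames whose 'ts' values are 16667 us apart give 1000000 / 16667 ~= 60 fps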
fps.index = events
df = fps.dropna().reset_index()
df.columns = ['timestamp', 'fps']
with open(fps_file, 'w') as wfh:
df.to_csv(wfh, index=False)
context.add_artifact('{}_fps'.format(name), fps_file, kind='data')
context.result.add_metric('{} FPS'.format(name), df.fps.mean(),
units='fps')
context.result.add_metric('{} FPS (std)'.format(name), df.fps.std(),
units='fps', lower_is_better=True)
class TelemetryResult(object):
@property
def average(self):
return get_meansd(self.values)[0]
@property
def std(self):
return get_meansd(self.values)[1]
@property
def rows(self):
for i, v in enumerate(self.values):
yield [self.kind, self.url, i, v, self.units]
def __init__(self, kind=None, url=None, values=None, units=None):
self.kind = kind
self.url = url
self.values = values or []
self.units = units
def __str__(self):
return 'TR({kind},{url},{values},{units})'.format(**self.__dict__)
__repr__ = __str__
def parse_telemetry_results(filepath):
results = []
artifacts = []
with open(filepath) as fh:
for line in fh:
match = RESULT_REGEX.search(line)
if match:
result = TelemetryResult()
result.kind = match.group(1)
result.url = match.group(2)
if match.group(4):
result.values = map(numeric, match.group(4).split(','))
else:
result.values = [numeric(match.group(5))]
result.units = match.group(6)
results.append(result)
match = TRACE_REGEX.search(line)
if match:
artifacts.append(match.group(1))
return results, artifacts
if __name__ == '__main__':
import sys # pylint: disable=wrong-import-order,wrong-import-position
from pprint import pprint # pylint: disable=wrong-import-order,wrong-import-position
path = sys.argv[1]
pprint(parse_telemetry_results(path))
```
#### File: workloads/video/__init__.py
```python
import os
import urllib
from collections import defaultdict
from wlauto import Workload, settings, Parameter, Alias
from wlauto.exceptions import ConfigError, WorkloadError
from wlauto.utils.misc import ensure_directory_exists as _d
from wlauto.utils.types import boolean
DOWNLOAD_URLS = {
'1080p': 'http://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_1080p_surround.avi',
'720p': 'http://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_720p_surround.avi',
'480p': 'http://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_480p_surround-fix.avi'
}
class VideoWorkload(Workload):
name = 'video'
description = """
Plays a video file using the standard android video player for a predetermined duration.
The video can be specified either using ``resolution`` workload parameter, in which case
`Big Buck Bunny`_ video of that resolution will be downloaded and used, or using
``filename`` parameter, in which case the video file specified will be used.
.. _Big Buck Bunny: http://www.bigbuckbunny.org/
"""
supported_platforms = ['android']
parameters = [
Parameter('play_duration', kind=int, default=20,
description='Playback duration of the video file. This becomes the duration of the workload.'),
Parameter('resolution', default='720p', allowed_values=['480p', '720p', '1080p'],
description='Specifies which resolution video file to play.'),
Parameter('filename',
description="""
The name of the video file to play. This can be either a path
to the file anywhere on your file system, or it could be just a
name, in which case, the workload will look for it in
``~/.workloads_automation/dependency/video``
*Note*: either resolution or filename should be specified, but not both!
"""),
Parameter('force_dependency_push', kind=boolean, default=False,
description="""
If true, video will always be pushed to device, regardless
of whether the file is already on the device. Default is ``False``.
"""),
]
aliases = [
Alias('video_720p', resolution='720p'),
Alias('video_1080p', resolution='1080p'),
]
@property
def host_video_file(self):
if not self._selected_file:
if self.filename:
if self.filename[0] in './' or len(self.filename) > 1 and self.filename[1] == ':':
filepath = os.path.abspath(self.filename)
else:
filepath = os.path.join(self.video_directory, self.filename)
if not os.path.isfile(filepath):
raise WorkloadError('{} does not exist.'.format(filepath))
self._selected_file = filepath
else:
files = self.video_files[self.resolution]
if not files:
url = DOWNLOAD_URLS[self.resolution]
filepath = os.path.join(self.video_directory, os.path.basename(url))
self.logger.debug('Downloading {}...'.format(filepath))
urllib.urlretrieve(url, filepath)
self._selected_file = filepath
else:
self._selected_file = files[0]
if len(files) > 1:
self.logger.warn('Multiple files for {} found. Using {}.'.format(self.resolution, self._selected_file))
self.logger.warn('Use the \'filename\' parameter instead of \'resolution\' to specify a different file.')
return self._selected_file
def init_resources(self, context):
self.video_directory = _d(os.path.join(settings.dependencies_directory, 'video'))
self.video_files = defaultdict(list)
self.enum_video_files()
self._selected_file = None
def setup(self, context):
on_device_video_file = os.path.join(self.device.working_directory, os.path.basename(self.host_video_file))
if self.force_dependency_push or not self.device.file_exists(on_device_video_file):
self.logger.debug('Copying {} to device.'.format(self.host_video_file))
self.device.push_file(self.host_video_file, on_device_video_file, timeout=120)
self.device.clear_logcat()
command = 'am start -W -S -n com.android.gallery3d/.app.MovieActivity -d {}'.format(on_device_video_file)
self.device.execute(command)
def run(self, context):
self.device.sleep(self.play_duration)
def update_result(self, context):
self.device.execute('am force-stop com.android.gallery3d')
def teardown(self, context):
pass
def validate(self):
if (self.resolution and self.filename) and (self.resolution != self.parameters['resolution'].default):
raise ConfigError('Either resolution *or* filename must be specified, but not both.')
def enum_video_files(self):
for filename in os.listdir(self.video_directory):
for resolution in self.parameters['resolution'].allowed_values:
if resolution in filename:
self.video_files[resolution].append(os.path.join(self.video_directory, filename))
``` |
{
"source": "Joes-BitGit/LearnPython",
"score": 4
} |
#### File: Projects/Numbers/fizzbuzz.py
```python
def fizzBuzz(n: int):
nums = []
# hashmap to hold divisible numbers
# makes it much easier to add any other numbers needed to be divisible
fizzbuzz_dict = {3:'Fizz', 5:'Buzz', 7: 'Jazz'}
# loop through until n
for i in range(1,n+1):
ans_str = ''
# iterate over the dictionary to check whether any of its keys divide i
for key in fizzbuzz_dict.keys():
# concatenate if necessary
# eg if n == 15
# 3 -> 'Fizz' then ans_str = 'Fizz'
# 5 -> 'Buzz' then ans_str = 'FizzBuzz'
if i % key == 0:
ans_str += fizzbuzz_dict[key]
# if the string is empty then the number was not divisible
# by anything in the hashmap
if not ans_str:
ans_str = str(i)
nums.append(ans_str)
return nums
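# Hedged sanity check of the mapping above (relies on insertion-ordered
# dicts, i.e. Python 3.7+): 15 = 3 * 5 -> 'FizzBuzz', 21 = 3 * 7 -> 'FizzJazz'.
assert fizzBuzz(21)[14] == 'FizzBuzz'
assert fizzBuzz(21)[20] == 'FizzJazz'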
if __name__ == '__main__':
print(fizzBuzz(105))
``` |
{
"source": "Joes-BitGit/Leetcode",
"score": 4
} |
#### File: Leetcode/leetcode/best_time_buy_sell_2.py
```python
class Solution:
def maxProfit(self, prices: List[int]) -> int:
'''
Time: O(N), only need to iterate over the array once
Space: O(1), constant space no extra data structures or stack frames
'''
"""
Suppose the first sequence is "a <= b <= c <= d",
the profit is "d - a = (b - a) + (c - b) + (d - c)" without a doubt.
And suppose another one is "a <= b >= b' <= c <= d",
the profit is not difficult to be figured out as "(b - a) + (d - b')".
So you just target at monotone sequences.
"""
# Edge cases
# if the array is empty or
# array size is less than 1
if not prices or len(prices) <= 1:
return 0
max_profit = 0
# start from index 1 so that prices[i-1] stays in bounds
for i in range(1,len(prices)):
# if the current price is > the prev prices
if prices[i] > prices[i-1]:
# then profit can be made
max_profit += prices[i] - prices[i-1]
return max_profit
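# Worked example: prices = [7, 1, 5, 3, 6, 4] has upward moves
# 1 -> 5 (+4) and 3 -> 6 (+3), so maxProfit returns 7.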
```
#### File: Leetcode/leetcode/constrct_bst_preorder.py
```python
class Solution:
def bstFromPreorder(self, preorder: List[int]) -> TreeNode:
'''
Time: O(N), Must iterate over every element of the list
Space: O(N), max space needed for recursive stack
'''
self.index = 0
return self.bst_build(preorder, float('inf'))
def bst_build(self, preorder, limit):
# if we have run out of numbers in list
# or the curr val is larger than the limit
if self.index >= len(preorder) or preorder[self.index] > limit:
return None
root = TreeNode(preorder[self.index])
# inc to move with the list
self.index += 1
# must not be larger than the parent value
root.left = self.bst_build(preorder, root.val)
# must not be larger than the parents limit
root.right = self.bst_build(preorder, limit)
return root
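# Worked example: preorder = [8, 5, 1, 7, 10, 12] builds
#       8
#      / \
#     5   10
#    / \    \
#   1   7    12
# because each value is attached as soon as it fits under the current limit.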
```
#### File: Leetcode/leetcode/contig_array.py
```python
class Solution:
def findMaxLength(self, nums: List[int]) -> int:
'''
Time: O(N), where N is the number of elements in nums
Space: O(N), for the hashmap
'''
# hashtable to put the size of count at each index
counts = {}
# initialize: a count of 0 is treated as seen at index -1
counts[0] = -1
max_length = 0
count = 0
for i in range(len(nums)):
if nums[i] == 1:
count += 1
else:
count += -1
if count in counts:
# find the max between the current max
# and curr index - prev index
max_length = max(max_length, i - counts[count])
else:
# when the counter is not in the hashmap
# create an entry
counts[count] = i
return max_length
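# Worked example: nums = [0, 1, 0] gives running counts -1, 0, -1;
# count 0 was first "seen" at index -1, so at i = 1 the length is
# 1 - (-1) = 2, which is the answer.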
```
#### File: Leetcode/leetcode/contiguous_array.py
```python
class Solution:
'''
Time: O(N), where N is the len of the given array, must iterate over the array at least once
Space: O(N), space needed for the hashmap of the counts
'''
def findMaxLength(self, nums: List[int]) -> int:
# hashtable to put the size of count at each index
counts = {}
# initialize: a count of 0 is treated as seen at index -1
counts[0] = -1
max_length = 0
count = 0
for i in range(len(nums)):
if nums[i] == 1:
count += 1
else:
count += -1
if count in counts:
# find the max between the current max
# and curr index - originally seen index
max_length = max(max_length, i - counts[count])
else:
# when the counter is not in the hashmap
# create an entry
counts[count] = i
return max_length
```
#### File: Leetcode/leetcode/first_unique_char.py
```python
class Solution:
'''
Time: O(N), only need to loop through the string twice
Space: O(1), the hashtable can't grow larger than 26 letters
'''
def firstUniqChar(self, s: str) -> int:
first_unique = {}
# fill hashtable
for i in s:
if i in first_unique:
first_unique[i] += 1
else:
first_unique[i] = 1
# iterate over the string again to check in the hashtable
# need to find the first char whose count is 1
for i in range(len(s)):
if first_unique[s[i]] == 1:
return i
# if we read the entire string and not found
return -1
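# Worked example: s = "loveleetcode" has counts l:2, o:2, v:1, e:4,
# t:1, c:1, d:1; the first index whose char has count 1 is 2 ('v').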
```
#### File: Leetcode/leetcode/group_anagrams.py
```python
class Solution:
def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
'''
Time: O(N * M log M), N is the strings in strs,
M is the char in the string
Space: O(N*M), the dict grows with the number of words and their total length
'''
# create dictionary of a list
table = {}
for word in strs:
# sort the word
# use tuple for immutable object as key in dict
key = tuple(sorted(word))
# when the key is not found add [] + [word] = [word]
# else add current value + [word] = [...,word]
table[key] = table.get(key,[]) + [word]
return table.values()
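# Worked example: ["eat", "tea", "tan", "ate", "nat", "bat"] groups into
# [["eat", "tea", "ate"], ["tan", "nat"], ["bat"]], since "eat", "tea"
# and "ate" all sort to the key ('a', 'e', 't').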
```
#### File: Leetcode/leetcode/interval_list_intersections.py
```python
class Solution:
'''
Time: O(N + M), N is the A and M the list of B
Space: O(1), since output array is not considered in complexity
'''
def intervalIntersection(self, A: List[List[int]], B: List[List[int]]) -> List[List[int]]:
result = []
if not A or not B or len(A) == 0 or len(B) == 0:
return result
a_len = len(A)
b_len = len(B)
a_ptr, b_ptr = 0, 0
while a_ptr < a_len and b_ptr < b_len:
# sets the first pair as curr
curr_a = A[a_ptr]
curr_b = B[b_ptr]
# compute the overlap of the two intervals:
# l is the later of the two start points
l = max(curr_a[0], curr_b[0])
# r is the earlier of the two end points
r = min(curr_a[1], curr_b[1])
# record an intersection only if the intervals actually overlap
if l <= r:
result.append([l, r])
# updates ptrs of ranges
if curr_a[1] < curr_b[1]:
a_ptr += 1
elif curr_a[1] > curr_b[1]:
b_ptr += 1
else:
a_ptr += 1
b_ptr += 1
return result
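# Worked example: A = [[0, 2], [5, 10]], B = [[1, 5], [8, 12]] yields
# [[1, 2], [5, 5], [8, 10]]; each pair overlaps on [max(starts), min(ends)].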
```
#### File: Leetcode/leetcode/max_path_sum.py
```python
class Solution:
def maxPathSum(self, root: TreeNode) -> int:
'''
Time: O(N), number of nodes iterated once
Space: O(H), the recursion goes at most H (tree height) frames deep
'''
self.max_path_sum = float('-inf')
self.path_sum(root)
return self.max_path_sum
def path_sum(self, node):
if not node:
return 0
# doesn't return negatives
left = max(0, self.path_sum(node.left))
right = max(0, self.path_sum(node.right))
# check the 'triangle' path (left + parent + right) against the global max
# if it's larger, the best path so far runs left -> parent -> right
self.max_path_sum = max(self.max_path_sum, left + right + node.val)
# returns either the left track or the right track
# can only choose one for the next level of the stack frame
return max(left, right) + node.val
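# Worked example: for the tree [-10, 9, 20, null, null, 15, 7] the best
# path is 15 -> 20 -> 7; the triangle check at node 20 (15 + 20 + 7 = 42)
# sets the global maximum.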
```
#### File: Leetcode/leetcode/movezeroes.py
```python
class Solution:
def moveZeroes(self, nums: List[int]) -> None:
'''
Time: O(N), we always iterate over the entire array once
Space: O(1)
'''
"""
Do not return anything, modify nums in-place instead.
"""
# 2 pointers
# non zero ptr
n = 0
# array ptr
a = 0
"""
Note: a common mistake is to reuse the loop index i as the array
pointer; it needs to be tracked separately (as 'a' below).
"""
# iterate over the array
for i in range(len(nums)):
# if the non zero ptr is pointing at a non zero number
if nums[n] != 0:
# swap nonzero and array ptr
nums[n], nums[a] = nums[a], nums[n]
# inc nonzero ptr
n += 1
# inc array ptr
a += 1
# nonzero ptr is pointing at zero number
else:
# inc the non zero ptr
n += 1
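# Worked example: [0, 1, 0, 3, 12] becomes [1, 3, 12, 0, 0]; the non-zero
# pointer skips over zeros while the array pointer marks where the next
# non-zero value should land.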
```
#### File: Leetcode/leetcode/possible_bipartition.py
```python
class Solution:
'''
Time: O(D + N), where D is the length of the dislikes array and N is the number of people
Space: O(D + N)
'''
def possibleBipartition(self, N: int, dislikes: List[List[int]]) -> bool:
NO_COLOR, BLUE, RED = 0, 1, -1
# dfs
def blacklisted(person, color):
# assign the person the new color
colors[person] = color
# person2 is each person disliked by current person
for person2 in blacklist[person]:
# if the disliked person has the same color
# this means that person2 conflicts with curr person
if colors[person2] == color:
return False
# if the disliked person has not been colored yet,
# color them with the opposite color and recurse; fail on conflict
if colors[person2] == NO_COLOR and (not blacklisted(person2, -color)):
return False
return True
# quick answers for simple cases
if N == 1 or not dislikes:
return True
# dictionary that holds each disliked person
blacklist = collections.defaultdict(list)
# holds the color values of each person
colors = [NO_COLOR for _ in range(N+1)]
# fills the dictionary up with disliked persons
for p1, p2 in dislikes:
blacklist[p1].append(p2)
blacklist[p2].append(p1)
for person in range(1, N+1):
# if the person is uncolored, try to 2-color their whole group starting with BLUE
if colors[person] == NO_COLOR and (not blacklisted(person, BLUE)):
return False
return True
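# Worked example: N = 3 with dislikes [[1, 2], [1, 3], [2, 3]] returns
# False (an odd cycle cannot be 2-colored), while N = 4 with
# [[1, 2], [1, 3], [2, 4]] returns True with groups {1, 4} and {2, 3}.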
```
#### File: Leetcode/leetcode/remove_k_digits.py
```python
class Solution:
'''
Time: O(N), must iterate over the entire string
Space: O(N), size of the stack used grows as the input grows
'''
def removeKdigits(self, num: str, k: int) -> str:
stack = []
max_stack = len(num) - k
# edge case
if max_stack == 0:
return '0'
for i in num:
# stack is not empty
# the top element > curr
# k has not been satisfied
while stack and i < stack[-1] and k > 0:
k -= 1
stack.pop()
stack.append(i)
# if there are still digits to delete
# remove the top one(s)
if k > 0:
stack = stack[:-k]
# delete leading 0s if they exist
return "".join(stack).lstrip("0") or '0'
```
#### File: Leetcode/leetcode/single_elem_sorted_arr.py
```python
class Solution:
'''
Time: O(log N), modified binary search
Space: O(1)
'''
def singleNonDuplicate(self, nums: List[int]) -> int:
if len(nums) == 1:
return nums[0]
l = 0
r = len(nums)-1
while l < r:
mid = l + (r-l)//2
parity = mid % 2
# mid is even
if parity == 0:
if nums[mid] == nums[mid+1]:
# search right
l = mid+2
else:
# search left
r = mid
# mid is odd
else:
if nums[mid] == nums[mid+1]:
# search left
r = mid
else:
# search right
l = mid+1
# l should always be at the start of a pair
return nums[l]
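# Worked example: nums = [1, 1, 2, 3, 3, 4, 4, 8, 8]; mid = 4 is even and
# nums[4] != nums[5], so search left; the window narrows to index 2 and
# nums[2] == 2, the single element, is returned.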
```
#### File: Leetcode/leetcode/uncrossed_lines.py
```python
class Solution:
'''
Time: O(A*B), must use a double for loop to iterate over the two arrays and compare
Space: O(A*B), space needed for the dp array
'''
def maxUncrossedLines(self, A: List[int], B: List[int]) -> int:
# edge cases
if not A or not B:
return None
a_len = len(A)
b_len = len(B)
dp = [[0 for j in range(a_len + 1)] for i in range(b_len+1)]
result = float('-inf')
for i in range(1, b_len+1):
for j in range(1, a_len+1):
if B[i-1] == A[j-1]:
dp[i][j] = 1 + dp[i-1][j-1]
else:
dp[i][j] = max(dp[i][j-1], dp[i-1][j])
result = max(result, dp[i][j])
return result
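# Worked example: A = [1, 4, 2], B = [1, 2, 4] gives 2 with this
# LCS-style dp (draw 1-1 and 4-4, or 1-1 and 2-2; the pairs must not cross).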
```
#### File: Leetcode/leetcode/valid_paren.py
```python
# Any right parenthesis ')' must have a corresponding left parenthesis '('.
# Left parenthesis '(' must go before the corresponding right parenthesis ')'.
# '*' could be treated as a single right parenthesis ')'
# or a single left parenthesis '(' or an empty string.
# An empty string is also valid.
# EXAMPLE 1:
# Input: "()"
# Output: True
# EXAMPLE 2:
# Input: "(*))"
# Output: True
class Solution:
def checkValidString(self, s: str) -> bool:
'''
Time: O(N), where N is the length of the string
Space: O(1), constant space no aux space used
'''
# Greedy Algorithm
# increments at '(' dec for ')'
cmin = 0
# incs '(' and '*' decs for ')'
cmax = 0
for i in s:
if i == '(':
cmax += 1
cmin += 1
if i == ')':
cmax -= 1
# decrement cmin but clamp it at 0 so it never goes negative
cmin = max(cmin - 1, 0)
if i == '*':
cmax += 1
cmin = max(cmin - 1, 0)
if cmax < 0:
return False
return cmin == 0
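# Worked example (Example 2 above): s = "(*))" traces as
# '(' -> cmin=1, cmax=1; '*' -> cmin=0, cmax=2; ')' -> cmin=0, cmax=1;
# ')' -> cmin=0, cmax=0; cmax never went negative and cmin ends at 0,
# so the string is valid.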
``` |
{
"source": "Joes-BitGit/school_dashboard",
"score": 2
} |
#### File: venv/webapp/app.py
```python
from flask import Flask, render_template, request, jsonify
app = Flask(__name__)
@app.route('/')
@app.route('/login')
def home():
return render_template('index.html')
@app.route('/student/dash')
def student_dash():
return render_template('student_dash.html')
@app.route('/professor/dash')
def professor_dash():
return render_template('professor_dash.html')
@app.route('/student/class')
def student_class():
return render_template('student_class.html')
@app.route('/professor/class')
def professor_class():
return render_template('professor_class.html')
@app.route('/student/<username>')
def profile(username):
return render_template('username.html', username=username)
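# e.g. a request to /student/alice (hypothetical user) renders
# username.html with username='alice'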
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "joesecurity/jbxapi",
"score": 2
} |
#### File: joesecurity/jbxapi/jbxapi.py
```python
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import os
import sys
import io
import json
import copy
import argparse
import time
import itertools
import random
import errno
import shutil
import tempfile
import math
try:
import requests
except ImportError:
print("Please install the Python 'requests' package via pip", file=sys.stderr)
sys.exit(1)
__version__ = "3.17.2"
# API URL.
API_URL = "https://jbxcloud.joesecurity.org/api"
# for on-premise installations, use the following URL:
# API_URL = "http://" + webserveraddress + "/joesandbox/index.php/api"
# APIKEY, to generate goto user settings - API key
API_KEY = ""
# (for Joe Sandbox Cloud only)
# Set to True if you agree to the Terms and Conditions.
# https://jbxcloud.joesecurity.org/resources/termsandconditions.pdf
ACCEPT_TAC = False
# default submission parameters
# when specifying None, the server decides
UnsetBool = object()
submission_defaults = {
# system selection, set to None for automatic selection
# 'systems': ('w7', 'w7x64'),
'systems': None,
# comment for an analysis
'comments': None,
# maximum analysis time
'analysis-time': None,
# password for decrypting documents like MS Office and PDFs
'document-password': None,
# This password will be used to decrypt archives (zip, 7z, rar etc.). Default password is "<PASSWORD>".
'archive-password': None,
# Will start the sample with the given command-line argument. Currently only available for Windows analyzers.
'command-line-argument': None,
# country for routing internet through
'localized-internet-country': None,
# tags
'tags': None,
# enable internet access during analysis
'internet-access': UnsetBool,
# enable internet simulation during analysis
'internet-simulation': UnsetBool,
# lookup samples in the report cache
'report-cache': UnsetBool,
# hybrid code analysis
'hybrid-code-analysis': UnsetBool,
# hybrid decompilation
'hybrid-decompilation': UnsetBool,
# inspect ssl traffic
'ssl-inspection': UnsetBool,
# instrumentation of vba scripts
'vba-instrumentation': UnsetBool,
# instrumentation of javascript
'js-instrumentation': UnsetBool,
# traces Java JAR files
'java-jar-tracing': UnsetBool,
# traces .Net files
'dotnet-tracing': UnsetBool,
# send an e-mail upon completion of the analysis
'email-notification': UnsetBool,
# starts the Sample with normal user privileges
'start-as-normal-user': UnsetBool,
# Set the system date for the analysis. Format is YYYY-MM-DD
'system-date': None,
# changes the locale, location, and keyboard layout of the analysis machine
'language-and-locale': None,
# Do not unpack archive files (zip, 7zip etc).
'archive-no-unpack': UnsetBool,
# Enable Hypervisor based Inspection
"hypervisor-based-inspection": UnsetBool,
# select fast mode for a faster but less thorough analysis
'fast-mode': UnsetBool,
# Enables secondary Results such as Yara rule generation, classification via Joe Sandbox Class as well as several detail reports.
# Analysis will run faster if secondary results are not enabled.
'secondary-results': UnsetBool,
# Perform APK DEX code instrumentation. Only applies to Android analyzer. Default true.
'apk-instrumentation': UnsetBool,
# Perform AMSI unpacking. Only applies to Windows. Default true
'amsi-unpacking': UnsetBool,
# Use live interaction. Requires user interaction via the web UI. Default false
'live-interaction': UnsetBool,
# encryption password for analyses
'encrypt-with-password': None,
# choose the browser for URL analyses
'browser': None,
## JOE SANDBOX CLOUD EXCLUSIVE PARAMETERS
# export the report to Joe Sandbox View
'export-to-jbxview': UnsetBool,
# lookup the reputation of URLs and domains (Requires sending URLs third-party services.)
'url-reputation': UnsetBool,
# Delete the analysis after X days
'delete-after-days': None,
## ON PREMISE EXCLUSIVE PARAMETERS
# priority of submissions
'priority': None,
## DEPRECATED PARAMETERS
'office-files-password': None,
'anti-evasion-date': UnsetBool,
'remote-assistance': UnsetBool,
'remote-assistance-view-only': UnsetBool,
'static-only': UnsetBool,
}
class JoeSandbox(object):
def __init__(self, apikey=None, apiurl=None, accept_tac=None,
timeout=None, verify_ssl=True, retries=3,
proxies=None, user_agent=None):
"""
Create a JoeSandbox object.
Parameters:
apikey: the api key
apiurl: the api url
accept_tac: Joe Sandbox Cloud requires accepting the Terms and Conditions.
https://jbxcloud.joesecurity.org/resources/termsandconditions.pdf
timeout: Timeout in seconds for accessing the API. Raises a ConnectionError on timeout.
verify_ssl: Enable or disable checking SSL certificates.
retries: Number of times requests should be retried if they timeout.
proxies: Proxy settings, see the requests library for more information:
http://docs.python-requests.org/en/master/user/advanced/#proxies
user_agent: The user agent. Use this when you write an integration with Joe Sandbox
so that it is possible to track how often an integration is being used.
"""
if apikey is None:
apikey = os.environ.get("JBX_API_KEY", API_KEY)
if apiurl is None:
apiurl = os.environ.get("JBX_API_URL", API_URL)
if accept_tac is None:
if "JBX_ACCEPT_TAC" in os.environ:
accept_tac = os.environ.get("JBX_ACCEPT_TAC") == "1"
else:
accept_tac = ACCEPT_TAC
self.apikey = apikey
self.apiurl = apiurl.rstrip("/")
self.accept_tac = accept_tac
self.timeout = timeout
self.retries = retries
if user_agent:
user_agent += " (jbxapi.py {})".format(__version__)
else:
user_agent = "jbxapi.py {}".format(__version__)
self.session = requests.Session()
self.session.verify = verify_ssl
self.session.proxies = proxies
self.session.headers.update({"User-Agent": user_agent})
def analysis_list(self):
"""
Fetch a list of all analyses.
Consider using `analysis_list_paged` instead.
"""
return list(self.analysis_list_paged())
def analysis_list_paged(self):
"""
Fetch all analyses. Returns an iterator.
The returned iterator can throw an exception anytime `next()` is called on it.
"""
pagination_next = None
while True:
response = self._post(self.apiurl + '/v2/analysis/list', data={
"apikey": self.apikey,
"pagination": "1",
"pagination_next": pagination_next,
})
data = self._raise_or_extract(response)
for item in data:
yield item
try:
pagination_next = response.json()["pagination"]["next"]
except KeyError:
break
def submit_sample(self, sample, cookbook=None, params={},
_extra_params={}, _chunked_upload=True):
"""
Submit a sample and returns the submission id.
Parameters:
sample: The sample to submit. Needs to be a file-like object or a tuple in
the shape (filename, file-like object).
cookbook: Uploads a cookbook together with the sample. Needs to be a file-like object or a
tuple in the shape (filename, file-like object)
params: Customize the sandbox parameters. They are described in more detail
in the default submission parameters.
Example:
import jbxapi
joe = jbxapi.JoeSandbox(user_agent="My Integration")
with open("sample.exe", "rb") as f:
joe.submit_sample(f, params={"systems": ["w7"]})
Example:
import io, jbxapi
joe = jbxapi.JoeSandbox(user_agent="My Integration")
cookbook = io.BytesIO(b"cookbook content")
with open("sample.exe", "rb") as f:
joe.submit_sample(f, cookbook=cookbook)
"""
params = copy.copy(params)
files = {}
self._check_user_parameters(params)
if cookbook:
files['cookbook'] = cookbook
# extract sample name
if isinstance(sample, (tuple, list)):
filename, sample = sample
else: # sample is file-like object
filename = requests.utils.guess_filename(sample) or "sample"
retry_with_regular_upload = False
if _chunked_upload:
orig_pos = sample.tell()
params["chunked-sample"] = filename
try:
response = self._submit(params, files, _extra_params=_extra_params)
self._chunked_upload('/v2/submission/chunked-sample', sample, {
"apikey": self.apikey,
"submission_id": response["submission_id"],
})
except InvalidParameterError as e:
# re-raise if the error is not due to unsupported chunked upload
if "chunked-sample" not in e.message:
raise
retry_with_regular_upload = True
except _ChunkedUploadNotPossible as e:
retry_with_regular_upload = True
if retry_with_regular_upload:
del params["chunked-sample"]
sample.seek(orig_pos)
if not _chunked_upload or retry_with_regular_upload:
files["sample"] = (filename, sample)
response = self._submit(params, files, _extra_params=_extra_params)
return response
def submit_sample_url(self, url, params={}, _extra_params={}):
"""
Submit a sample at a given URL for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['sample-url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_url(self, url, params={}, _extra_params={}):
"""
Submit a website for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_cookbook(self, cookbook, params={}, _extra_params={}):
"""
Submit a cookbook.
"""
self._check_user_parameters(params)
files = {'cookbook': cookbook}
return self._submit(params, files, _extra_params=_extra_params)
def _prepare_params_for_submission(self, params):
params['apikey'] = self.apikey
params['accept-tac'] = "1" if self.accept_tac else "0"
# rename array parameters
params['systems[]'] = params.pop('systems', None)
params['tags[]'] = params.pop('tags', None)
# rename aliases
if 'document-password' in params:
params['office-files-password'] = params.pop('document-password')
# submit booleans as "0" and "1"
for key, value in list(params.items()):
try:
default = submission_defaults[key]
except KeyError:
continue
if default is UnsetBool or isinstance(default, bool):
params[key] = _to_bool(value, default)
return params
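# e.g. params {"systems": ["w7"], "tags": ["test"]} come back with the
# keys renamed to "systems[]" and "tags[]", with "apikey" and
# "accept-tac" added, and with boolean values normalised via _to_bool.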
def _submit(self, params, files=None, _extra_params={}):
data = copy.copy(submission_defaults)
data.update(params)
data = self._prepare_params_for_submission(data)
data.update(_extra_params)
response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)
return self._raise_or_extract(response)
def _chunked_upload(self, url, f, params):
try:
file_size = self._file_size(f)
except (IOError, OSError):
raise _ChunkedUploadNotPossible("The file does not support chunked upload.")
chunk_size = 10 * 1024 * 1024
chunk_count = int(math.ceil(file_size / chunk_size))
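# e.g. a 25 MiB file split into 10 MiB chunks gives chunk_count == 3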
params = copy.copy(params)
params.update({
"file-size": file_size,
"chunk-size": chunk_size,
"chunk-count": chunk_count,
})
chunk_index = 1
sent_size = 0
response = None
while sent_size < file_size:
# collect next chunk
chunk_data = io.BytesIO()
chunk_data_len = 0
while chunk_data_len < chunk_size:
read_data = f.read(chunk_size - chunk_data_len)
if read_data is None:
raise _ChunkedUploadNotPossible("Non-blocking files are not supported.")
if len(read_data) <= 0:
break
chunk_data.write(read_data)
chunk_data_len += len(read_data)
params["current-chunk-index"] = chunk_index
params["current-chunk-size"] = chunk_data_len
chunk_index += 1
chunk_data.seek(0)
response = self._post(self.apiurl + url, data=params, files={"chunk": chunk_data})
self._raise_or_extract(response) # raise Exception if the response is negative
sent_size += chunk_data_len
return response
def _file_size(self, f):
"""
Tries to find the size of the file-like object 'f'.
If the file-pointer is advanced (f.tell()) it subtracts this.
Raises ValueError if it fails to do so.
"""
pos = f.tell()
f.seek(0, os.SEEK_END)
end_pos = f.tell()
f.seek(pos, os.SEEK_SET)
return end_pos - pos
def submission_list(self, **kwargs):
"""
Fetch all submissions. Returns an iterator.
You can give the named parameter `include_shared`.
The returned iterator can throw an exception every time `next()` is called on it.
"""
include_shared = kwargs.get("include_shared", None)
pagination_next = None
while True:
response = self._post(self.apiurl + '/v2/submission/list', data={
"apikey": self.apikey,
"pagination_next": pagination_next,
"include-shared": _to_bool(include_shared),
})
data = self._raise_or_extract(response)
for item in data:
yield item
try:
pagination_next = response.json()["pagination"]["next"]
except KeyError:
break
def submission_info(self, submission_id):
"""
Returns information about a submission including all the analysis ids.
"""
response = self._post(self.apiurl + '/v2/submission/info', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def submission_delete(self, submission_id):
"""
Delete a submission.
"""
response = self._post(self.apiurl + '/v2/submission/delete', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def server_online(self):
"""
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
"""
response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def analysis_info(self, webid):
"""
Show the status and most important attributes of an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/info", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_delete(self, webid):
"""
Delete an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/delete", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_download(self, webid, type, run=None, file=None, password=None):
"""
Download a resource for an analysis. E.g. the full report, binaries, screenshots.
The full list of resources can be found in our API documentation.
When `file` is given, the return value is the filename specified by the server,
otherwise it's a tuple of (filename, bytes).
Parameters:
webid: the webid of the analysis
type: the report type, e.g. 'html', 'bins'
run: specify the run. If it is None, let Joe Sandbox pick one
file: a writable file-like object (When omitted, the method returns
the data as a bytes object.)
password: a password for decrypting a resource (see the
encrypt-with-password submission option)
Example:
name, json_report = joe.analysis_download(123456, 'jsonfixed')
Example:
with open("full_report.html", "wb") as f:
name = joe.analysis_download(123456, "html", file=f)
"""
# when no file is specified, we create our own
if file is None:
_file = io.BytesIO()
else:
_file = file
# password-encrypted resources have to be stored in a temp file first
if password:
_decrypted_file = _file
_file = tempfile.TemporaryFile()
data = {
'apikey': self.apikey,
'webid': webid,
'type': type,
'run': run,
}
response = self._post(self.apiurl + "/v2/analysis/download", data=data, stream=True)
try:
filename = response.headers["Content-Disposition"].split("filename=")[1][1:-2]
except Exception as e:
filename = type
# do standard error handling when encountering an error (i.e. throw an exception)
if not response.ok:
self._raise_or_extract(response)
raise RuntimeError("Unreachable because statement above should raise.")
try:
for chunk in response.iter_content(1024):
_file.write(chunk)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# decrypt temporary file
if password:
_file.seek(0)
self._decrypt(_file, _decrypted_file, password)
_file.close()
_file = _decrypted_file
# no user file means we return the content
if file is None:
return (filename, _file.getvalue())
else:
return filename
def analysis_search(self, query):
"""
Lists the webids of the analyses that match the given query.
Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
"""
response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query})
return self._raise_or_extract(response)
def server_systems(self):
"""
Retrieve a list of available systems.
"""
response = self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def account_info(self):
"""
Only available on Joe Sandbox Cloud
Show information about the account.
"""
response = self._post(self.apiurl + "/v2/account/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_info(self):
"""
Query information about the server.
"""
response = self._post(self.apiurl + "/v2/server/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_lia_countries(self):
"""
Show the available localized internet anonymization countries.
"""
response = self._post(self.apiurl + "/v2/server/lia_countries", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_languages_and_locales(self):
"""
Show the available languages and locales
"""
response = self._post(self.apiurl + "/v2/server/languages_and_locales", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def joelab_machine_info(self, machine):
"""
Show JoeLab Machine info.
"""
response = self._post(self.apiurl + "/v2/joelab/machine/info", data={'apikey': self.apikey,
'machine': machine})
return self._raise_or_extract(response)
def joelab_images_list(self, machine):
"""
List available images.
"""
response = self._post(self.apiurl + "/v2/joelab/machine/info", data={'apikey': self.apikey,
'machine': machine})
return self._raise_or_extract(response)
def joelab_images_reset(self, machine, image=None):
"""
Reset the disk image of a machine.
"""
response = self._post(self.apiurl + "/v2/joelab/machine/info", data={'apikey': self.apikey,
'machine': machine,
'accept-tac': "1" if self.accept_tac else "0",
'image': image})
return self._raise_or_extract(response)
def joelab_filesystem_upload(self, machine, file, path=None, _chunked_upload=True):
"""
Upload a file to a Joe Lab machine.
Parameters:
machine The machine id.
file: The file to upload. Needs to be a file-like object or a tuple in
the shape (filename, file-like object).
"""
data = {
"apikey": self.apikey,
"accept-tac": "1" if self.accept_tac else "0",
"machine": machine,
"path": path,
}
# extract sample name
if isinstance(file, (tuple, list)):
filename, file = file
else: # sample is file-like object
filename = requests.utils.guess_filename(file) or "file"
retry_with_regular_upload = False
if _chunked_upload:
orig_pos = file.tell()
# filename
try:
response = self._chunked_upload('/v2/joelab/filesystem/upload-chunked', file, data)
except (UnknownEndpointError, _ChunkedUploadNotPossible) as e:
retry_with_regular_upload = True
file.seek(orig_pos)
if not _chunked_upload or retry_with_regular_upload:
files = {"file": (filename, file)}
response = self._post(self.apiurl + '/v2/joelab/filesystem/upload', data=data, files=files)
return self._raise_or_extract(response)
def joelab_filesystem_download(self, machine, path, file):
"""
Download a file from a Joe Lab machine.
Parameters:
machine: The machine id.
path: The path of the file on the Joe Lab machine.
file: a writable file-like object
Example:
with open("myfile.zip", "wb") as f:
joe.joelab_filesystem_download("w7_10", "C:\\windows32\\myfile.zip", f)
"""
data = {'apikey': self.apikey,
'machine': machine,
'path': path}
response = self._post(self.apiurl + "/v2/joelab/filesystem/download", data=data, stream=True)
# do standard error handling when encountering an error (i.e. throw an exception)
if not response.ok:
self._raise_or_extract(response)
raise RuntimeError("Unreachable because statement above should raise.")
try:
for chunk in response.iter_content(1024):
file.write(chunk)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
def joelab_network_info(self, machine):
"""
Show Network info
"""
response = self._post(self.apiurl + "/v2/joelab/network/info", data={'apikey': self.apikey,
'machine': machine})
return self._raise_or_extract(response)
def joelab_network_update(self, machine, settings):
"""
Update the network settings.
"""
params = dict(settings)
params["internet-enabled"] = _to_bool(params["internet-enabled"])
params['apikey'] = self.apikey
params['accept-tac'] = "1" if self.accept_tac else "0"
params['machine'] = machine
response = self._post(self.apiurl + "/v2/joelab/network/update", data=params)
return self._raise_or_extract(response)
def joelab_pcap_start(self, machine):
"""
Start PCAP recording.
"""
params = {
'apikey': self.apikey,
'accept-tac': "1" if self.accept_tac else "0",
'machine': machine,
}
response = self._post(self.apiurl + "/v2/joelab/pcap/start", data=params)
return self._raise_or_extract(response)
def joelab_pcap_stop(self, machine):
"""
Stop PCAP recording.
"""
params = {
'apikey': self.apikey,
'accept-tac': "1" if self.accept_tac else "0",
'machine': machine,
}
response = self._post(self.apiurl + "/v2/joelab/pcap/stop", data=params)
return self._raise_or_extract(response)
def joelab_pcap_download(self, machine, file):
"""
Download the captured PCAP.
Parameters:
machine: The machine id.
file: a writable file-like object
Example:
with open("dump.pcap", "wb") as f:
joe.joelab_pcap_download("w7_10", f)
"""
data = {'apikey': self.apikey,
'machine': machine}
response = self._post(self.apiurl + "/v2/joelab/pcap/download", data=data, stream=True)
# do standard error handling when encountering an error (i.e. throw an exception)
if not response.ok:
self._raise_or_extract(response)
raise RuntimeError("Unreachable because statement above should raise.")
try:
for chunk in response.iter_content(1024):
file.write(chunk)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
def joelab_list_exitpoints(self):
"""
List the available internet exit points.
"""
response = self._post(self.apiurl + "/v2/joelab/internet-exitpoints/list", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def _decrypt(self, source, target, password):
"""
Decrypt encrypted files downloaded from a Joe Sandbox server.
"""
try:
import pyzipper
except ImportError:
raise NotImplementedError("Decryption requires Python 3 and the pyzipper library.")
try:
with pyzipper.AESZipFile(source) as myzip:
infolist = myzip.infolist()
assert(len(infolist) == 1)
with myzip.open(infolist[0], pwd=password) as zipmember:
shutil.copyfileobj(zipmember, target)
except Exception as e:
raise JoeException(str(e))
def _post(self, url, data=None, **kwargs):
"""
Wrapper around requests.post which
(a) always inserts a timeout
(b) converts errors to ConnectionError
(c) re-tries a few times
"""
# convert file names to ASCII for old urllib versions if necessary
_urllib3_fix_filenames(kwargs)
# try the request a few times
for i in itertools.count(1):
try:
return self.session.post(url, data=data, timeout=self.timeout, **kwargs)
except requests.exceptions.Timeout as e:
# exhausted all retries
if i >= self.retries:
raise ConnectionError(e)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# exponential backoff
max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...
time.sleep(random.uniform(0, max_backoff))
def _check_user_parameters(self, user_parameters):
"""
Verifies that the parameter dict given by the user only contains
known keys. This ensures that the user detects typos faster.
"""
if not user_parameters:
return
# sanity check against typos
for key in user_parameters:
if key not in submission_defaults:
raise ValueError("Unknown parameter {0}".format(key))
def _raise_or_extract(self, response):
"""
Raises an exception if the response indicates an API error.
Otherwise returns the object at the 'data' key of the API response.
"""
try:
data = response.json()
except ValueError:
raise JoeException("The server responded with an unexpected format ({}). Is the API url correct?". format(response.status_code))
try:
if response.ok:
return data['data']
else:
error = data['errors'][0]
raise ApiError(error)
except (KeyError, TypeError):
raise JoeException("Unexpected data ({}). Is the API url correct?". format(response.status_code))
class JoeException(Exception):
pass
class ConnectionError(JoeException):
pass
class _ChunkedUploadNotPossible(JoeException):
pass
class ApiError(JoeException):
def __new__(cls, raw):
# select a more specific subclass if available
if cls is ApiError:
subclasses = {
2: MissingParameterError,
3: InvalidParameterError,
4: InvalidApiKeyError,
5: ServerOfflineError,
6: InternalServerError,
7: PermissionError,
8: UnknownEndpointError,
}
try:
cls = subclasses[raw["code"]]
except KeyError:
pass
return super(ApiError, cls).__new__(cls, raw["message"])
def __init__(self, raw):
super(ApiError, self).__init__(raw["message"])
self.raw = copy.deepcopy(raw)
self.code = raw["code"]
self.message = raw["message"]
class MissingParameterError(ApiError): pass
class InvalidParameterError(ApiError): pass
class InvalidApiKeyError(ApiError): pass
class ServerOfflineError(ApiError): pass
class InternalServerError(ApiError): pass
class PermissionError(ApiError): pass
class UnknownEndpointError(ApiError): pass
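# e.g. ApiError({"code": 4, "message": "invalid apikey"}) constructs an
# InvalidApiKeyError instance via the subclass lookup in __new__ above.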
def _cli_bytes_from_str(text):
"""
Python 2/3 compatibility function to ensure that what is sent on the command line
is converted into bytes. In Python 2 this is a no-op.
"""
if isinstance(text, bytes):
return text
else:
return text.encode("utf-8", errors="surrogateescape")
def cli(argv):
def print_json(value, file=sys.stdout):
print(json.dumps(value, indent=4, sort_keys=True), file=file)
def analysis_list(joe, args):
print_json(joe.analysis_list())
def submit(joe, args):
params = {name[6:]: value for name, value in vars(args).items()
if name.startswith("param-") and value is not None}
extra_params = {}
for name, value in args.extra_params:
values = extra_params.setdefault(name, [])
values.append(value)
if args.url_mode:
print_json(joe.submit_url(args.sample, params=params, _extra_params=extra_params))
elif args.sample_url_mode:
print_json(joe.submit_sample_url(args.sample, params=params, _extra_params=extra_params))
else:
try:
f_cookbook = open(args.cookbook, "rb") if args.cookbook is not None else None
def _submit_file(path):
with open(path, "rb") as f:
print_json(joe.submit_sample(f, params=params, _extra_params=extra_params, cookbook=f_cookbook))
if os.path.isdir(args.sample):
for dirpath, _, filenames in os.walk(args.sample):
for filename in filenames:
_submit_file(os.path.join(dirpath, filename))
else:
_submit_file(args.sample)
finally:
if f_cookbook is not None:
f_cookbook.close()
def submission_list(joe, args):
print_json(list(joe.submission_list(include_shared=args.include_shared)))
def submission_info(joe, args):
print_json(joe.submission_info(args.submission_id))
def submission_delete(joe, args):
print_json(joe.submission_delete(args.submission_id))
def server_online(joe, args):
print_json(joe.server_online())
def analysis_info(joe, args):
print_json(joe.analysis_info(args.webid))
def analysis_delete(joe, args):
print_json(joe.analysis_delete(args.webid))
def account_info(joe, args):
print_json(joe.account_info())
def server_info(joe, args):
print_json(joe.server_info())
def server_lia_countries(joe, args):
print_json(joe.server_lia_countries())
def server_languages_and_locales(joe, args):
print_json(joe.server_languages_and_locales())
def analysis_report(joe, args):
(_, report) = joe.analysis_download(args.webid, type="irjsonfixed", run=args.run, password=args.password)
try:
print_json(json.loads(report))
except json.JSONDecodeError as e:
raise JoeException("Invalid response. Is the API url correct?")
def analysis_download(joe, args):
if args.dir is None:
args.dir = args.webid
try:
os.mkdir(args.dir)
except Exception as e:
# ignore if it already exists
if e.errno != errno.EEXIST:
raise
paths = {}
errors = []
for type in args.types:
try:
(filename, data) = joe.analysis_download(args.webid, type=type, run=args.run, password=args.password)
except ApiError as e:
if not args.ignore_errors:
raise
print(e.message, file=sys.stderr)
paths[type] = None
errors.append(e)
continue
path = os.path.join(args.dir, filename)
paths[type] = os.path.abspath(path)
try:
with open(path, "wb") as f:
f.write(data)
except Exception as e:
# delete incomplete data in case of an exception
os.remove(path)
raise
if errors and all(p is None for p in paths.values()):
raise errors[0]
print_json(paths)
def analysis_search(joe, args):
print_json(joe.analysis_search(args.searchterm))
def server_systems(joe, args):
print_json(joe.server_systems())
def joelab_machine_info(joe, args):
print_json(joe.joelab_machine_info(args.machine))
def joelab_filesystem_upload(joe, args):
with open(args.file, "rb") as f:
print_json(joe.joelab_filesystem_upload(args.machine, f, args.path))
def joelab_filesystem_download(joe, args):
output_path = args.destination
if os.path.isdir(output_path):
filename = os.path.basename(args.path.replace("\\", "/"))
output_path = os.path.join(output_path, filename)
with open(output_path, "wb") as f:
joe.joelab_filesystem_download(args.machine, args.path, f)
print_json({"path": os.path.abspath(output_path)})
def joelab_images_list(joe, args):
print_json(joe.joelab_images_list(args.machine))
def joelab_images_reset(joe, args):
print_json(joe.joelab_images_reset(args.machine, args.image))
def joelab_network_info(joe, args):
print_json(joe.joelab_network_info(args.machine))
def joelab_network_update(joe, args):
print_json(joe.joelab_network_update(args.machine, {
"internet-enabled": args.enable_internet,
"internet-exitpoint": args.internet_exitpoint,
}))
def joelab_pcap_start(joe, args):
print_json(joe.joelab_pcap_start(args.machine))
def joelab_pcap_stop(joe, args):
print_json(joe.joelab_pcap_stop(args.machine))
def joelab_pcap_download(joe, args):
output_path = args.destination
if os.path.isdir(output_path):
filename = "{}.pcap".format(args.machine)
output_path = os.path.join(output_path, filename)
with open(output_path, "wb") as f:
joe.joelab_pcap_download(args.machine, f)
print_json({"path": os.path.abspath(output_path)})
def joelab_exitpoints_list(joe, args):
print_json(joe.joelab_list_exitpoints())
# common arguments
common_parser = argparse.ArgumentParser(add_help=False)
common_group = common_parser.add_argument_group("common arguments")
common_group.add_argument('--apiurl',
help="Api Url (You can also set the env. variable JBX_API_URL.)")
common_group.add_argument('--apikey',
help="Api Key (You can also set the env. variable JBX_API_KEY.)")
common_group.add_argument('--accept-tac', action='store_true', default=None,
help="(Joe Sandbox Cloud only): Accept the terms and conditions: "
"https://jbxcloud.joesecurity.org/download/termsandconditions.pdf "
"(You can also set the env. variable ACCEPT_TAC=1.)")
common_group.add_argument('--no-check-certificate', action="store_true",
help="Do not check the server certificate.")
common_group.add_argument('--version', action='store_true',
help="Show version and exit.")
parser = argparse.ArgumentParser(description="Joe Sandbox Web API")
# add subparsers
subparsers = parser.add_subparsers(metavar="<command>", title="commands")
subparsers.required = True
# submit <filepath>
submit_parser = subparsers.add_parser('submit', parents=[common_parser],
usage="%(prog)s [--apiurl APIURL] [--apikey APIKEY] [--accept-tac]\n" +
24 * " " + "[parameters ...]\n" +
24 * " " + "[--url | --sample-url | --cookbook COOKBOOK]\n" +
24 * " " + "sample",
help="Submit a sample to Joe Sandbox.")
submit_parser.add_argument('sample',
help="Path or URL to the sample.")
group = submit_parser.add_argument_group("submission mode")
submission_mode_parser = group.add_mutually_exclusive_group(required=False)
# url submissions
submission_mode_parser.add_argument('--url', dest="url_mode", action="store_true",
help="Analyse the given URL instead of a sample.")
# sample url submissions
submission_mode_parser.add_argument('--sample-url', dest="sample_url_mode", action="store_true",
help="Download the sample from the given url.")
# cookbook submission
submission_mode_parser.add_argument('--cookbook', dest="cookbook",
help="Use the given cookbook.")
submit_parser.add_argument('--param', dest="extra_params", default=[], action="append", nargs=2, metavar=("NAME", "VALUE"),
help="Specify additional parameters.")
submit_parser.set_defaults(func=submit)
params = submit_parser.add_argument_group('analysis parameters')
def add_bool_param(parser, *names, **kwargs):
dest = kwargs.pop("dest")
help = kwargs.pop("help", None)
assert(not kwargs)
negative_names = []
for name in names:
if name.startswith("--no-"):
negative_names.append("-" + name[4:])
else:
negative_names.append("--no-" + name[2:])
parser.add_argument(*names, dest=dest, action="store_true", default=None, help=help)
parser.add_argument(*negative_names, dest=dest, action="store_false", default=None)
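# e.g. add_bool_param(params, "--internet", dest="param-internet-access")
# registers both --internet and --no-internet, storing True/False in the
# same "param-internet-access" destination (default None means "unset").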
params.add_argument("--comments", dest="param-comments", metavar="TEXT",
help="Comment for the analysis.")
params.add_argument("--system", dest="param-systems", action="append", metavar="SYSTEM",
help="Select systems. Can be specified multiple times.")
params.add_argument("--analysis-time", dest="param-analysis-time", metavar="SEC",
help="Analysis time in seconds.")
add_bool_param(params, "--internet", dest="param-internet-access",
help="Enable Internet Access (on by default).")
add_bool_param(params, "--internet-simulation", dest="param-internet-simulation",
help="Enable Internet Simulation. No Internet Access is granted.")
add_bool_param(params, "--cache", dest="param-report-cache",
help="Check cache for a report before analyzing the sample.")
params.add_argument("--document-password", dest="param-document-password", metavar="PASSWORD",
help="Password for decrypting documents like MS Office and PDFs")
params.add_argument("--archive-password", dest="param-archive-password", metavar="PASSWORD",
help="This password will be used to decrypt archives (zip, 7z, rar etc.). Default password is '<PASSWORD>'.")
params.add_argument("--command-line-argument", dest="param-command-line-argument", metavar="TEXT",
help="Will start the sample with the given command-line argument. Currently only available for Windows analyzers.")
add_bool_param(params, "--hca", dest="param-hybrid-code-analysis",
help="Enable hybrid code analysis (on by default).")
add_bool_param(params, "--dec", dest="param-hybrid-decompilation",
help="Enable hybrid decompilation.")
add_bool_param(params, "--ssl-inspection", dest="param-ssl-inspection",
help="Inspect SSL traffic")
add_bool_param(params, "--vbainstr", dest="param-vba-instrumentation",
help="Enable VBA script instrumentation (on by default).")
add_bool_param(params, "--jsinstr", dest="param-js-instrumentation",
help="Enable JavaScript instrumentation (on by default).")
add_bool_param(params, "--java", dest="param-java-jar-tracing",
help="Enable Java JAR tracing (on by default).")
add_bool_param(params, "--net", dest="param-dotnet-tracing",
help="Enable .Net tracing.")
add_bool_param(params, "--normal-user", dest="param-start-as-normal-user",
help="Start sample as normal user.")
params.add_argument("--system-date", dest="param-system-date", metavar="YYYY-MM-DD",
help="Set the system date.")
add_bool_param(params, "--no-unpack", "--archive-no-unpack", dest="param-archive-no-unpack",
help="Do not unpack archive (zip, 7zip etc).")
add_bool_param(params, "--hypervisor-based-inspection", dest="param-hypervisor-based-inspection",
help="Enable Hypervisor based Inspection.")
params.add_argument("--localized-internet-country", "--lia", dest="param-localized-internet-country", metavar="NAME",
help="Country for routing internet traffic through.")
params.add_argument("--language-and-locale", "--langloc", dest="param-language-and-locale", metavar="NAME",
help="Language and locale to be set on Windows analyzer.")
params.add_argument("--tag", dest="param-tags", action="append", metavar="TAG",
help="Add tags to the analysis.")
params.add_argument("--delete-after-days", "--delafter", type=int, dest="param-delete-after-days", metavar="DAYS",
help="Delete analysis after X days.")
params.add_argument("--browser", dest="param-browser", metavar="BROWSER",
help="Browser for URL analyses.")
add_bool_param(params, "--fast-mode", dest="param-fast-mode",
help="Fast Mode focusses on fast analysis and detection versus deep forensic analysis.")
add_bool_param(params, "--secondary-results", dest="param-secondary-results",
help="Enables secondary results such as Yara rule generation, classification via Joe Sandbox Class as "
"well as several detail reports. "
"Analysis will run faster with disabled secondary results.")
add_bool_param(params, "--apk-instrumentation", dest="param-apk-instrumentation",
help="Perform APK DEX code instrumentation. Only applies to Android analyzer. Default on.")
add_bool_param(params, "--amsi-unpacking", dest="param-amsi-unpacking",
help="Perform AMSI unpacking. Only applies to Windows analyzer. Default on.")
add_bool_param(params, "--live-interaction", dest="param-live-interaction",
help="Use live interaction. Requires user interaction via the web UI. "
"Default off.")
params.add_argument("--encrypt-with-password", "--encrypt", type=_cli_bytes_from_str,
dest="param-encrypt-with-password", metavar="PASSWORD",
help="Encrypt the analysis data with the given password")
params.add_argument("--priority", dest="param-priority", type=int,
help="Priority of submission. (Only on on-premise.)")
# deprecated
params.add_argument("--office-pw", dest="param-document-password", metavar="PASSWORD",
help=argparse.SUPPRESS)
add_bool_param(params, "--anti-evasion-date", dest="param-anti-evasion-date",
help=argparse.SUPPRESS)
add_bool_param(params, "--remote-assistance", dest="param-remote-assistance",
help=argparse.SUPPRESS)
# submission <command>
submission_parser = subparsers.add_parser('submission',
help="Manage submissions")
submission_subparsers = submission_parser.add_subparsers(metavar="<submission command>", title="submission commands")
submission_subparsers.required = True
# submission list
submission_list_parser = submission_subparsers.add_parser('list', parents=[common_parser],
help="Show all submitted submissions.")
add_bool_param(submission_list_parser, "--include-shared", dest="include_shared",
help="Include shared submissions")
submission_list_parser.set_defaults(func=submission_list)
# submission info <submission_id>
submission_info_parser = submission_subparsers.add_parser('info', parents=[common_parser],
help="Show info about a submission.")
submission_info_parser.add_argument('submission_id',
help="Id of the submission.")
submission_info_parser.set_defaults(func=submission_info)
# submission delete <submission_id>
submission_delete_parser = submission_subparsers.add_parser('delete', parents=[common_parser],
help="Delete a submission.")
submission_delete_parser.add_argument('submission_id',
help="Id of the submission.")
submission_delete_parser.set_defaults(func=submission_delete)
# analysis <command>
analysis_parser = subparsers.add_parser('analysis',
help="Manage analyses")
analysis_subparsers = analysis_parser.add_subparsers(metavar="<analysis command>", title="analysis commands")
analysis_subparsers.required = True
# analysis info
analysis_info_parser = analysis_subparsers.add_parser('info', parents=[common_parser],
help="Show information about an analysis.")
analysis_info_parser.set_defaults(func=analysis_info)
analysis_info_parser.add_argument('webid',
help="Id of the analysis.")
# analysis delete
analysis_delete_parser = analysis_subparsers.add_parser('delete', parents=[common_parser],
help="Delete an analysis.")
analysis_delete_parser.set_defaults(func=analysis_delete)
analysis_delete_parser.add_argument('webid',
help="Id of the analysis.")
# analysis list
analysis_list_parser = analysis_subparsers.add_parser('list', parents=[common_parser],
help="Show all submitted analyses.")
analysis_list_parser.set_defaults(func=analysis_list)
# analysis search <term>
analysis_search_parser = analysis_subparsers.add_parser('search', parents=[common_parser],
help="Search for analyses.")
analysis_search_parser.add_argument('searchterm',
help="Search term.")
analysis_search_parser.set_defaults(func=analysis_search)
# analysis report <id>
report_parser = analysis_subparsers.add_parser('report', parents=[common_parser],
help="Print the irjsonfixed report.")
report_parser.add_argument('webid',
help="Webid of the analysis.")
report_parser.add_argument('--run', type=int,
help="Select the run.")
report_parser.add_argument('--password', type=_cli_bytes_from_str,
help="Password for decrypting the report (see encrypt-with-password)")
report_parser.set_defaults(func=analysis_report)
# analysis download <id> [resource, resource, ...]
download_parser = analysis_subparsers.add_parser('download', parents=[common_parser],
help="Download resources of an analysis.")
download_parser.add_argument('webid',
help="Webid of the analysis.")
download_parser.add_argument('--dir',
help="Directory to store the reports in. "
"Defaults to <webid> in the current working directory. (Will be created.)")
download_parser.add_argument('--run', type=int,
help="Select the run. Omitting this option lets Joe Sandbox choose a run.")
download_parser.add_argument('--ignore-errors', action="store_true",
help="Report the paths as 'null' instead of aborting on the first error."
" In case no resource can be downloaded, an error is still raised.")
download_parser.add_argument('--password', type=_cli_bytes_from_str,
help="Password for decrypting the report (see encrypt-with-password)")
download_parser.add_argument('types', nargs='*', default=['html'],
help="Resource types to download. Consult the help for all types. "
"(default 'html')")
download_parser.set_defaults(func=analysis_download)
# account <command>
account_parser = subparsers.add_parser('account',
help="Query account info (Cloud Pro only)")
account_subparsers = account_parser.add_subparsers(metavar="<command>", title="account commands")
account_subparsers.required = True
# account info
account_info_parser = account_subparsers.add_parser('info', parents=[common_parser],
help="Show information about the Joe Sandbox Cloud Pro account.")
account_info_parser.set_defaults(func=account_info)
# server
server_parser = subparsers.add_parser('server',
help="Query server info")
server_subparsers = server_parser.add_subparsers(metavar="<server command>", title="server commands")
server_subparsers.required = True
# server online
online_parser = server_subparsers.add_parser('online', parents=[common_parser],
help="Determine whether the Joe Sandbox servers are online or in maintenance mode.")
online_parser.set_defaults(func=server_online)
# server info
server_info_parser = server_subparsers.add_parser('info', parents=[common_parser],
help="Show information about the server.")
server_info_parser.set_defaults(func=server_info)
# server systems
server_systems_parser = server_subparsers.add_parser('systems', parents=[common_parser],
help="List all available systems.")
server_systems_parser.set_defaults(func=server_systems)
# server lia countries
server_lia_parser = server_subparsers.add_parser('lia_countries', parents=[common_parser],
help="Show available localized internet anonymization countries.")
server_lia_parser.set_defaults(func=server_lia_countries)
# server languages_and_locales
server_langloc_parser = server_subparsers.add_parser('languages_and_locales', parents=[common_parser],
help="Show available languages and locales for Windows.")
server_langloc_parser.set_defaults(func=server_languages_and_locales)
# joelab <command>
joelab_parser = subparsers.add_parser('joelab',
help="Joe Lab Commands")
joelab_subparsers = joelab_parser.add_subparsers(metavar="<command>", title="joelab commands")
joelab_subparsers.required = True
# joelab machine <command>
joelab_machine_parser = joelab_subparsers.add_parser('machine',
help="Machine Commands")
joelab_machine_subparsers = joelab_machine_parser.add_subparsers(metavar="<command>", title="machine commands")
joelab_machine_subparsers.required = True
# joelab machine info
joelab_machine_info_parser = joelab_machine_subparsers.add_parser('info', parents=[common_parser],
help="Show machine info")
joelab_machine_info_parser.add_argument("--machine", required=True, help="Joe Lab machine ID")
joelab_machine_info_parser.set_defaults(func=joelab_machine_info)
# joelab filesystem <command>
joelab_filesystem_parser = joelab_subparsers.add_parser('filesystem',
help="Filesystem Commands")
joelab_filesystem_subparsers = joelab_filesystem_parser.add_subparsers(metavar="<command>", title="filesystem commands")
joelab_filesystem_subparsers.required = True
# joelab filesystem upload
joelab_filesystem_upload_parser = joelab_filesystem_subparsers.add_parser('upload', parents=[common_parser],
help="Upload a file to a Joe Lab machine")
joelab_filesystem_upload_parser.add_argument("--machine", required=True, help="Machine ID")
joelab_filesystem_upload_parser.add_argument("file", help="File to upload")
joelab_filesystem_upload_parser.add_argument("--path", help="Path on the Joe Lab machine")
joelab_filesystem_upload_parser.set_defaults(func=joelab_filesystem_upload)
# joelab filesystem download
joelab_filesystem_download_parser = joelab_filesystem_subparsers.add_parser('download', parents=[common_parser],
help="Download a file")
joelab_filesystem_download_parser.add_argument("--machine", required=True, help="Machine ID")
joelab_filesystem_download_parser.add_argument("path", help="Path of file on the Joe Lab machine")
joelab_filesystem_download_parser.add_argument("-d", "--destination", default=".", help="Destination", metavar="PATH")
joelab_filesystem_download_parser.set_defaults(func=joelab_filesystem_download)
# joelab images <command>
joelab_images_parser = joelab_subparsers.add_parser('images',
help="Images Commands")
joelab_images_subparsers = joelab_images_parser.add_subparsers(metavar="<command>", title="images commands")
joelab_images_subparsers.required = True
# joelab images list
joelab_images_list_parser = joelab_images_subparsers.add_parser('list', parents=[common_parser],
help="List the stored images.")
joelab_images_list_parser.add_argument("--machine", required=True, help="Joe Lab machine ID")
joelab_images_list_parser.set_defaults(func=joelab_images_list)
# joelab images reset
joelab_images_reset_parser = joelab_images_subparsers.add_parser('reset', parents=[common_parser],
help="Reset machine to an image")
joelab_images_reset_parser.add_argument("--machine", required=True, help="Joe Lab machine ID")
joelab_images_reset_parser.add_argument("--image", help="Image ID")
joelab_images_reset_parser.set_defaults(func=joelab_images_reset)
# joelab network <command>
joelab_network_parser = joelab_subparsers.add_parser('network',
help="Network Commands")
joelab_network_subparsers = joelab_network_parser.add_subparsers(metavar="<command>", title="network commands")
joelab_network_subparsers.required = True
# joelab network info
joelab_network_info_parser = joelab_network_subparsers.add_parser('info', parents=[common_parser],
help="Get network info")
joelab_network_info_parser.add_argument("--machine", required=True, help="Joe Lab machine ID")
joelab_network_info_parser.set_defaults(func=joelab_network_info)
# joelab network update
joelab_network_update_parser = joelab_network_subparsers.add_parser('update', parents=[common_parser],
help="Update the network settings of a Joe Lab Machine")
joelab_network_update_parser.add_argument("--machine", required=True, help="Joe Lab machine ID")
joelab_network_update_parser.add_argument("--enable-internet", dest="enable_internet", action="store_true", default=None,
help="Enable Internet")
joelab_network_update_parser.add_argument("--disable-internet", dest="enable_internet", action="store_false", default=None)
joelab_network_update_parser.add_argument("--internet-exitpoint")
joelab_network_update_parser.set_defaults(func=joelab_network_update)
# joelab pcap <command>
joelab_pcap_parser = joelab_subparsers.add_parser('pcap',
help="PCAP Commands")
joelab_pcap_subparsers = joelab_pcap_parser.add_subparsers(metavar="<command>", title="PCAP commands")
joelab_pcap_subparsers.required = True
# joelab pcap download
joelab_pcap_download_parser = joelab_pcap_subparsers.add_parser('download', parents=[common_parser],
help="Download the most recent PCAP")
joelab_pcap_download_parser.add_argument("--machine", required=True, help="Joe Lab machine ID")
joelab_pcap_download_parser.add_argument("-d", "--destination", default=".", help="Destination", metavar="PATH")
joelab_pcap_download_parser.set_defaults(func=joelab_pcap_download)
# joelab pcap start
joelab_pcap_start_parser = joelab_pcap_subparsers.add_parser('start', parents=[common_parser],
help="Start PCAP recodring")
joelab_pcap_start_parser.add_argument("--machine", required=True, help="Joe Lab machine ID")
joelab_pcap_start_parser.set_defaults(func=joelab_pcap_start)
# joelab pcap stop
joelab_pcap_stop_parser = joelab_pcap_subparsers.add_parser('stop', parents=[common_parser],
help="Stop PCAP recording")
joelab_pcap_stop_parser.add_argument("--machine", required=True, help="Joe Lab machine ID")
joelab_pcap_stop_parser.set_defaults(func=joelab_pcap_stop)
# joelab internet-exitpoints <command>
joelab_exitpoints_parser = joelab_subparsers.add_parser('internet-exitpoints',
help="Exitpoints Commands")
joelab_exitpoints_subparsers = joelab_exitpoints_parser.add_subparsers(metavar="<command>", title="internet exitpoints commands")
joelab_exitpoints_subparsers.required = True
# joelab internet-exitpoints list
joelab_exitpoints_list_parser = joelab_exitpoints_subparsers.add_parser('list', parents=[common_parser],
help="List the available internet exitpoints")
joelab_exitpoints_list_parser.set_defaults(func=joelab_exitpoints_list)
# Parse common args first, this allows
# i.e. jbxapi.py --apikey 1234 list
# and jbxapi.py list --apikey 1234
common_args, remaining = common_parser.parse_known_args(argv)
if common_args.version:
print(__version__)
sys.exit()
args = parser.parse_args(remaining)
# overwrite args with common_args
vars(args).update(vars(common_args))
# run command
joe = JoeSandbox(apikey=args.apikey,
apiurl=args.apiurl,
accept_tac=args.accept_tac,
user_agent="CLI",
verify_ssl=not args.no_check_certificate)
try:
args.func(joe, args)
except ApiError as e:
print_json(e.raw)
sys.exit(e.code + 100) # api errors start from 100
except ConnectionError as e:
print_json({
"code": 1,
"message": str(e),
})
sys.exit(3)
except (OSError, IOError) as e:
print_json({
"code": 1,
"message": str(e),
})
sys.exit(4)
except JoeException as e:
print_json({
"code": 1,
"message": str(e),
})
sys.exit(5)
def main(argv=None):
# Workaround for a bug in Python 2.7 where sys.argv arguments are converted to ASCII and
# non-ascii characters are replaced with '?'.
#
# https://bugs.python.org/issue2128
# https://stackoverflow.com/q/846850/
if sys.version_info[0] == 2 and sys.platform.startswith('win32'):
def win32_unicode_argv():
"""Uses shell32.GetCommandLineArgvW to get sys.argv as a list of Unicode strings.
"""
from ctypes import POINTER, byref, cdll, c_int, windll
from ctypes.wintypes import LPCWSTR, LPWSTR
GetCommandLineW = cdll.kernel32.GetCommandLineW
GetCommandLineW.argtypes = []
GetCommandLineW.restype = LPCWSTR
CommandLineToArgvW = windll.shell32.CommandLineToArgvW
CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)]
CommandLineToArgvW.restype = POINTER(LPWSTR)
cmd = GetCommandLineW()
argc = c_int(0)
argv = CommandLineToArgvW(cmd, byref(argc))
if argc.value > 0:
# Remove Python executable and commands if present
start = argc.value - len(sys.argv)
return [argv[i] for i in
xrange(start, argc.value)]
sys.argv = win32_unicode_argv()
cli(argv if argv is not None else sys.argv[1:])
def _to_bool(value, default=None):
"""
Booleans should be submitted as "0" or "1". They can also be missing.
Returns "0", "1" or `None`
"""
if value is None or value is UnsetBool:
value = default
if value is None or value is UnsetBool:
return None
else:
return "1" if value else "0"
def _urllib3_fix_filenames(kwargs):
"""
Remove non-ASCII characters from file names due to a limitation of the combination of
urllib3 (via python-requests) and our server
https://github.com/requests/requests/issues/2117
Internal Ticket #3090
"""
import urllib3
# fixed in urllib3 1.25.2
# https://github.com/urllib3/urllib3/pull/1492
try:
urllib_version = [int(p) for p in urllib3.__version__.split(".")]
except Exception:
print("Error parsing urllib version: " + urllib3.__version__, file=sys.stderr)
return
if urllib_version >= [1, 25, 2]:
return
if "files" in kwargs and kwargs["files"] is not None:
acceptable_chars = "0123456789" + "abcdefghijklmnopqrstuvwxyz" + \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" + " _-.,()[]{}"
for param_name, fp in kwargs["files"].items():
if isinstance(fp, (tuple, list)):
filename, fp = fp
else:
filename = requests.utils.guess_filename(fp) or param_name
def encode(char):
try:
if char in acceptable_chars:
return char
except UnicodeDecodeError:
pass
return "x{:02x}".format(ord(char))
filename = "".join(encode(x) for x in filename)
kwargs["files"][param_name] = (filename, fp)
if __name__ == "__main__":
main()
``` |
{
"source": "joeseggie/demo_bankaccount",
"score": 2
} |
#### File: demo_bankaccount/account/permissions.py
```python
from uuid import UUID
from django.db.models import Sum
from django.utils.datetime_safe import datetime
from rest_framework import permissions
from rest_framework.generics import get_object_or_404
from account.exceptions import ForbiddenWithdrawException
from account.exceptions import InsufficientBalanceException
from account.models import Transaction, Account
from common.constants import WITHDRAW_THRESHOLD
class MaximumWithdrawPermission(permissions.BasePermission):
def has_permission(self, request, view):
withdraw_amount_input = request.data['amount']
if withdraw_amount_input:
withdraw_amount: int = int(withdraw_amount_input)
withdraw_sum_dict: dict = Transaction.objects\
.filter(
logged__year=datetime.today().year,
logged__month=datetime.today().month,
logged__day=datetime.today().day,
transaction_type='withdraw',
account_id=request.data['account_id']
)\
.aggregate(Sum('amount'))
current_withdraw: int = int(withdraw_sum_dict['amount__sum']) \
if withdraw_sum_dict['amount__sum'] else 0
if (current_withdraw + withdraw_amount) <= WITHDRAW_THRESHOLD:
return True
raise ForbiddenWithdrawException
class InsufficientBalancePermission(permissions.BasePermission):
def has_permission(self, request, view):
withdraw_amount: int = int(request.data['amount'])
account_id: UUID = UUID(request.data['account_id'])
account: Account = get_object_or_404(Account, pk=account_id)
if withdraw_amount > account.balance:
raise InsufficientBalanceException
return True
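# Illustrative usage sketch (hypothetical view, not part of the original module):
# these permission classes would typically be attached to a DRF view so that
# has_permission() runs before the withdraw handler, e.g.
#
# class WithdrawView(APIView):
#     permission_classes = [MaximumWithdrawPermission, InsufficientBalancePermission]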
``` |
{
"source": "joeseggie/fingerprint_api_proxy",
"score": 3
} |
#### File: fingerprint_api_proxy/app/soapclientbuilder.py
```python
from app import app
import requests
from requests.auth import HTTPBasicAuth
from bs4 import BeautifulSoup
class SoapClientBuilder():
"""Class the builds the soap client"""
def __init__(self):
"""Constructor"""
self.fingerprint_api_wsdl = app.config['FINGERPRINT_WSDL']
self.api_username = app.config['USERNAME']
self.api_password = app.config['PASSWORD']
def build_request_body(self, msisdn, probe_minutiae, candidate_template):
"""Builds the soap request the is going to be sent
Arguments:
msisdn {str} -- MSISDN being registered
probe_minutiae {str} -- base64 encoded fingerprint from ID
candidate_template {str} -- base64 encoded template of scanned fingerprint
Returns:
text/xml soap request formatted string
"""
soap_body = '<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:end="http://endpoint.tlc.com/"><soapenv:Header/><soapenv:Body><end:verifier><!--Optional:--><parameter><msisdn>{0}</msisdn><probeMinutiae>{1}</probeMinutiae><candidateTemplate>{2}</candidateTemplate></parameter></end:verifier></soapenv:Body></soapenv:Envelope>'.format(msisdn, probe_minutiae, candidate_template)
return soap_body
def send_request(self, request_body):
"""Sends request to process the soap request
Argument(s):
request_body {str} -- text/xml formatted string
Returns:
Response from SOAP API as string
"""
url = self.fingerprint_api_wsdl
headers = {'Content-Type': 'text/xml'}
username = self.api_username
password = self.api_password
auth = HTTPBasicAuth(username=username, password=password)
api_response = requests.post(url, data=request_body, headers=headers, auth=auth)
return api_response.content.decode('utf-8')
def parse_response(self, soap_response):
"""Parse the SOAP xml response
Argument(s):
soap_response {str} -- Response returned from the soap request
Returns:
Dictionary of the response values
"""
soup = BeautifulSoup(soap_response, 'lxml-xml')
return {
'ReferenceId': soup.referenceid.string,
'Msisdn': soup.msisdn.string,
'Threshold': soup.threshold.string,
'Score': soup.score.string,
'MatchingResult': soup.matchingResult.string,
'KycUpdateStatus': soup.kycUpdateStatus.string,
'Message': soup.message.string
}
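# Illustrative usage sketch (hypothetical values, not part of the original module):
#
#   builder = SoapClientBuilder()
#   body = builder.build_request_body(msisdn, probe_minutiae, candidate_template)
#   raw = builder.send_request(body)
#   result = builder.parse_response(raw)   # dict with ReferenceId, Msisdn, Score, ...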
``` |
{
"source": "joeseggie/resourceidea",
"score": 2
} |
#### File: app/auth/views.py
```python
import os
from flask import Blueprint
from flask import render_template
import jwt
from app.common.utils import generate_hash
from app.user.repositories import UserRepository
from app.user.utils import confirm_email
auth_views_bp = Blueprint('auth_view', __name__)
@auth_views_bp.route('/confirmation/<token>')
def email_confirmation(token: str):
"""
Email confirmation route.
"""
key = os.environ.get('SECRET_KEY')
token_data = jwt.decode(token, key)
user_id = token_data['user_id']
user = UserRepository.get_one_by_id(user_id)
if user:
token_hash = generate_hash(email=user.email, user_id=user.id)
if token_hash == token_data['hash']:
confirm = {'email_confirmed': True}
result = confirm_email(user.id, **confirm)
if result.email_confirmed:
return render_template('confirmed_email.html')
return render_template('invalid_confirmation_token.html')
```
#### File: app/client/endpoints.py
```python
from flask import Blueprint
from flask import request
from flask_restful import Api
from flask_restful import Resource
from app.client.schemas import ClientSchema
from app.client.utils import create_client
client_bp = Blueprint('client', __name__)
client_api = Api(client_bp)
URL_PREFIX = '/clients'
class ClientsResource(Resource):
"""Clients resource"""
status_code = 200
def post(self):
"""HTTP POST method handler."""
payload = request.json
valid_input = ClientSchema(strict=True).load(payload).data
new_client = create_client(**valid_input)
output = ClientSchema(strict=True).dump(new_client).data
self.status_code = 201
return output, self.status_code
client_api.add_resource(ClientsResource, URL_PREFIX)
```
#### File: app/client_industry/repositories.py
```python
import re
from typing import Tuple
from sqlalchemy.dialects.postgresql import UUID
from app.client_industry.models import ClientIndustry
from app.common.base_repository import BaseRepository
class ClientIndustryRepository(BaseRepository):
"""Client Industry repository"""
model_class = ClientIndustry
@classmethod
def _name_slug(cls, name: str) -> str:
"""
Get the slug of a client industry's name.
Args:
name (str): Client industry's name.
Returns:
str: Name slug formatted to remove all
non-alphanumeric characters.
"""
return '-'.join(re.split(r'\W', name.lower()))
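# Illustrative example (not part of the original module):
#   _name_slug("Real Estate") -> "real-estate"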
@classmethod
def add(cls, model: model_class) -> model_class:
"""
Create new client industry.
Args:
model (ClientIndustry): New client industry to add.
Returns:
ClientIndustry: Client industry that has been added.
Raises:
ValueError if client industry name already exists.
"""
model.name_slug = cls._name_slug(model.name)
return cls.create(model)
@classmethod
def update(cls,
client_industry_id: UUID,
update_fields: Tuple,
**updates) -> model_class:
"""
Update client industry.
Args:
client_industry_id (UUID): Client industry ID.
update_fields (List, Tuple): Fields to be updated.
updates: Data to update.
Returns:
Updated client industry.
Raises:
ValueError if client industry update name already exists.
"""
if 'name' in updates:
updates['name_slug'] = cls._name_slug(updates['name'])
if 'name_slug' not in update_fields:
update_fields = update_fields + ('name_slug', )
return cls.update_by_id(
model_id=client_industry_id,
fields_for_update=update_fields,
**updates)
@classmethod
def list_client_industries(cls) -> list:
"""
List client industries.
Returns:
List of client industries.
"""
return cls.get_all()
```
#### File: app/engagement/endpoints.py
```python
from uuid import UUID
from flask import Blueprint
from flask import request
from flask_restful import Api
from flask_restful import Resource
from app.engagement.schemas import EngagementSchema
from app.engagement.utils import create_engagement
from app.engagement.utils import update_engagement
engagement_bp = Blueprint('engagement', __name__)
engagement_api = Api(engagement_bp)
URL_PREFIX = '/engagements'
class EngagementsResource(Resource):
"""Engagements resource"""
status_code = 200
def post(self):
"""HTTP POST method handler."""
payload = request.json
valid_input = EngagementSchema(strict=True).load(payload).data
new_engagement = create_engagement(**valid_input)
output = EngagementSchema(strict=True).dump(new_engagement).data
self.status_code = 201
return output, self.status_code
class EngagementResource(Resource):
"""Engagement Resource"""
status_code = 200
def put(self, resource_id: str):
"""
HTTP PUT method handler.
Args:
resource_id (str): Resource ID.
"""
payload = request.json
valid_input = EngagementSchema(strict=True).load(payload).data
engagement_update = update_engagement(
engagement_id=UUID(resource_id),
**valid_input)
output = EngagementSchema(strict=True).dump(engagement_update).data
return output, self.status_code
engagement_api.add_resource(EngagementsResource, URL_PREFIX)
engagement_api.add_resource(
EngagementResource,
f'{URL_PREFIX}/<string:resource_id>')
```
#### File: app/job/utils.py
```python
from typing import List
from uuid import UUID
from app.job.models import Job
from app.job.repositories import JobRepository
def create_job(**kwargs) -> Job:
"""
Create new job.
Returns:
Job created.
"""
new_job = Job(**kwargs)
return JobRepository.create(new_job)
def update_job(job_id: UUID, **kwargs) -> Job:
"""
Update job.
Args:
job_id (UUID): ID of job to be update.
Returns:
Updated job.
Raises:
ValueError if the job with the ID does not exist.
"""
job_for_update = JobRepository.get_one_by_id(job_id)
if not job_for_update:
raise ValueError('Job with the specified ID does not exist')
update_fields = ('title', 'description', 'start_date', 'completion_date',
'status')
return JobRepository.update_by_id(
model_id=job_for_update.id,
fields_for_update=update_fields,
**kwargs)
def get_job(job_id: UUID) -> Job:
"""
Get job by ID.
Args:
job_id (UUID): ID of the job to be returned.
Returns:
Job
"""
return JobRepository.get_one_by_id(model_id=job_id)
def list_jobs() -> List[Job]:
"""
List jobs.
Returns:
List of jobs.
"""
return JobRepository.get_all()
```
#### File: app/models/job_status.py
```python
from database import db
class JobStatus(db.Model):
"""Job status model
Parameters
----------
db : Model
"""
__tablename__ = 'job_status'
id = db.Column(db.Integer, primary_key=True)
description = db.Column(db.String(64))
def __repr__(self):
return '<Job status %s>' % self.description
```
#### File: app/organization/endpoints.py
```python
from flask import Blueprint
from flask import request
from flask_restful import Api
from flask_restful import Resource
from app.organization.schemas import OrganizationCreatedSchema
from app.organization.schemas import OrganizationInputSchema
from app.organization.schemas import OrganizationListFilterSchema
from app.organization.schemas import OrganizationListSchema
from app.organization.utils import create_organization
from app.organization.utils import get_organizations
organization_bp = Blueprint('organization', __name__)
organization_api = Api(organization_bp)
URL_PREFIX = '/organizations'
class OrganizationListResource(Resource):
def get(self):
args = request.args
validated_input = OrganizationListFilterSchema(strict=True)\
.load(args).data
companies_list = get_organizations(**validated_input)
output = OrganizationListSchema(strict=True).dump(
{
'status': 'OK',
'code': 200,
'data': companies_list
}
).data
return output, 200
def post(self):
payload = request.json
validated_input = OrganizationInputSchema(strict=True)\
.load(payload).data
new_company = create_organization(**validated_input)
output = OrganizationCreatedSchema(strict=True).dump({
'status': 'OK',
'code': 201,
'data': new_company
}).data
return output, 201
organization_api.add_resource(OrganizationListResource, URL_PREFIX)
```
#### File: app/organization/repositories.py
```python
from typing import List
from sqlalchemy.dialects.postgresql import UUID
from app.common.base_repository import BaseRepository
from app.organization.models import Organization
class OrganizationRepository(BaseRepository):
"""
Organization repository.
Args:
BaseRepository: Base repository.
"""
model_class = Organization
@classmethod
def update(cls, model_id: UUID, **kwargs) -> model_class:
"""
Update organization details.
Args:
model_id (UUID): Model ID.
Returns:
Organization updated.
"""
update_fields = ('name', 'name_slug', 'address')
return cls.update_by_id(model_id, update_fields, **kwargs)
@classmethod
def get_all(cls, **kwargs) -> List[model_class]:
"""
List organizations.
Returns:
List of organizations.
"""
sort_key = kwargs.get('sort_key')
sort_order = kwargs.get('sort_order')
if sort_order == 'desc':
query = cls.model_class.query.order_by(
getattr(cls.model_class, sort_key).desc())
else:
query = cls.model_class.query.order_by(
getattr(cls.model_class, sort_key).asc())
return query.to_list()
@classmethod
def get_one_by_name(cls, name_slug: str, **kwargs) -> model_class:
"""
Get an organization by name.
Args:
name_slug (str): Name slug.
Returns:
Organization.
"""
return cls.model_class.query\
.filter(cls.model_class.name_slug == name_slug)\
.first()
```
#### File: app/task/utils.py
```python
from typing import List
from uuid import UUID
from app.task.models import Task
from app.task.repository import TaskRepository
def create_task(**kwargs) -> Task:
"""
Create a new task.
Returns:
Created task.
"""
new_task = Task(
title=kwargs['title'],
description=kwargs.get('description', None),
job_id=kwargs['job_id'])
return TaskRepository.create(new_task)
def update_task(task_id: UUID, **kwargs) -> Task:
"""
Update job task.
Args:
task_id (UUID): Task ID.
Returns:
Task updated.
"""
update_fields = ('title', 'description', 'job_id',)
return TaskRepository.update_by_id(
model_id=task_id,
fields_for_update=update_fields,
**kwargs)
def get_task(task_id: UUID) -> Task:
"""
Get task.
Args:
task_id (UUID): Task ID.
Returns:
Task
"""
return TaskRepository.get_one_by_id(model_id=task_id)
def list_tasks() -> List[Task]:
"""
List tasks.
Returns:
List of tasks.
"""
return TaskRepository.get_all()
```
#### File: app/user/endpoints.py
```python
from flask import Blueprint
from flask import request
from flask_restful import Api
from flask_restful import Resource
from app.user.schemas import UserInputSchema
from app.user.schemas import UsersListFilterSchema
from app.user.schemas import UsersListSchema
from app.user.utils import get_all_users
from app.user.utils import create_user
user_bp = Blueprint('users', __name__)
user_api = Api(user_bp)
USER_ENDPOINT_PREFIX = '/users'
class UserAccountsListResource(Resource):
def get(self):
args = request.args
validated_input = UsersListFilterSchema(strict=True)\
.load(args).data
users_list = get_all_users(**validated_input)
output = UsersListSchema(strict=True).dump(
{
'status': 'OK',
'code': 200,
'data': users_list}).data
return output, 200
def post(self):
payload = request.json
validated_input = UserInputSchema(strict=True).load(payload).data
new_user = create_user(**validated_input)
output = {
'status': 'OK',
'code': 201,
'data': new_user
}
return output, 201
user_api.add_resource(UserAccountsListResource, USER_ENDPOINT_PREFIX)
```
#### File: app/user/repositories.py
```python
from typing import List
from uuid import UUID
from sqlalchemy.orm import Query
from app.common.base_repository import BaseRepository
from app.user.models import UserAccount
class UserRepository(BaseRepository):
model_class = UserAccount
@classmethod
def _sorted_user_accounts_query(cls, query: Query, **kwargs) -> Query:
"""
Returns a sorted query of user accounts.
Returns:
Query: Sorted user accounts query.
"""
sort_key = kwargs['sort_key']
sort_order = kwargs['sort_order']
if sort_order == 'desc':
query = query.order_by(
getattr(cls.model_class, sort_key).desc())
else:
query = query.order_by(
getattr(cls.model_class, sort_key).asc())
return query
@classmethod
def get_all(cls, **kwargs) -> List[model_class]:
"""
Get all the user accounts.
Returns:
List[UserAccount]: List of user accounts.
"""
organization_id = kwargs['organization_id']
query = cls.model_class.query\
.filter(cls.model_class.organization_id == organization_id)
query = cls._sorted_user_accounts_query(query, **kwargs)
return query.to_list()
@classmethod
def update(cls, model_id: UUID, **kwargs) -> model_class:
"""
Update user account.
Args:
model_id (UUID): User account id.
Returns:
UserAccount: User account.
"""
update_fields = ('username', 'email', 'phone_number')
return cls.update_by_id(model_id, update_fields, **kwargs)
@classmethod
def confirm_phone_number(cls, model_id: UUID, **kwargs) -> model_class:
"""
Confirm phone number of a user account.
Args:
model_id (UUID): id of the user account whose phone number
is to be confirmed.
Returns:
UserAccount: User account.
"""
update_fields = ('phone_number_confirmed',)
return cls.update_by_id(model_id, update_fields, **kwargs)
@classmethod
def confirm_email(cls, model_id: UUID, **kwargs) -> model_class:
"""
Confirm email of a user account.
Args:
model_id (UUID): Id of user account whose email is to be
confirmed.
Returns:
UserAccount: User account.
"""
update_fields = ('email_confirmed',)
return cls.update_by_id(model_id, update_fields, **kwargs)
@classmethod
def get_one_by_field(cls, **kwargs) -> model_class:
"""
Get all user accounts filtered by fields.
Returns:
UserAccount: User account.
"""
query = cls.model_class.query
for field in list(kwargs.keys()):
query = query.filter(
getattr(cls.model_class, field) == kwargs[field])
return query.first()
```
#### File: tests/unit/test_auth_utils_signup.py
```python
import pytest
from app.auth.utils import signup
def test_raises_value_error_when_organization_exists(session):
# Arrange
input = {
'name': 'Organization 1'
}
# Assert
with pytest.raises(ValueError):
signup(**input)
def test_raises_value_error_when_email_exists(session):
# Arrange
input = {
'name': 'Organization 10',
'email': '<EMAIL>'
}
# Assert
with pytest.raises(ValueError):
signup(**input)
```
#### File: tests/unit/test_client_industry_utils.py
```python
from uuid import uuid4
import pytest
from werkzeug.exceptions import NotFound
from app.client_industry.models import ClientIndustry
from app.client_industry.utils import create_client_industry
from app.client_industry.utils import update_client_industry
from app.client_industry.utils import get_client_industry
from app.client_industry.utils import list_client_industries
def test_update(session, fake_lorem):
"""Test updating a client industry"""
# Arrange
test_model = create_client_industry(name=fake_lorem.word())
updates = {
'name': 'Changed name',
}
# Act
result = update_client_industry(test_model.id, **updates)
# Assert
assert isinstance(result, ClientIndustry)
assert test_model.id == result.id
def test_update_raises_not_found_exception(session, fake_lorem):
"""Test update raises NotFound exception when the resource is
not found"""
# Arrange
fake_id = str(uuid4())
fake_updates = {
'name': fake_lorem.word(),
}
# Assert
with pytest.raises(NotFound):
update_client_industry(fake_id, **fake_updates)
def test_update_raises_value_error_exception(session, fake_lorem):
"""Test update raises ValueError exception when invalid
data is supplied"""
# Arrange
test_model = create_client_industry(name=fake_lorem.word())
updates = {
'name': 'Existing name',
}
# Assert
with pytest.raises(ValueError):
update_client_industry(test_model.id, **updates)
def test_get_client_industry(session, fake_lorem):
"""Test querying for a client industry."""
# Arrange
fake_model = create_client_industry(name=fake_lorem.word())
# Act
result = get_client_industry(fake_model.id)
# Assert
assert result is not None
assert isinstance(result, ClientIndustry)
assert result == fake_model
def test_list_client_industries(session):
"""Test listing client industries."""
# Act
result = list_client_industries()
# Assert
assert isinstance(result, list)
```
#### File: tests/unit/test_client_utils.py
```python
from uuid import uuid4
from app.client.models import Client
from app.client.utils import create_client
from app.client.utils import get_client
from app.client.utils import list_clients
from app.client.utils import update_client
from app.client_industry.utils import create_client_industry
from app.organization.utils import create_organization
def test_create_client(session, fake_lorem):
"""Test create_client function."""
# Arrange
fake_industry = create_client_industry(name=fake_lorem.word())
fake_org = create_organization(
address=fake_lorem.word(),
organization_name=fake_lorem.word())
# Act
result = create_client(
name=fake_lorem.word(),
organization_id=fake_org.id,
client_industry_id=fake_industry.id,
name_stub=fake_lorem.word())
# Assert
assert isinstance(result, Client)
def test_update_client(session, fake_lorem):
"""Test update_client function."""
# Arrange
first_fake_industry = create_client_industry(name=fake_lorem.word())
sec_fake_industry = create_client_industry(name=fake_lorem.word())
fake_org = create_organization(
address=fake_lorem.word(),
organization_name=fake_lorem.word())
fake_client = create_client(
name=fake_lorem.word(),
organization_id=fake_org.id,
client_industry_id=first_fake_industry.id)
fake_client_name = fake_client.name
fake_client_slug = fake_client.name_slug
# Act
result = update_client(
client_id=fake_client.id,
name=fake_lorem.word(),
client_industry_id=sec_fake_industry.id)
# Assert
assert isinstance(result, Client)
assert result.name != fake_client_name
assert result.name_slug != fake_client_slug
assert result.client_industry_id != first_fake_industry.id
assert result.id == fake_client.id
def test_get_client(session, fake_lorem):
"""Test get_client function."""
# Arrange
fake_industry = create_client_industry(name=fake_lorem.word())
fake_org = create_organization(
address=fake_lorem.word(),
organization_name=fake_lorem.word())
test_client = create_client(
name=fake_lorem.word(),
organization_id=fake_org.id,
client_industry_id=fake_industry.id)
# Act
result = get_client(test_client.id)
# Assert
assert result is not None
assert isinstance(result, Client)
assert result.id == test_client.id
def test_get_client_returns_none(session):
"""Test get_client returns None if not found."""
# Arrange
fake_client_id = str(uuid4())
# Act
result = get_client(client_id=fake_client_id)
# Assert
assert result is None
def test_list_clients(session):
"""Test list_clients."""
# Act
result = list_clients()
# Assert
assert isinstance(result, list)
```
#### File: tests/unit/test_resource_utils.py
```python
from app.employee.repository import EmployeeRepository
from app.organization.utils import get_organizations
from app.resource.models import Resource
from app.resource.utils import create_resource
from app.resource.utils import get_resource
from app.resource.utils import list_resources
from app.resource.utils import update_resource
def test_create_resource(session, fake_color):
"""Test create_resource function"""
# Arrange
org_list_params = {'sort_key': 'name', 'sort_order': 'asc'}
fake_employee = next(iter(EmployeeRepository.get_all() or []), None)
fake_organization = next(
iter(get_organizations(**org_list_params) or []), None)
fake_resource = {
'employee_id': fake_employee.id,
'color': fake_color.hex_color(),
'organization_id': fake_organization.id
}
# Act
result = create_resource(**fake_resource)
# Assert
if not isinstance(result, Resource):
raise AssertionError()
def test_update_resource(session, fake_color):
"""Test update_resource function."""
# Arrange
org_list_params = {'sort_key': 'name', 'sort_order': 'asc'}
fake_employee = next(iter(EmployeeRepository.get_all() or []), None)
fake_organization = next(
iter(get_organizations(**org_list_params) or []), None)
fake_resource = {
'employee_id': fake_employee.id,
'color': fake_color.hex_color(),
'organization_id': fake_organization.id
}
fake_resource = create_resource(**fake_resource)
original_color = fake_resource.color
resource_updates = {'color': fake_color.hex_color()}
# Act
result = update_resource(fake_resource.id, **resource_updates)
# Assert
if not isinstance(result, Resource):
raise AssertionError()
if original_color == result.color:
raise AssertionError()
def test_get_resource(session, fake_color):
"""Test get_resource function."""
# Arrange
org_list_params = {'sort_key': 'name', 'sort_order': 'asc'}
fake_employee = next(iter(EmployeeRepository.get_all() or []), None)
fake_organization = next(
iter(get_organizations(**org_list_params) or []), None)
fake_resource_input = {
'employee_id': fake_employee.id,
'color': fake_color.hex_color(),
'organization_id': fake_organization.id
}
fake_resource = create_resource(**fake_resource_input)
# Act
result = get_resource(fake_resource.id)
# Assert
if not isinstance(result, Resource):
raise AssertionError()
if result.id != fake_resource.id:
raise AssertionError()
def test_list_resources(session):
"""Test list_resources function."""
# Act
result = list_resources()
# Assert
if not isinstance(result, list):
raise AssertionError()
```
#### File: tests/unit/test_role_repository.py
```python
from app.role.models.role import Role
from app.role.repositories.role_repository import RoleRepository
def test_update(session):
"""
Test the role repository update function.
"""
# Arrange
test_model = RoleRepository.create(Role(name='Super User'))
test_model_id = test_model.id
update_fields = ('name',)
# Act
result = RoleRepository.update(
test_model_id, update_fields, name='Admin User')
# Assert
assert isinstance(result, Role)
assert result.normalized_name == 'admin-user'
assert result.name == 'Admin User'
def test_get_all(session):
"""
Test role repository get_all function
"""
# Arrange
sort_key = 'name'
sort_order = 'asc'
# Act
result = RoleRepository.get_all(sort_key=sort_key, sort_order=sort_order)
# Assert
assert isinstance(result, list)
def test_get_by_name(session):
"""
Test role repository get_by_name function.
"""
# Arrange
role_name = 'Super User'
normalized_role_name = 'super-user'
RoleRepository.create(
Role(name=role_name, normalized_name=normalized_role_name))
# Act
result = RoleRepository.get_by_name(role_name)
# Assert
assert isinstance(result, Role)
assert result.name == role_name
assert result.normalized_name == normalized_role_name
def test_create(session):
"""
Test role repository create function.
"""
# Arrange
role_name = 'Super User'
normalized_role_name = 'super-user'
# Act
result = RoleRepository.create(
Role(name=role_name, normalized_name=normalized_role_name))
# Assert
assert isinstance(result, Role)
assert result.name == role_name
assert result.normalized_name == normalized_role_name
def test_update_by_id(session):
"""
Test role repository update_by_id function.
"""
# Arrange
role_name = 'New Role'
normalized_role_name = 'new-role'
test_stub = RoleRepository.create(
Role(name=role_name, normalized_name=normalized_role_name))
update_fields = ('name', 'normalized_name')
role_update = 'Role update'
normalized_role_update = 'role-update'
# Act
result = RoleRepository.update_by_id(
model_id=test_stub.id,
fields_for_update=update_fields,
name=role_update,
normalized_name=normalized_role_update
)
# Assert
assert isinstance(result, Role)
assert result.normalized_name == normalized_role_update
assert result.name == role_update
```
#### File: tests/unit/test_task_utils.py
```python
from app.job.utils import list_jobs
from app.task.models import Task
from app.task.utils import create_task
from app.task.utils import update_task
from app.task.utils import get_task
from app.task.utils import list_tasks
def test_create_task(session, fake_lorem):
"""Test create_task function"""
# Arrange
fake_job_id = next(iter(list_jobs() or []), None).id
fake_task = {
'title': fake_lorem.sentence(),
'description': fake_lorem.paragraph(),
'job_id': fake_job_id
}
# Act
result = create_task(**fake_task)
# Assert
if not isinstance(result, Task):
raise AssertionError()
def test_update_task(session, fake_lorem):
"""Test update_task function"""
# Arrange
fake_job_id = next(iter(list_jobs() or []), None).id
fake_task_data = {
'title': fake_lorem.sentence(),
'description': fake_lorem.paragraph(),
'job_id': fake_job_id
}
fake_task = create_task(**fake_task_data)
fake_task_title = fake_task.title
task_updates = {'title': 'Changed task'}
# Act
result = update_task(fake_task.id, **task_updates)
# Assert
if fake_task_title == result.title:
raise AssertionError()
def test_get_task(session, fake_lorem):
"""Test get_task"""
# Arrange
fake_job_id = next(iter(list_jobs() or []), None).id
fake_task_data = {
'title': fake_lorem.sentence(),
'description': fake_lorem.paragraph(),
'job_id': fake_job_id
}
fake_task = create_task(**fake_task_data)
# Act
result = get_task(fake_task.id)
# Assert
if not isinstance(result, Task):
raise AssertionError()
def test_list_tasks(session):
"""Test list_tasks function."""
# Act
result = list_tasks()
# Assert
if not isinstance(result, list):
raise AssertionError()
``` |
{
"source": "Joeseph5/data_analysis_project",
"score": 4
} |
#### File: bikes-sharing_analysis/src/bikeshare.py
```python
import time
import pandas as pd
import numpy as np
import os
CITY_DATA = {'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv'}
DAY = ['all', 'monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday']
MONTH = ['all', 'january', 'february', 'march', 'april', 'may', 'june']
DIR_ROOT = os.path.abspath(os.path.dirname(os.getcwd()))
DIR_DATA = os.path.join(DIR_ROOT, 'data/')
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print('Hello! Let\'s explore some US bikeshare data!')
city = get_input('city', CITY_DATA.keys())
month = get_input('month', MONTH)
day = get_input('day', DAY)
print('-'*40)
return city, month, day
def get_input(analysis_obj, objects_set):
"Get the input objects."
while True:
ret = input("\nPlease input the %s you want to analyza: " %analysis_obj)
ret = ret.lower()
if ret in objects_set:
break
else:
print("Please input the right %s!" %analysis_obj)
return ret
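# Illustrative example (not part of the original script):
#   get_input('city', CITY_DATA.keys()) keeps prompting until e.g. 'chicago' is entered.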
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
path_city = os.path.join(DIR_DATA, CITY_DATA[city])
df = pd.read_csv(path_city)
if (month != 'all'):
df['Start Time'] = pd.to_datetime(df['Start Time'])
df['month'] = df['Start Time'].dt.month
df = df[df['month'] == MONTH.index(month)]
if (day != 'all'):
df['Start Time'] = pd.to_datetime(df['Start Time'])
df['day_of_week'] = df['Start Time'].dt.day_name()
df = df[df['day_of_week'] == day.title()]
return df
def time_stats(df):
"""Displays statistics on the most frequent times of travel."""
print('\nCalculating The Most Frequent Times of Travel...\n')
start_time = time.time()
df['Start Time'] = pd.to_datetime(df['Start Time'])
# display the most common month
df['month'] = df['Start Time'].dt.month
# mode():Return the highest frequency value in a Series.
month_most_common = df['month'].mode()[0]
print("The most common month: ", MONTH[month_most_common].title())
# display the most common day of week
df['day_of_week'] = df['Start Time'].dt.day_name()
print("The most common day of week: ", df['day_of_week'].mode()[0])
# display the most common start hour
df['hour'] = df['Start Time'].dt.hour
print("The most common start hour: ", df['hour'].mode()[0])
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df):
"""Displays statistics on the most popular stations and trip."""
print('\nCalculating The Most Popular Stations and Trip...\n')
start_time = time.time()
# display most commonly used start station
most_common_start_station = df['Start Station'].mode()[0]
print("The most commonly used start station: ", most_common_start_station)
# display most commonly used end station
most_common_end_station = df['End Station'].mode()[0]
print("The most commonly used end station: ", most_common_end_station)
# display most frequent combination of start station and end station trip
# top = df.groupby(['Start Station', 'End Station']).size().idxmax()
# top[0],top[1]
df_station = df['Start Station'] + df['End Station']
most_frequent_station = df_station.mode()[0]
print("The most commonly frequent combination of start station and end station trip: ",
most_frequent_station)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
"""Displays statistics on the total and average trip duration."""
print('\nCalculating Trip Duration...\n')
start_time = time.time()
# display total travel time
total_travel_time = df['Trip Duration'].sum()
print("Display total travel time: ", total_travel_time)
# display mean travel time
mean_travel_time = df['Trip Duration'].mean()
print("Display maen travel time: ", mean_travel_time)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def user_stats(df):
"""Displays statistics on bikeshare users."""
print('\nCalculating User Stats...\n')
start_time = time.time()
# Display counts of user types
user_types = df['User Type'].value_counts()
print("Display counts of user types:\n", user_types)
# Display counts of gender
try:
genders = df['Gender'].value_counts()
print("Display counts of gender:\n", genders)
except:
print("\nWarning! There is no gender infomations in this city.")
# Display earliest, most recent, and most common year of birth
try:
most_common_year = df['Birth Year'].mode()[0]
year_sorted = df['Birth Year'].sort_values()
year_sorted = year_sorted.dropna()
earliest_year = year_sorted.reset_index(drop=True).iloc[0]
most_recent_year = year_sorted.reset_index(drop=True).iloc[-1]
print("Display earliest, most recent, and most common year of birth:",
earliest_year, most_recent_year, most_common_year)
except:
print("\nWarning! There is no birth year infomations in this city.")
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def main():
while True:
city, month, day = get_filters()
df = load_data(city, month, day)
time_stats(df)
station_stats(df)
trip_duration_stats(df)
user_stats(df)
restart = input('\nWould you like to restart? Enter yes or no.\n')
if restart.lower() != 'yes':
break
if __name__ == "__main__":
main()
```
#### File: data_analysis_project/weather_trend_analysis/weather_trend_analysis.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def computeMovingAverage(data, step):
'''
@description: Compute the moving average.
@param {type} data - input data; step - number of points in each moving-average window
@return: res - the moving-average values
'''
res = np.array([])
for i in range(step, len(data)):
res = np.append(res, np.average(data[i-step:i]))
return res
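# Illustrative example (not part of the original script):
#   computeMovingAverage(np.array([1, 2, 3, 4]), step=2) -> array([1.5, 2.5])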
# read data
fname_weather_global = 'data/weather_global.csv'
fname_weather_shanghai = 'data/weather_shanghai.csv'
weather_global = pd.read_csv(fname_weather_global)
weather_shanghai = pd.read_csv(fname_weather_shanghai)
# compute moving average of temperature
step = 10
weather_shanghai_avg = computeMovingAverage(weather_shanghai.avg_temp, step)
weather_global_avg = computeMovingAverage(weather_global.avg_temp, step)
diff_avg = weather_shanghai_avg - \
weather_global_avg[-len(weather_shanghai_avg):]
# plot
plt.title("Moving average temperature for weather trend")
plt.xlabel("year")
plt.ylabel("Temperature")
plt.plot(weather_shanghai.year[step:], weather_shanghai_avg, color='red')
plt.plot(weather_global.year[step:], weather_global_avg, color='blue')
plt.plot(weather_shanghai.year[step:], diff_avg, color='green')
plt.legend(['Shanghai', 'global', 'diff'])
plt.savefig('plots/weather_trend.png')
plt.show()
``` |
{
"source": "joeshaw/pubnub-python",
"score": 3
} |
#### File: joeshaw/pubnub-python/subscribe-example.py
```python
import sys
from Pubnub import Pubnub
## Initiate Class
pubnub = Pubnub( 'demo', 'demo', None, False )
## Subscribe Example
def receive(message) :
print(message)
return True
channel = sys.argv[1] if len(sys.argv) > 1 else 'hello_world'
print("Listening for messages on '%s' channel..." % channel)
pubnub.subscribe({
'channel' : channel,
'callback' : receive
})
``` |
{
"source": "JoeshGichon/News-App",
"score": 4
} |
#### File: News-App/tests/news_articles_test.py
```python
import unittest
from app.models import News_Articles
class ArticleTest(unittest.TestCase):
'''
Test Class to test the behaviour of the News_article class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
self.new_article = News_Articles("The Associated Press","Man attacked by alligator in Hurricane Ida's floodwaters","https://abcnews.go.com/US/wireStory/man-attacked-alligator-hurricane-idas-floodwaters-79746310","https://s.abcnews.com/images/GMA/210831_gma_zee_0713_hpMain_16x9_992.jpg","2021-08-31T15:39:13Z","SLIDELL, La. -- A man was attacked by a large alligator while walking through floodwaters from Hurricane Ida and is now missing, a Louisiana sheriff said.\r\nThe 71-year-old mans wife told sheriffs dep… [+1041 chars]")
def test_instance(self):
self.assertTrue(isinstance(self.new_article,News_Articles))
``` |
{
"source": "JoeshGichon/pitch-app",
"score": 3
} |
#### File: app/main/views.py
```python
from flask import render_template,request,redirect,url_for,abort
from . import main
from ..models import Pitches,Comments,User
from .forms import PitchForm,CommentForm,UpdateProfile
from .. import db
from flask_login import login_required
@main.route("/")
def index():
title="Home"
return render_template("index.html",title=title)
@main.route('/pitches', methods = ['GET','POST'])
def new_pitch():
form = PitchForm()
if form.validate_on_submit():
title = form.title.data
author = form.author.data
content = form.content.data
new_pitch = Pitches(title,author,content)
new_pitch.save_pitches()
pitchess = Pitches.get_pitches()
title = "pitches"
return render_template('pitches.html',title = title, pitch_form=form,pitchess=pitchess)
@main.route('/comments', methods = ['GET','POST'])
@login_required
def new_comment():
form = CommentForm()
if form.validate_on_submit():
comment= form.comment.data
author=form.author.data
new_comment = Comments(comment,author)
new_comment.save_comment()
commentss = Comments.get_comments()
title = "comments"
return render_template('comments.html',title = title, comment_form=form,commentss=commentss)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
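# Illustrative sketch (assumption, not code from this repo): judging from the fields
# accessed above, the forms imported from .forms presumably look roughly like:
#
#     class PitchForm(FlaskForm):
#         title = StringField('Title')
#         author = StringField('Author')
#         content = TextAreaField('Pitch')
#         submit = SubmitField('Submit')
#
#     class CommentForm(FlaskForm):
#         comment = TextAreaField('Comment')
#         author = StringField('Author')
#         submit = SubmitField('Submit')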
``` |
{
"source": "JoeshGichon/python-password-lock",
"score": 3
} |
#### File: JoeshGichon/python-password-lock/passwordLockerTest.py
```python
from passwordLoker import User,Credentials
import unittest
class TestPassword(unittest.TestCase):
def setUp(self):
self.new_user = User("<NAME>","JG","5678")
self.new_credentials = Credentials("facebook","JG","5678")
def tearDown(self):
User.users_list = []
Credentials.credentials_list = []
def test_init(self):
self.assertEqual(self.new_user.official_name,"<NAME>")
        self.assertEqual(self.new_user.username,"JG")
self.assertEqual(self.new_user.password,"<PASSWORD>")
self.assertEqual(self.new_credentials.account,"facebook")
self.assertEqual(self.new_credentials.username,"JG")
self.assertEqual(self.new_credentials.password,"<PASSWORD>")
def test_save_user(self):
self.new_user.save_user()
self.new_credentials.save_credentials()
self.assertEqual(len(User.users_list),1)
self.assertEqual(len(Credentials.credentials_list),1)
def test_save_multiple_credentials(self):
self.new_credentials.save_credentials()
test_credential = Credentials("Instagrame","Joe","JT")
test_credential.save_credentials()
message = "Cannot add multiple user"
self.assertGreater(len(Credentials.credentials_list),1,message)
def test_save_multiple_users(self):
self.new_user.save_user()
test_user = User("Joe Tech","JT","0000")
test_user.save_user()
self.assertEqual(len(User.users_list),2)
def test_find_account_by_password(self):
self.new_user.save_user()
test_user = User("Joe Tech","JT","0000")
test_user.save_user()
found_user = User.find_by_password("<PASSWORD>")
self.assertEqual(found_user.password,test_user.password)
def test_find_credential(self):
self.new_credentials.save_credentials()
test_credential = Credentials("Instagram","JG","6789")
test_credential.save_credentials()
found_credential = Credentials.find_credential("Instagram")
self.assertEqual(found_credential.account,test_credential.account)
def test_delete_credentials_account(self):
self.new_credentials.save_credentials()
test_credential = Credentials("Instagram","JG","6789")
test_credential.save_credentials()
test_credential.delete_credential_account("Instagram")
self.assertEqual(len(Credentials.credentials_list),1)
if __name__ == '__main__':
unittest.main()
```
#### File: JoeshGichon/python-password-lock/passwordLoker.py
```python
class User:
users_list = []
def save_user(self):
User.users_list.append(self)
@classmethod
def find_by_password(cls,password_input):
for found in cls.users_list:
if found.password == password_input:
return found
def __init__(self,o_name,u_name,p_word):
self.official_name = o_name
self.username = u_name
        self.password = p_word
class Credentials:
credentials_list = []
def save_credentials(self):
Credentials.credentials_list.append(self)
@classmethod
def find_credential(cls,account_name_input):
for found in cls.credentials_list:
if found.account == account_name_input:
return found
@classmethod
def delete_credential_account(self,to_delete):
for indeletion in self.credentials_list:
if indeletion.account == to_delete:
return Credentials.credentials_list.remove(indeletion)
def __init__(self,account,u_name,p_word):
self.account = account
self.username = u_name
        self.password = p_word
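# Illustrative usage (not part of the original repo), based only on the classes above:
#     user = User("Jane Doe", "jd", "1234")
#     user.save_user()
#     cred = Credentials("twitter", "jd", "abcd")
#     cred.save_credentials()
#     Credentials.find_credential("twitter")            # -> cred
#     Credentials.delete_credential_account("twitter")  # removes it from credentials_list
#     User.find_by_password("1234")                     # -> user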
``` |
{
"source": "JoeShi/amazon-s3-resumable-upload",
"score": 2
} |
#### File: amazon-s3-resumable-upload/single_node/s3_upload.py
```python
import os
import sys
import json
import base64
from boto3.session import Session
from botocore.client import Config
from concurrent import futures
from configparser import ConfigParser, RawConfigParser, NoOptionError
import time
import datetime
import hashlib
import logging
from pathlib import PurePosixPath, Path
import platform
import codecs
os.system("") # workaround for some windows system to print color
global JobType, SrcFileIndex, DesProfileName, DesBucket, S3Prefix, MaxRetry, MaxThread, \
MaxParallelFile, StorageClass, ifVerifyMD5, DontAskMeToClean, LoggingLevel, \
SrcDir, SrcBucket, SrcProfileName, ali_SrcBucket, ali_access_key_id, ali_access_key_secret, ali_endpoint
# Read config.ini with GUI
def set_config():
sys_para = sys.argv
file_path = os.path.split(sys_para[0])[0]
gui = False
    if platform.uname()[0] == 'Windows':  # GUI is enabled by default on Windows
        gui = True
    if platform.uname()[0] == 'Linux':  # GUI is disabled by default on Linux
        gui = False
    if '--gui' in sys.argv:  # force GUI mode
        gui = True
    if '--nogui' in sys.argv:  # --nogui overrides the Windows default above
        gui = False
JobType_list = ['LOCAL_TO_S3', 'S3_TO_S3', 'ALIOSS_TO_S3']
StorageClass_list = ['STANDARD', 'REDUCED_REDUNDANCY', 'STANDARD_IA', 'ONEZONE_IA', 'INTELLIGENT_TIERING',
'GLACIER', 'DEEP_ARCHIVE']
config_file = os.path.join(file_path, 's3_upload_config.ini')
# If no config file, read the default config
if not os.path.exists(config_file):
config_file += '.default'
print("No customized config, use the default config")
cfg = ConfigParser()
print(f'Reading config file: {config_file}')
# Get local config value
try:
global JobType, SrcFileIndex, DesProfileName, DesBucket, S3Prefix, MaxRetry, MaxThread, \
MaxParallelFile, StorageClass, ifVerifyMD5, DontAskMeToClean, LoggingLevel, \
SrcDir, SrcBucket, SrcProfileName, ali_SrcBucket, ali_access_key_id, ali_access_key_secret, ali_endpoint
cfg.read(config_file, encoding='utf-8-sig')
JobType = cfg.get('Basic', 'JobType')
SrcFileIndex = cfg.get('Basic', 'SrcFileIndex')
DesProfileName = cfg.get('Basic', 'DesProfileName')
DesBucket = cfg.get('Basic', 'DesBucket')
S3Prefix = cfg.get('Basic', 'S3Prefix')
Megabytes = 1024 * 1024
ChunkSize = cfg.getint('Advanced', 'ChunkSize') * Megabytes
MaxRetry = cfg.getint('Advanced', 'MaxRetry')
MaxThread = cfg.getint('Advanced', 'MaxThread')
MaxParallelFile = cfg.getint('Advanced', 'MaxParallelFile')
StorageClass = cfg.get('Advanced', 'StorageClass')
ifVerifyMD5 = cfg.getboolean('Advanced', 'ifVerifyMD5')
DontAskMeToClean = cfg.getboolean('Advanced', 'DontAskMeToClean')
LoggingLevel = cfg.get('Advanced', 'LoggingLevel')
try:
SrcDir = cfg.get('LOCAL_TO_S3', 'SrcDir')
except NoOptionError:
SrcDir = ''
try:
SrcBucket = cfg.get('S3_TO_S3', 'SrcBucket')
SrcProfileName = cfg.get('S3_TO_S3', 'SrcProfileName')
except NoOptionError:
SrcBucket = ''
SrcProfileName = ''
try:
ali_SrcBucket = cfg.get('ALIOSS_TO_S3', 'ali_SrcBucket')
ali_access_key_id = cfg.get('ALIOSS_TO_S3', 'ali_access_key_id')
ali_access_key_secret = cfg.get('ALIOSS_TO_S3', 'ali_access_key_secret')
ali_endpoint = cfg.get('ALIOSS_TO_S3', 'ali_endpoint')
except NoOptionError:
ali_SrcBucket = ""
ali_access_key_id = ""
ali_access_key_secret = ""
ali_endpoint = ""
except Exception as e:
print("ERR loading s3_upload_config.ini", str(e))
input('PRESS ENTER TO QUIT')
sys.exit(0)
    # The GUI only fully supports LOCAL_TO_S3 mode; start with the --gui option.
    # For other JobTypes the GUI is not the preferred option, since they are better run on EC2 Linux.
if gui:
# For GUI
from tkinter import Tk, filedialog, END, StringVar, BooleanVar, messagebox
from tkinter.ttk import Combobox, Label, Button, Entry, Spinbox, Checkbutton
        # get profile name list in ~/.aws/credentials
pro_conf = RawConfigParser()
pro_path = os.path.join(os.path.expanduser("~"), ".aws")
cre_path = os.path.join(pro_path, "credentials")
if os.path.exists(cre_path):
pro_conf.read(cre_path)
profile_list = pro_conf.sections()
else:
            print(f"There is no aws_access_key in {cre_path}, please input the credentials for the destination S3 bucket: ")
os.mkdir(pro_path)
aws_access_key_id = input('aws_access_key_id: ')
aws_secret_access_key = input('aws_secret_access_key: ')
region = input('region: ')
pro_conf.add_section('default')
pro_conf['default']['aws_access_key_id'] = aws_access_key_id
pro_conf['default']['aws_secret_access_key'] = aws_secret_access_key
pro_conf['default']['region'] = region
profile_list = ['default']
with open(cre_path, 'w') as f:
print(f"Saving credentials to {cre_path}")
pro_conf.write(f)
# Click Select Folder
def browse_folder():
local_dir = filedialog.askdirectory(initialdir=os.path.dirname(__file__))
url_txt.delete(0, END)
url_txt.insert(0, local_dir)
file_txt.delete(0, END)
file_txt.insert(0, "*")
        # Finish browse folder
# Click Select File
def browse_file():
local_file = filedialog.askopenfilename()
url_txt.delete(0, END)
url_txt.insert(0, os.path.split(local_file)[0])
file_txt.delete(0, END)
file_txt.insert(0, os.path.split(local_file)[1])
        # Finish browse file
# Click List Buckets
def ListBuckets(*args):
DesProfileName = DesProfileName_txt.get()
client = Session(profile_name=DesProfileName).client('s3')
bucket_list = []
try:
response = client.list_buckets()
if 'Buckets' in response:
bucket_list = [b['Name'] for b in response['Buckets']]
except Exception as e:
                messagebox.showerror('Error', f'Failed to list buckets. \n'
f'Please verify your aws_access_key of profile: [{DesProfileName}]\n'
f'{str(e)}')
bucket_list = ['CAN_NOT_GET_BUCKET_LIST']
DesBucket_txt['values'] = bucket_list
DesBucket_txt.current(0)
# Finish ListBuckets
# Click List Prefix
def ListPrefix(*args):
DesProfileName = DesProfileName_txt.get()
client = Session(profile_name=DesProfileName).client('s3')
prefix_list = []
this_bucket = DesBucket_txt.get()
max_get = 100
try:
response = client.list_objects_v2(
Bucket=this_bucket,
Delimiter='/'
                )  # Only fetch the first 1000 prefixes, for a simple list
if 'CommonPrefixes' in response:
prefix_list = [c['Prefix'] for c in response['CommonPrefixes']]
if not prefix_list:
messagebox.showinfo('Message', f'There is no "/" Prefix in: {this_bucket}')
if response['IsTruncated']:
messagebox.showinfo('Message', f'More than {max_get} Prefix, cannot fully list here.')
except Exception as e:
messagebox.showinfo('Error', f'Cannot get prefix list from bucket: {this_bucket}, {str(e)}')
S3Prefix_txt['values'] = prefix_list
S3Prefix_txt.current(0)
# Finish list prefix
# Change JobType
def job_change(*args):
if JobType_mode.get() != 'LOCAL_TO_S3':
                messagebox.showinfo('Notice', 'S3_TO_S3 or OSS_TO_S3. \n'
                                    'Please configure the remaining hidden parameters in s3_upload_config.ini')
# Finish JobType change message
# Click START button
def close():
window.withdraw()
ok = messagebox.askokcancel('Start uploading job',
f'Upload from Local to \ns3://{DesBucket_txt.get()}/{S3Prefix_txt.get()}\n'
f'Click OK to START')
if not ok:
window.deiconify()
return
window.quit()
return
# Finish close()
# Start GUI
window = Tk()
window.title("LONGBOW - AMAZON S3 UPLOAD TOOL WITH BREAK-POINT RESUMING")
window.geometry('705x350')
window.configure(background='#ECECEC')
window.protocol("WM_DELETE_WINDOW", sys.exit)
Label(window, text='Job Type').grid(column=0, row=0, sticky='w', padx=2, pady=2)
JobType_mode = Combobox(window, width=15, state="readonly")
JobType_mode['values'] = tuple(JobType_list)
JobType_mode.grid(column=1, row=0, sticky='w', padx=2, pady=2)
if JobType in JobType_list:
position = JobType_list.index(JobType)
JobType_mode.current(position)
else:
JobType_mode.current(0)
JobType_mode.bind("<<ComboboxSelected>>", job_change)
Label(window, text="Folder").grid(column=0, row=1, sticky='w', padx=2, pady=2)
url_txt = Entry(window, width=50)
url_txt.grid(column=1, row=1, sticky='w', padx=2, pady=2)
url_btn = Button(window, text="Select Folder", width=10, command=browse_folder)
url_btn.grid(column=2, row=1, sticky='w', padx=2, pady=2)
url_txt.insert(0, SrcDir)
Label(window, text="Filename or *").grid(column=0, row=2, sticky='w', padx=2, pady=2)
file_txt = Entry(window, width=50)
file_txt.grid(column=1, row=2, sticky='w', padx=2, pady=2)
file_btn = Button(window, text="Select File", width=10, command=browse_file)
file_btn.grid(column=2, row=2, sticky='w', padx=2, pady=2)
file_txt.insert(0, SrcFileIndex)
Label(window, text="AWS Profile").grid(column=0, row=3, sticky='w', padx=2, pady=2)
DesProfileName_txt = Combobox(window, width=15, state="readonly")
DesProfileName_txt['values'] = tuple(profile_list)
DesProfileName_txt.grid(column=1, row=3, sticky='w', padx=2, pady=2)
if DesProfileName in profile_list:
position = profile_list.index(DesProfileName)
DesProfileName_txt.current(position)
else:
DesProfileName_txt.current(0)
DesProfileName = DesProfileName_txt.get()
DesProfileName_txt.bind("<<ComboboxSelected>>", ListBuckets)
Label(window, text="S3 Bucket").grid(column=0, row=4, sticky='w', padx=2, pady=2)
DesBucket_txt = Combobox(window, width=48)
DesBucket_txt.grid(column=1, row=4, sticky='w', padx=2, pady=2)
DesBucket_txt['values'] = DesBucket
DesBucket_txt.current(0)
Button(window, text="List Buckets", width=10, command=ListBuckets) \
.grid(column=2, row=4, sticky='w', padx=2, pady=2)
Label(window, text="S3 Prefix").grid(column=0, row=5, sticky='w', padx=2, pady=2)
S3Prefix_txt = Combobox(window, width=48)
S3Prefix_txt.grid(column=1, row=5, sticky='w', padx=2, pady=2)
S3Prefix_txt['values'] = S3Prefix
if S3Prefix != '':
S3Prefix_txt.current(0)
Button(window, text="List Prefix", width=10, command=ListPrefix) \
.grid(column=2, row=5, sticky='w', padx=2, pady=2)
Label(window, text="MaxThread/File").grid(column=0, row=6, sticky='w', padx=2, pady=2)
if MaxThread < 1 or MaxThread > 100:
MaxThread = 5
var_t = StringVar()
var_t.set(str(MaxThread))
MaxThread_txt = Spinbox(window, from_=1, to=100, width=15, textvariable=var_t)
MaxThread_txt.grid(column=1, row=6, sticky='w', padx=2, pady=2)
Label(window, text="MaxParallelFile").grid(column=0, row=7, sticky='w', padx=2, pady=2)
if MaxParallelFile < 1 or MaxParallelFile > 100:
MaxParallelFile = 5
var_f = StringVar()
var_f.set(str(MaxParallelFile))
MaxParallelFile_txt = Spinbox(window, from_=1, to=100, width=15, textvariable=var_f)
MaxParallelFile_txt.grid(column=1, row=7, sticky='w', padx=2, pady=2)
Label(window, text="S3 StorageClass").grid(column=0, row=8, sticky='w', padx=2, pady=2)
StorageClass_txt = Combobox(window, width=15, state="readonly")
StorageClass_txt['values'] = tuple(StorageClass_list)
StorageClass_txt.grid(column=1, row=8, sticky='w', padx=2, pady=2)
if StorageClass in StorageClass_list:
position = StorageClass_list.index(StorageClass)
StorageClass_txt.current(position)
else:
StorageClass_txt.current(0)
save_config = BooleanVar()
save_config.set(True)
save_config_txt = Checkbutton(window, text="Save to s3_upload_config.ini", var=save_config)
save_config_txt.grid(column=1, row=9, padx=2, pady=2)
Button(window, text="Start Upload", width=15, command=close).grid(column=1, row=10, padx=5, pady=5)
window.mainloop()
JobType = JobType_mode.get()
SrcDir = url_txt.get()
SrcFileIndex = file_txt.get()
DesBucket = DesBucket_txt.get()
S3Prefix = S3Prefix_txt.get()
DesProfileName = DesProfileName_txt.get()
StorageClass = StorageClass_txt.get()
MaxThread = int(MaxThread_txt.get())
MaxParallelFile = int(MaxParallelFile_txt.get())
        if save_config.get():  # BooleanVar must be read with .get(); the bare object is always truthy
cfg['Basic']['JobType'] = JobType
cfg['Basic']['DesProfileName'] = DesProfileName
cfg['Basic']['DesBucket'] = DesBucket
cfg['Basic']['S3Prefix'] = S3Prefix
cfg['Advanced']['MaxThread'] = str(MaxThread)
cfg['Advanced']['MaxParallelFile'] = str(MaxParallelFile)
cfg['Advanced']['StorageClass'] = StorageClass
cfg['LOCAL_TO_S3']['SrcDir'] = SrcDir
cfg['Basic']['SrcFileIndex'] = SrcFileIndex
config_file = os.path.join(file_path, 's3_upload_config.ini')
with codecs.open(config_file, 'w', 'utf-8') as f:
cfg.write(f)
print(f"Save config to {config_file}")
# GUI window finish
    S3Prefix = str(PurePosixPath(S3Prefix))  # strip the trailing '/', if any
    if S3Prefix == '/' or S3Prefix == '.':
        S3Prefix = ''
    # Validate JobType
if JobType not in JobType_list:
print(f'ERR JobType: {JobType}, check config file: {config_file}')
input('PRESS ENTER TO QUIT')
sys.exit(0)
# Finish set_config()
return ChunkSize
# Configure logging
def set_log():
logger = logging.getLogger()
# File logging
if not os.path.exists("./log"):
os.system("mkdir log")
this_file_name = os.path.splitext(os.path.basename(__file__))[0]
file_time = datetime.datetime.now().isoformat().replace(':', '-')[:19]
log_file_name = './log/' + this_file_name + '-' + file_time + '.log'
print('Logging to file:', os.path.abspath(log_file_name))
print('Logging level:', LoggingLevel)
fileHandler = logging.FileHandler(filename=log_file_name, encoding='utf-8')
fileHandler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s - %(message)s'))
logger.addHandler(fileHandler)
# Screen stream logging
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s - %(message)s'))
logger.addHandler(streamHandler)
    # Logging level
logger.setLevel(logging.WARNING)
if LoggingLevel == 'INFO':
logger.setLevel(logging.INFO)
elif LoggingLevel == 'DEBUG':
logger.setLevel(logging.DEBUG)
return logger, log_file_name
# Get local file list
def get_local_file_list(str_key=False):
__src_file_list = []
try:
if SrcFileIndex == "*":
for parent, dirnames, filenames in os.walk(SrcDir):
                for filename in filenames:  # iterate over the files and record their info
file_absPath = os.path.join(parent, filename)
file_relativePath = file_absPath[len(SrcDir) + 1:]
file_size = os.path.getsize(file_absPath)
key = Path(file_relativePath)
if str_key:
key = str(key)
__src_file_list.append({
"Key": key,
"Size": file_size
})
else:
join_path = os.path.join(SrcDir, SrcFileIndex)
file_size = os.path.getsize(join_path)
__src_file_list = [{
"Key": SrcFileIndex,
"Size": file_size
}]
except Exception as err:
logger.error('Can not get source files. ERR: ' + str(err))
input('PRESS ENTER TO QUIT')
sys.exit(0)
if not __src_file_list:
logger.error('Source file empty.')
input('PRESS ENTER TO QUIT')
sys.exit(0)
return __src_file_list
# Get object list on S3
def get_s3_file_list(*, s3_client, bucket, S3Prefix, no_prefix=False):
logger.info('Get s3 file list ' + bucket)
# For delete prefix in des_prefix
    if S3Prefix == '':
        # The destination bucket has no prefix configured
        dp_len = 0
    else:
        # Length of the destination bucket's "prefix/" to strip
        dp_len = len(S3Prefix) + 1
paginator = s3_client.get_paginator('list_objects_v2')
__des_file_list = []
try:
response_iterator = paginator.paginate(
Bucket=bucket,
Prefix=S3Prefix
)
for page in response_iterator:
if "Contents" in page:
for n in page["Contents"]:
key = n["Key"]
if no_prefix:
key = key[dp_len:]
__des_file_list.append({
"Key": key,
"Size": n["Size"]
})
logger.info(f'Bucket list length:{str(len(__des_file_list))}')
except Exception as err:
logger.error(str(err))
input('PRESS ENTER TO QUIT')
sys.exit(0)
return __des_file_list
# Check single file on S3
def head_s3_single_file(s3_client, bucket):
try:
response_fileList = s3_client.head_object(
Bucket=bucket,
Key=str(Path(S3Prefix)/SrcFileIndex)
)
file = [{
"Key": str(Path(S3Prefix)/SrcFileIndex),
"Size": response_fileList["ContentLength"]
}]
except Exception as err:
logger.error(str(err))
input('PRESS ENTER TO QUIT')
sys.exit(0)
return file
# Check single file on OSS
def head_oss_single_file(__ali_bucket):
try:
response_fileList = __ali_bucket.head_object(
key=S3Prefix + SrcFileIndex
)
file = [{
"Key": S3Prefix + SrcFileIndex,
"Size": response_fileList.content_length
}]
except Exception as err:
logger.error(str(err))
input('PRESS ENTER TO QUIT')
sys.exit(0)
return file
# Get object list on OSS
def get_ali_oss_file_list(__ali_bucket):
logger.info('Get oss file list ' + ali_SrcBucket)
__des_file_list = []
try:
response_fileList = __ali_bucket.list_objects(
prefix=S3Prefix,
max_keys=1000
)
if len(response_fileList.object_list) != 0:
for n in response_fileList.object_list:
__des_file_list.append({
"Key": n.key,
"Size": n.size
})
while response_fileList.is_truncated:
response_fileList = __ali_bucket.list_objects(
prefix=S3Prefix,
max_keys=1000,
marker=response_fileList.next_marker
)
for n in response_fileList.object_list:
__des_file_list.append({
"Key": n.key,
"Size": n.size
})
else:
logger.info('File list is empty in the ali_oss bucket')
except Exception as err:
logger.error(str(err))
input('PRESS ENTER TO QUIT')
sys.exit(0)
return __des_file_list
# Get all exist object list on S3
def get_uploaded_list(s3_client):
logger.info('Get unfinished multipart upload')
NextKeyMarker = ''
IsTruncated = True
__multipart_uploaded_list = []
while IsTruncated:
list_multipart_uploads = s3_client.list_multipart_uploads(
Bucket=DesBucket,
Prefix=S3Prefix,
MaxUploads=1000,
KeyMarker=NextKeyMarker
)
IsTruncated = list_multipart_uploads["IsTruncated"]
NextKeyMarker = list_multipart_uploads["NextKeyMarker"]
if NextKeyMarker != '':
for i in list_multipart_uploads["Uploads"]:
__multipart_uploaded_list.append({
"Key": i["Key"],
"Initiated": i["Initiated"],
"UploadId": i["UploadId"]
})
logger.info(f'Unfinished upload, Key: {i["Key"]}, Time: {i["Initiated"]}')
return __multipart_uploaded_list
# Jump to handle next file
class NextFile(Exception):
pass
def uploadThread_small(srcfile, prefix_and_key):
print(f'\033[0;32;1m--->Uploading\033[0m {srcfile["Key"]} - small file')
with open(os.path.join(SrcDir, srcfile["Key"]), 'rb') as data:
for retryTime in range(MaxRetry + 1):
try:
pstart_time = time.time()
chunkdata = data.read()
chunkdata_md5 = hashlib.md5(chunkdata)
s3_dest_client.put_object(
Body=chunkdata,
Bucket=DesBucket,
Key=prefix_and_key,
ContentMD5=base64.b64encode(chunkdata_md5.digest()).decode('utf-8'),
StorageClass=StorageClass
)
pload_time = time.time() - pstart_time
pload_bytes = len(chunkdata)
pload_speed = size_to_str(int(pload_bytes / pload_time)) + "/s"
print(f'\033[0;34;1m --->Complete\033[0m {srcfile["Key"]} - small file - {pload_speed}')
break
except Exception as e:
logger.warning(f'Upload small file Fail: {srcfile["Key"]}, '
f'{str(e)}, Attempts: {retryTime}')
if retryTime >= MaxRetry:
logger.error(f'Fail MaxRetry Download/Upload small file: {srcfile["Key"]}')
return "MaxRetry"
else:
time.sleep(5 * retryTime)
return
def download_uploadThread_small(srcfileKey):
for retryTime in range(MaxRetry + 1):
try:
pstart_time = time.time()
# Get object
print(f"\033[0;33;1m--->Downloading\033[0m {srcfileKey} - small file")
response_get_object = s3_src_client.get_object(
Bucket=SrcBucket,
Key=srcfileKey
)
getBody = response_get_object["Body"].read()
chunkdata_md5 = hashlib.md5(getBody)
ContentMD5 = base64.b64encode(chunkdata_md5.digest()).decode('utf-8')
# Put object
print(f'\033[0;32;1m --->Uploading\033[0m {srcfileKey} - small file')
s3_dest_client.put_object(
Body=getBody,
Bucket=DesBucket,
Key=srcfileKey,
ContentMD5=ContentMD5,
StorageClass=StorageClass
)
            # Upload/download finished
pload_time = time.time() - pstart_time
pload_bytes = len(getBody)
pload_speed = size_to_str(int(pload_bytes / pload_time)) + "/s"
print(f'\033[0;34;1m --->Complete\033[0m {srcfileKey} - small file - {pload_speed}')
break
except Exception as e:
logger.warning(f'Download/Upload small file Fail: {srcfileKey}, '
f'{str(e)}, Attempts: {retryTime}')
if retryTime >= MaxRetry:
logger.error(f'Fail MaxRetry Download/Upload small file: {srcfileKey}')
return "MaxRetry"
else:
time.sleep(5 * retryTime)
return
def alioss_download_uploadThread_small(srcfileKey):
for retryTime in range(MaxRetry + 1):
try:
pstart_time = time.time()
            # Get Object
print(f"\033[0;33;1m--->Downloading\033[0m {srcfileKey} - small file")
response_get_object = ali_bucket.get_object(
key=srcfileKey
)
getBody = b''
for chunk in response_get_object:
if chunk != '':
getBody += chunk
chunkdata_md5 = hashlib.md5(getBody)
# Put Object
print(f"\033[0;32;1m --->Uploading\033[0m {srcfileKey} - small file")
s3_dest_client.put_object(
Body=getBody,
Bucket=DesBucket,
Key=srcfileKey,
ContentMD5=base64.b64encode(chunkdata_md5.digest()).decode('utf-8'),
StorageClass=StorageClass
)
pload_time = time.time() - pstart_time
pload_bytes = len(getBody)
pload_speed = size_to_str(int(pload_bytes / pload_time)) + "/s"
print(f'\033[0;34;1m --->Complete\033[0m {srcfileKey} - small file - {pload_speed}')
break
except Exception as e:
logger.warning(f'Download/Upload small file Fail: {srcfileKey} - small file, '
f'{str(e)}, Attempts: {retryTime}')
if retryTime >= MaxRetry:
logger.error(f'Fail MaxRetry Download/Upload small file: {srcfileKey} - small file')
return "MaxRetry"
else:
time.sleep(5 * retryTime)
return
# Upload file with different JobType
def upload_file(*, srcfile, desFilelist, UploadIdList, ChunkSize_default): # UploadIdList就是multipart_uploaded_list
logger.info(f'Start file: {srcfile["Key"]}')
prefix_and_key = srcfile["Key"]
if JobType == 'LOCAL_TO_S3':
prefix_and_key = str(PurePosixPath(S3Prefix) / srcfile["Key"])
if srcfile['Size'] >= ChunkSize_default:
try:
            # Retry up to 3 times if the locally calculated MD5 ETag does not match
            for md5_retry in range(3):
                # Check whether the file already exists: if it does, skip it; if not and
                # there is no UploadId, create a new upload; if not but an UploadId
                # exists, resume with the returned UploadId
response_check_upload = check_file_exist(srcfile=srcfile,
desFilelist=desFilelist,
UploadIdList=UploadIdList)
if response_check_upload == 'UPLOAD':
logger.info(f'New upload: {srcfile["Key"]}')
response_new_upload = s3_dest_client.create_multipart_upload(
Bucket=DesBucket,
Key=prefix_and_key,
StorageClass=StorageClass
)
# logger.info("UploadId: "+response_new_upload["UploadId"])
reponse_uploadId = response_new_upload["UploadId"]
partnumberList = []
elif response_check_upload == 'NEXT':
logger.info(f'Duplicated. {srcfile["Key"]} same size, goto next file.')
raise NextFile()
else:
reponse_uploadId = response_check_upload
                    # fetch the list of part numbers that have already been uploaded
partnumberList = checkPartnumberList(srcfile, reponse_uploadId)
                # build the start-index list, e.g. [0, 10, 20]
response_indexList, ChunkSize_auto = split(srcfile, ChunkSize_default)
                # perform the multipart upload
upload_etag_full = uploadPart(uploadId=reponse_uploadId,
indexList=response_indexList,
partnumberList=partnumberList,
srcfile=srcfile,
ChunkSize_auto=ChunkSize_auto)
                # merge the parts into the final object on S3
response_complete = completeUpload(reponse_uploadId=reponse_uploadId,
srcfileKey=srcfile["Key"],
len_indexList=len(response_indexList))
logger.info(f'FINISH: {srcfile["Key"]} TO {response_complete["Location"]}')
                # verify the file MD5
if ifVerifyMD5:
if response_complete["ETag"] == upload_etag_full:
logger.info(f'MD5 ETag Matched - {srcfile["Key"]} - {response_complete["ETag"]}')
break
                    else:  # ETag mismatch: delete the S3 object and retry
logger.warning(f'MD5 ETag NOT MATCHED {srcfile["Key"]}( Destination / Origin ): '
f'{response_complete["ETag"]} - {upload_etag_full}')
s3_dest_client.delete_object(
Bucket=DesBucket,
Key=prefix_and_key
)
UploadIdList = []
                        logger.warning(f'Deleted object and will retry upload {srcfile["Key"]}')
if md5_retry == 2:
                            logger.warning(f'MD5 ETag NOT MATCHED and exceeded max retries - {srcfile["Key"]}')
else:
break
except NextFile:
pass
# Small file procedure
else:
# Check file exist
for f in desFilelist:
if f["Key"] == prefix_and_key and \
(srcfile["Size"] == f["Size"]):
logger.info(f'Duplicated. {prefix_and_key} same size, goto next file.')
return
        # File not found, or size mismatch: submit the upload
if JobType == 'LOCAL_TO_S3':
uploadThread_small(srcfile, prefix_and_key)
elif JobType == 'S3_TO_S3':
download_uploadThread_small(srcfile["Key"])
elif JobType == 'ALIOSS_TO_S3':
alioss_download_uploadThread_small(srcfile["Key"])
return
# Check whether the file already exists in the destination bucket
def check_file_exist(*, srcfile, desFilelist, UploadIdList):
    # check whether the source file is already under the destination prefix
prefix_and_key = srcfile["Key"]
if JobType == 'LOCAL_TO_S3':
prefix_and_key = str(PurePosixPath(S3Prefix) / srcfile["Key"])
for f in desFilelist:
if f["Key"] == prefix_and_key and \
(srcfile["Size"] == f["Size"]):
            return 'NEXT'  # the file is identical
    # The file was not found, or the size differs, so it needs to be re-uploaded.
    # Check whether this Key has an unfinished UploadId.
keyIDList = []
for u in UploadIdList:
if u["Key"] == prefix_and_key:
keyIDList.append(u)
    # If no prior upload is found for this Key, start a brand new upload
if not keyIDList:
return 'UPLOAD'
    # Among the uploads of the same Key (file), pick the latest one by Initiated time
UploadID_latest = keyIDList[0]
for u in keyIDList:
if u["Initiated"] > UploadID_latest["Initiated"]:
UploadID_latest = u
return UploadID_latest["UploadId"]
# Check parts number exist on S3
def checkPartnumberList(srcfile, uploadId):
try:
prefix_and_key = srcfile["Key"]
if JobType == 'LOCAL_TO_S3':
prefix_and_key = str(PurePosixPath(S3Prefix) / srcfile["Key"])
partnumberList = []
PartNumberMarker = 0
IsTruncated = True
while IsTruncated:
response_uploadedList = s3_dest_client.list_parts(
Bucket=DesBucket,
Key=prefix_and_key,
UploadId=uploadId,
MaxParts=1000,
PartNumberMarker=PartNumberMarker
)
NextPartNumberMarker = response_uploadedList['NextPartNumberMarker']
IsTruncated = response_uploadedList['IsTruncated']
if NextPartNumberMarker > 0:
for partnumberObject in response_uploadedList["Parts"]:
partnumberList.append(partnumberObject["PartNumber"])
PartNumberMarker = NextPartNumberMarker
        if partnumberList:  # an empty list means no uploaded parts were found
logger.info("Found uploaded partnumber: " + json.dumps(partnumberList))
except Exception as checkPartnumberList_err:
logger.error("checkPartnumberList_err" + json.dumps(checkPartnumberList_err))
input('PRESS ENTER TO QUIT')
sys.exit(0)
return partnumberList
# split the file into a virtual part list of index, each index is the start point of the file
def split(srcfile, ChunkSize):
partnumber = 1
indexList = [0]
if int(srcfile["Size"] / ChunkSize) + 1 > 10000:
        ChunkSize = int(srcfile["Size"] / 10000) + 1024  # auto-adjust ChunkSize for files that would exceed 10000 parts
        logger.info(f'Size exceeds the 10000 parts limit. Auto change ChunkSize to {ChunkSize}')
    while ChunkSize * partnumber < srcfile["Size"]:  # if the size is an exact multiple, no extra part is needed, so use "<" rather than "<="
indexList.append(ChunkSize * partnumber)
partnumber += 1
return indexList, ChunkSize
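# Illustrative example: for srcfile["Size"] = 25 MiB and ChunkSize = 10 MiB, split()
# returns indexList = [0, 10485760, 20971520] and the unchanged ChunkSize, i.e. three
# parts starting at those byte offsets.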
# upload parts in the list
def uploadPart(*, uploadId, indexList, partnumberList, srcfile, ChunkSize_auto):
    partnumber = 1  # part number to upload in the current loop iteration
total = len(indexList)
md5list = [hashlib.md5(b'')] * total
complete_list = []
    # Thread pool start
with futures.ThreadPoolExecutor(max_workers=MaxThread) as pool:
for partStartIndex in indexList:
# start to upload part
if partnumber not in partnumberList:
dryrun = False
else:
dryrun = True
            # upload 1 part per thread, or dryrun to only calculate the md5
if JobType == 'LOCAL_TO_S3':
pool.submit(uploadThread,
uploadId=uploadId,
partnumber=partnumber,
partStartIndex=partStartIndex,
srcfileKey=srcfile["Key"],
total=total,
md5list=md5list,
dryrun=dryrun,
complete_list=complete_list,
ChunkSize=ChunkSize_auto)
elif JobType == 'S3_TO_S3':
pool.submit(download_uploadThread,
uploadId=uploadId,
partnumber=partnumber,
partStartIndex=partStartIndex,
srcfileKey=srcfile["Key"],
total=total,
md5list=md5list,
dryrun=dryrun,
complete_list=complete_list,
ChunkSize=ChunkSize_auto)
elif JobType == 'ALIOSS_TO_S3':
pool.submit(alioss_download_uploadThread,
uploadId=uploadId,
partnumber=partnumber,
partStartIndex=partStartIndex,
srcfileKey=srcfile["Key"],
srcfileSize=srcfile["Size"],
total=total,
md5list=md5list,
dryrun=dryrun,
complete_list=complete_list,
ChunkSize=ChunkSize_auto)
partnumber += 1
    # Thread pool end
logger.info(f'All parts uploaded - {srcfile["Key"]} - size: {srcfile["Size"]}')
    # For local uploads the file may change during transfer, so re-scan the local file
    # and recompute the MD5s instead of reusing the md5list built from the bodies read earlier.
if ifVerifyMD5 and JobType == 'LOCAL_TO_S3':
md5list = cal_md5list(indexList=indexList,
srcfileKey=srcfile["Key"],
ChunkSize=ChunkSize_auto)
    # compute the overall ETag of the whole part list: cal_etag
digests = b"".join(m.digest() for m in md5list)
md5full = hashlib.md5(digests)
cal_etag = '"%s-%s"' % (md5full.hexdigest(), len(md5list))
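    # e.g. a 3-part upload yields an ETag of the form '"<32 hex digits>-3"', where the
    # hex digits are the MD5 of the concatenated per-part MD5 digests.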
return cal_etag
# convert bytes to human readable string
def size_to_str(size):
def loop(integer, remainder, level):
if integer >= 1024:
remainder = integer % 1024
integer //= 1024
level += 1
return loop(integer, remainder, level)
else:
return integer, round(remainder / 1024, 1), level
units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
integer, remainder, level = loop(int(size), 0, 0)
if level+1 > len(units):
level = -1
return f'{integer+remainder} {units[level]}'
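# Illustrative example: size_to_str(1536) returns '1.5 KB' (integer=1, remainder=0.5, level=1).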
# Re-calculate the MD5 of the local file once more
def cal_md5list(*, indexList, srcfileKey, ChunkSize):
logger.info(f'Re-read local file to calculate MD5 again: {srcfileKey}')
md5list = []
with open(os.path.join(SrcDir, srcfileKey), 'rb') as data:
for partStartIndex in indexList:
data.seek(partStartIndex)
chunkdata = data.read(ChunkSize)
chunkdata_md5 = hashlib.md5(chunkdata)
md5list.append(chunkdata_md5)
return md5list
# Single Thread Upload one part, from local to s3
def uploadThread(*, uploadId, partnumber, partStartIndex, srcfileKey, total, md5list, dryrun, complete_list, ChunkSize):
prefix_and_key = str(PurePosixPath(S3Prefix) / srcfileKey)
if not dryrun:
print(f'\033[0;32;1m--->Uploading\033[0m {srcfileKey} - {partnumber}/{total}')
pstart_time = time.time()
with open(os.path.join(SrcDir, srcfileKey), 'rb') as data:
retryTime = 0
while retryTime <= MaxRetry:
try:
data.seek(partStartIndex)
chunkdata = data.read(ChunkSize)
chunkdata_md5 = hashlib.md5(chunkdata)
md5list[partnumber - 1] = chunkdata_md5
if not dryrun:
s3_dest_client.upload_part(
Body=chunkdata,
Bucket=DesBucket,
Key=prefix_and_key,
PartNumber=partnumber,
UploadId=uploadId,
ContentMD5=base64.b64encode(chunkdata_md5.digest()).decode('utf-8')
)
                    # MD5 is verified here for each individual part upload; the whole file is verified again later when the parts are merged
break
except Exception as err:
retryTime += 1
logger.info(f'UploadThreadFunc log: {srcfileKey} - {str(err)}')
logger.info(f'Upload Fail - {srcfileKey} - Retry part - {partnumber} - Attempt - {retryTime}')
if retryTime > MaxRetry:
logger.error(f'Quit for Max retries: {retryTime}')
input('PRESS ENTER TO QUIT')
sys.exit(0)
                time.sleep(5 * retryTime)  # increasing backoff before retry
complete_list.append(partnumber)
pload_time = time.time() - pstart_time
pload_bytes = len(chunkdata)
pload_speed = size_to_str(int(pload_bytes / pload_time)) + "/s"
if not dryrun:
print(f'\033[0;34;1m --->Complete\033[0m {srcfileKey} '
f'- {partnumber}/{total} \033[0;34;1m{len(complete_list) / total:.2%} - {pload_speed}\033[0m')
return
# download part from src. s3 and upload to dest. s3
def download_uploadThread(*, uploadId, partnumber, partStartIndex, srcfileKey, total, md5list, dryrun, complete_list,
ChunkSize):
pstart_time = time.time()
getBody, chunkdata_md5 = b'', b'' # init
if ifVerifyMD5 or not dryrun:
        # Download the part
if not dryrun:
print(f"\033[0;33;1m--->Downloading\033[0m {srcfileKey} - {partnumber}/{total}")
else:
print(f"\033[0;33;40m--->Downloading for verify MD5\033[0m {srcfileKey} - {partnumber}/{total}")
retryTime = 0
while retryTime <= MaxRetry:
try:
response_get_object = s3_src_client.get_object(
Bucket=SrcBucket,
Key=srcfileKey,
Range="bytes=" + str(partStartIndex) + "-" + str(partStartIndex + ChunkSize - 1)
)
getBody = response_get_object["Body"].read()
chunkdata_md5 = hashlib.md5(getBody)
md5list[partnumber - 1] = chunkdata_md5
break
except Exception as err:
retryTime += 1
logger.warning(f"DownloadThreadFunc - {srcfileKey} - Exception log: {str(err)}")
logger.warning(f"Download part fail, retry part: {partnumber} Attempts: {retryTime}")
if retryTime > MaxRetry:
logger.error(f"Quit for Max Download retries: {retryTime}")
input('PRESS ENTER TO QUIT')
sys.exit(0)
                time.sleep(5 * retryTime)  # increasing backoff before retry
if not dryrun:
        # Upload the part
print(f'\033[0;32;1m --->Uploading\033[0m {srcfileKey} - {partnumber}/{total}')
retryTime = 0
while retryTime <= MaxRetry:
try:
s3_dest_client.upload_part(
Body=getBody,
Bucket=DesBucket,
Key=srcfileKey,
PartNumber=partnumber,
UploadId=uploadId,
ContentMD5=base64.b64encode(chunkdata_md5.digest()).decode('utf-8')
)
break
except Exception as err:
retryTime += 1
logger.warning(f"UploadThreadFunc - {srcfileKey} - Exception log: {str(err)}")
logger.warning(f"Upload part fail, retry part: {partnumber} Attempts: {retryTime}")
if retryTime > MaxRetry:
logger.error(f"Quit for Max Upload retries: {retryTime}")
input('PRESS ENTER TO QUIT')
sys.exit(0)
                time.sleep(5 * retryTime)  # increasing backoff before retry
complete_list.append(partnumber)
pload_time = time.time() - pstart_time
pload_bytes = len(getBody)
pload_speed = size_to_str(int(pload_bytes / pload_time)) + "/s"
if not dryrun:
print(f'\033[0;34;1m --->Complete\033[0m {srcfileKey} '
f'- {partnumber}/{total} \033[0;34;1m{len(complete_list) / total:.2%} - {pload_speed}\033[0m')
return
# download part from src. ali_oss and upload to dest. s3
def alioss_download_uploadThread(*, uploadId, partnumber, partStartIndex, srcfileKey, srcfileSize, total, md5list,
dryrun, complete_list, ChunkSize):
pstart_time = time.time()
getBody, chunkdata_md5 = b'', b'' # init
if ifVerifyMD5 or not dryrun:
        # Download the part
if not dryrun:
print(f"\033[0;33;1m--->Downloading\033[0m {srcfileKey} - {partnumber}/{total}")
else:
print(f"\033[0;33;40m--->Downloading for verify MD5\033[0m {srcfileKey} - {partnumber}/{total}")
retryTime = 0
while retryTime <= MaxRetry:
try:
partEndIndex = partStartIndex + ChunkSize - 1
if partEndIndex >= srcfileSize:
partEndIndex = srcfileSize - 1
                # If the end of the range exceeds the file size, Ali OSS restarts the download
                # from the beginning instead of clamping, so the end must be manually capped at FileSize-1.
                # S3 and local disks simply move the end pointer to the last byte when the range overshoots.
response_get_object = ali_bucket.get_object(
key=srcfileKey,
byte_range=(partStartIndex, partEndIndex)
)
getBody = b''
for chunk in response_get_object:
if chunk != '':
getBody += chunk
chunkdata_md5 = hashlib.md5(getBody)
md5list[partnumber - 1] = chunkdata_md5
break
except Exception as err:
retryTime += 1
logger.warning(f"DownloadThreadFunc - {srcfileKey} - Exception log: {str(err)}")
logger.warning(f"Download part fail, retry part: {partnumber} Attempts: {retryTime}")
if retryTime > MaxRetry:
logger.error(f"Quit for Max Download retries: {retryTime}")
input('PRESS ENTER TO QUIT')
sys.exit(0)
                time.sleep(5 * retryTime)  # increasing backoff before retry
if not dryrun:
        # Upload the part
print(f'\033[0;32;1m --->Uploading\033[0m {srcfileKey} - {partnumber}/{total}')
retryTime = 0
while retryTime <= MaxRetry:
try:
s3_dest_client.upload_part(
Body=getBody,
Bucket=DesBucket,
Key=srcfileKey,
PartNumber=partnumber,
UploadId=uploadId,
ContentMD5=base64.b64encode(chunkdata_md5.digest()).decode('utf-8')
)
break
except Exception as err:
retryTime += 1
logger.warning(f"UploadThreadFunc - {srcfileKey} - Exception log: {str(err)}")
logger.warning(f"Upload part fail, retry part: {partnumber} Attempts: {retryTime}")
if retryTime > MaxRetry:
                    logger.error(f"Quit for Max Upload retries: {retryTime}")
input('PRESS ENTER TO QUIT')
sys.exit(0)
                time.sleep(5 * retryTime)  # increasing backoff before retry
complete_list.append(partnumber)
pload_time = time.time() - pstart_time
pload_bytes = len(getBody)
pload_speed = size_to_str(int(pload_bytes / pload_time)) + "/s"
if not dryrun:
print(f'\033[0;34;1m --->Complete\033[0m {srcfileKey} '
f'- {partnumber}/{total} \033[0;34;1m{len(complete_list) / total:.2%} - {pload_speed}\033[0m')
return
# Complete multipart upload, get uploadedListParts from S3 and construct completeStructJSON
def completeUpload(*, reponse_uploadId, srcfileKey, len_indexList):
    # Query the full part list on S3 (uploadedListParts) and build completeStructJSON
prefix_and_key = srcfileKey
if JobType == 'LOCAL_TO_S3':
prefix_and_key = str(PurePosixPath(S3Prefix) / srcfileKey)
uploadedListPartsClean = []
PartNumberMarker = 0
IsTruncated = True
while IsTruncated:
response_uploadedList = s3_dest_client.list_parts(
Bucket=DesBucket,
Key=prefix_and_key,
UploadId=reponse_uploadId,
MaxParts=1000,
PartNumberMarker=PartNumberMarker
)
NextPartNumberMarker = response_uploadedList['NextPartNumberMarker']
IsTruncated = response_uploadedList['IsTruncated']
if NextPartNumberMarker > 0:
for partObject in response_uploadedList["Parts"]:
ETag = partObject["ETag"]
PartNumber = partObject["PartNumber"]
addup = {
"ETag": ETag,
"PartNumber": PartNumber
}
uploadedListPartsClean.append(addup)
PartNumberMarker = NextPartNumberMarker
if len(uploadedListPartsClean) != len_indexList:
logger.warning(f'Uploaded parts size not match - {srcfileKey}')
input('PRESS ENTER TO QUIT')
sys.exit(0)
completeStructJSON = {"Parts": uploadedListPartsClean}
    # Complete (merge) the multipart upload on S3
response_complete = s3_dest_client.complete_multipart_upload(
Bucket=DesBucket,
Key=prefix_and_key,
UploadId=reponse_uploadId,
MultipartUpload=completeStructJSON
)
logger.info(f'Complete merge file {srcfileKey}')
return response_complete
# Compare local file list and s3 list
def compare_local_to_s3():
logger.info('Comparing destination and source ...')
fileList = get_local_file_list(str_key=True)
desFilelist = get_s3_file_list(s3_client=s3_dest_client,
bucket=DesBucket,
S3Prefix=S3Prefix,
no_prefix=True)
deltaList = []
for source_file in fileList:
if source_file not in desFilelist:
deltaList.append(source_file)
if not deltaList:
logger.warning('All source files are in destination Bucket/Prefix. Job well done.')
else:
logger.warning(f'There are {len(deltaList)} files not in destination or not the same size. List:')
for delta_file in deltaList:
logger.warning(str(delta_file))
return
# Compare S3 buckets
def compare_buckets():
logger.info('Comparing destination and source ...')
deltaList = []
desFilelist = get_s3_file_list(s3_client=s3_dest_client,
bucket=DesBucket,
S3Prefix=S3Prefix)
if JobType == 'S3_TO_S3':
if SrcFileIndex == "*":
fileList = get_s3_file_list(s3_client=s3_src_client,
bucket=SrcBucket,
S3Prefix=S3Prefix)
else:
fileList = head_s3_single_file(s3_src_client, SrcBucket)
elif JobType == 'ALIOSS_TO_S3':
if SrcFileIndex == "*":
fileList = get_ali_oss_file_list(ali_bucket)
else:
fileList = head_oss_single_file(ali_bucket)
else:
return
for source_file in fileList:
if source_file not in desFilelist:
deltaList.append(source_file)
if not deltaList:
logger.warning('All source files are in destination Bucket/Prefix. Job well done.')
else:
logger.warning(f'There are {len(deltaList)} files not in destination or not the same size. List:')
for delta_file in deltaList:
logger.warning(json.dumps(delta_file))
return
# Main
if __name__ == '__main__':
start_time = datetime.datetime.now()
ChunkSize_default = set_config()
logger, log_file_name = set_log()
# Define s3 client
s3_config = Config(max_pool_connections=200)
s3_dest_client = Session(profile_name=DesProfileName).client('s3', config=s3_config)
# Check destination S3 writable
try:
logger.info(f'Checking write permission for: {DesBucket}')
s3_dest_client.put_object(
Bucket=DesBucket,
Key=str(PurePosixPath(S3Prefix) / 'access_test'),
Body='access_test_content'
)
except Exception as e:
logger.error(f'Can not write to {DesBucket}/{S3Prefix}, {str(e)}')
input('PRESS ENTER TO QUIT')
sys.exit(0)
    # Get the source file list
logger.info('Get source file list')
src_file_list = []
if JobType == "LOCAL_TO_S3":
SrcDir = str(Path(SrcDir))
src_file_list = get_local_file_list()
elif JobType == "S3_TO_S3":
s3_src_client = Session(profile_name=SrcProfileName).client('s3', config=s3_config)
if SrcFileIndex == "*":
src_file_list = get_s3_file_list(s3_client=s3_src_client,
bucket=SrcBucket,
S3Prefix=S3Prefix)
else:
src_file_list = head_s3_single_file(s3_src_client, SrcBucket)
elif JobType == 'ALIOSS_TO_S3':
import oss2
ali_bucket = oss2.Bucket(oss2.Auth(ali_access_key_id, ali_access_key_secret), ali_endpoint, ali_SrcBucket)
if SrcFileIndex == "*":
src_file_list = get_ali_oss_file_list(ali_bucket)
else:
src_file_list = head_oss_single_file(ali_bucket)
    # Get the list of files currently existing in the destination S3 bucket
des_file_list = get_s3_file_list(s3_client=s3_dest_client,
bucket=DesBucket,
S3Prefix=S3Prefix)
    # Get all unfinished multipart uploads in the destination bucket
multipart_uploaded_list = get_uploaded_list(s3_dest_client)
    # Ask whether to clean up all unfinished multipart uploads, used to force a full re-upload
if multipart_uploaded_list:
logger.warning(f'{len(multipart_uploaded_list)} Unfinished upload, clean them and restart?')
logger.warning('NOTICE: IF CLEAN, YOU CANNOT RESUME ANY UNFINISHED UPLOAD')
if not DontAskMeToClean:
keyboard_input = input("CLEAN unfinished upload and restart(input CLEAN) or resume loading(press enter)? "
"Please confirm: (n/CLEAN)")
else:
keyboard_input = 'no'
if keyboard_input == 'CLEAN':
            # Clean up all unfinished uploads
for clean_i in multipart_uploaded_list:
s3_dest_client.abort_multipart_upload(
Bucket=DesBucket,
Key=clean_i["Key"],
UploadId=clean_i["UploadId"]
)
multipart_uploaded_list = []
logger.info('CLEAN FINISHED')
else:
logger.info('You choose not to clean, now try to resume unfinished upload')
    # Upload the files in the source list one by one
with futures.ThreadPoolExecutor(max_workers=MaxParallelFile) as file_pool:
for src_file in src_file_list:
file_pool.submit(upload_file,
srcfile=src_file,
desFilelist=des_file_list,
UploadIdList=multipart_uploaded_list,
ChunkSize_default=ChunkSize_default)
    # Fetch the source and destination file lists again, compare file sizes, and report the result
time_str = str(datetime.datetime.now() - start_time)
if JobType == 'S3_TO_S3':
str_from = f'{SrcBucket}/{S3Prefix}'
compare_buckets()
elif JobType == 'ALIOSS_TO_S3':
str_from = f'{ali_SrcBucket}/{S3Prefix}'
compare_buckets()
elif JobType == 'LOCAL_TO_S3':
str_from = f'{SrcDir}'
compare_local_to_s3()
else:
str_from = ""
print(f'\033[0;34;1mMISSION ACCOMPLISHED - Time: {time_str} \033[0m - FROM: {str_from} TO {DesBucket}/{S3Prefix}')
print('Logged to file:', os.path.abspath(log_file_name))
input('PRESS ENTER TO QUIT')
``` |
{
"source": "joeshow79/nn_dataflow",
"score": 2
} |
#### File: nn_dataflow/core/inter_layer_pipeline.py
```python
import itertools
from .layer import ConvLayer
from .network import Network
from .pipeline_segment import PipelineSegment
from .resource import Resource
class InterLayerPipeline(object):
'''
Inter-layer pipeline.
'''
def __init__(self, network, batch_size, resource, max_util_drop=0.05):
if not isinstance(network, Network):
raise TypeError('InterLayerPipeline: network must be '
'a Network instance.')
if not isinstance(resource, Resource):
raise TypeError('InterLayerPipeline: resource must be '
'a Resource instance.')
if not 0 <= max_util_drop <= 1:
raise ValueError('InterLayerPipeline: max_util_drop must be '
'between [0, 1].')
self.network = network
self.batch_size = batch_size
self.resource = resource
self.max_util_drop = max_util_drop
self._calc_sched_dag()
# Vertices starting from which we have generated the segments.
self.seg_vertex_done = set()
def ordered_layer_list(self):
'''
Get a list of the layers in their topological order in the scheduling
DAG.
'''
return list(sum(self.dag_vertex_list, tuple()))
def gen_segment(self, options):
'''
Generate all valid inter-layer pipelining segments.
'''
kwargs = {'network': self.network,
'batch_size': self.batch_size,
'resource': self.resource,
'max_util_drop': self.max_util_drop,
'with_opt': options.layer_pipeline_opt,
}
# No pipelining, each layer sequentially occupies the whole resource.
for layer in self.network:
seg = ((layer,),)
segment = PipelineSegment(seg, **kwargs)
assert segment.valid
yield segment
# Pipelining.
for vseg in self._gen_vseg():
if len(vseg) > options.layer_pipeline_max_degree:
continue
if len(vseg) == 1 and len(self.dag_vertex_list[vseg[0]]) == 1:
# An individual layer, already returned in no-pipelining case.
continue
# Use set to eliminate duplicates.
seg_cands = set()
if options.partition_interlayer:
# Spatial pipelining.
seg = tuple(self.dag_vertex_list[vidx] for vidx in vseg)
seg_cands.add(seg)
if options.hw_gbuf_save_writeback:
# Temporal pipelining.
# Reduce the spatial dimension.
seg = (tuple(itertools.chain.from_iterable(
self.dag_vertex_list[vidx] for vidx in vseg)),)
seg_cands.add(seg)
# Determine segment allocation.
for seg in seg_cands:
segment = PipelineSegment(seg, **kwargs)
if segment.valid:
yield segment
def _gen_vseg(self, vertex_idx=0, done=None):
'''
Generate vertex segments starting from vertex `vertex_idx`. Yield a
tuple of the vertices in the segment.
`done` is a set of vertices which have already been scheduled and the
output is already in memory.
Rules:
1. If a vertex does not share any dependencies with the current
segment, i.e., none of its previous vertices is in the current segment
or among the previous vertices of the current segment, we do not add it
to the segment, because there is no benefit to co-locate them.
2. If a vertex has multiple previous vertices, at most one of them
can be in the same segment as this vertex, because the output data
availability timing of multiple previous vertices may not match.
3. If a vertex has multiple next vertices, either all or at most one of
them can be NOT in the same segment as this vertex, because only
including a small subset saves little data write-back to memory.
'''
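        # Illustrative example (not from the original code): for a three-vertex
        # scheduling DAG with edges 0->1, 0->2 and 1->2, calling this generator with
        # vertex_idx=0 and an empty `done` yields (0,), (1,), (2,), (1, 2) and (0, 1);
        # (0, 1, 2) is never yielded because vertex 2 would then have two previous
        # vertices inside the segment (rule 2).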
vseg = tuple()
if not done:
done = set()
# Reset.
self.seg_vertex_done = set()
if self.dag_input_vertex not in done:
# Input layer is always in memory.
done.add(self.dag_input_vertex)
# The frontier is the vertex to be considered to be added to the
# current segment.
for frontier in range(vertex_idx, len(self.dag_vertex_list)):
# Check whether the frontier can be added to the current segment.
frontier_prevs = self.dag_prev_dict[frontier]
# Whether the frontier share dependencies with the current segment,
# if the segment is not empty.
share_deps = not vseg or not frontier_prevs.isdisjoint(
set.union(set(vseg), *[self.dag_prev_dict[i] for i in vseg]))
# Whether multiple previous vertices are in the current segment.
multi_prevs = len(frontier_prevs.intersection(vseg)) > 1
if not share_deps or multi_prevs:
# Not sharing any dependencies (rule 1), or multiple previous
# vertices in the current segment (rule 2).
# Make sure the current segment is not empty.
assert vseg
                # Do not extend the segment any more. Note that the current
                # segment has already been yielded, as well as the recursion,
                # in the last iteration.
break
# Extend the segment.
vseg += (frontier,)
# Check whether the segment is valid.
for idx in vseg:
nexts = self.dag_next_dict[idx]
# The next vertices should either all or at most one not in the
# segment (rule 3).
if not nexts.isdisjoint(vseg) \
and len(nexts.difference(vseg)) > 1:
# The segment is invalid. Need to add more vertices.
break
else:
# The segment is valid.
yield vseg
# Skip if have done.
if frontier + 1 in self.seg_vertex_done:
continue
# Recursion.
for tpl in self._gen_vseg(frontier + 1, done.union(vseg)):
yield tpl
assert vertex_idx not in self.seg_vertex_done
self.seg_vertex_done.add(vertex_idx)
def _calc_sched_dag(self):
'''
Build the scheduling DAG of the network. We merge layers with no
filters into their last previous layer, so a DAG vertex can contain one
or more layers.
We order and index the DAG vertices in their depth-first topological
order. This will also be the order to schedule the layers.
Also establish two dicts for the previous and next vertices of each DAG
vertex.
In summary, the attributes initialized include: `dag_input_vertex`,
`dag_vertex_list`, `dag_vertex_dict`, `dag_prev_dict`, `dag_next_dict`.
'''
# Vertex of the input layer.
self.dag_input_vertex = -1
# The DAG vertex set. Each vertex is a merged layer tuples, represented
# by their layer names. Use a list type to make modification easier.
dag_vertex_set = []
for layer_name in self.network:
layer = self.network[layer_name]
if isinstance(layer, ConvLayer):
dag_vertex_set.append((layer_name,))
else:
prevs = set(self.network.prevs(layer_name))
assert prevs
# Find and merge to a vertex if that vertex only contains one
# previous layer at the last, because non-last previous layer
# will not have its data available to be used for this layer.
# Also the previous layer can only have this one next layer,
# because its data will be overwritten by this layer locally.
# Check vertices in the reversed order.
for idx in reversed(range(len(dag_vertex_set))):
vhead = dag_vertex_set[idx][:-1]
vtail = dag_vertex_set[idx][-1]
if prevs.isdisjoint(vhead) and vtail in prevs \
and len(self.network.nexts(vtail)) == 1:
dag_vertex_set[idx] += (layer_name,)
break
else:
# No valid vertex to merge.
dag_vertex_set.append((layer_name,))
assert sum(len(v) for v in dag_vertex_set) == len(self.network)
# The DAG vertex list in the topological order.
self.dag_vertex_list = self._topological_order(dag_vertex_set)
        # Make a dictionary from layer name to DAG vertex index.
self.dag_vertex_dict = {}
for vidx, v in enumerate(self.dag_vertex_list):
for layer_name in v:
assert layer_name not in self.dag_vertex_dict
self.dag_vertex_dict[layer_name] = vidx
# Add the input layer.
self.dag_vertex_dict[self.network.INPUT_LAYER_KEY] = \
self.dag_input_vertex
# Add the external layers.
for ext_layer in self.network.ext_layers():
self.dag_vertex_dict[ext_layer] = self.dag_input_vertex
# The previous and next relationship of the DAG vertices.
self.dag_prev_dict = dict((vidx, set()) for vidx
in range(len(self.dag_vertex_list)))
self.dag_next_dict = dict((vidx, set()) for vidx
in range(len(self.dag_vertex_list)))
for layer_name in self.network:
vidx = self.dag_vertex_dict[layer_name]
# Previous layers.
for p in self.network.prevs(layer_name):
pvidx = self.dag_vertex_dict[p] \
if p and p not in self.network.ext_layers() \
else self.dag_input_vertex
if pvidx != vidx:
self.dag_prev_dict[vidx].add(pvidx)
# Next layers.
for n in self.network.nexts(layer_name):
if not n:
continue
nvidx = self.dag_vertex_dict[n]
if nvidx != vidx:
self.dag_next_dict[vidx].add(nvidx)
# Add next layers of the input layer.
self.dag_next_dict[self.dag_input_vertex] = set()
for vidx in self.dag_prev_dict:
if self.dag_input_vertex in self.dag_prev_dict[vidx]:
self.dag_next_dict[self.dag_input_vertex].add(vidx)
def _topological_order(self, dag_vertex_set):
'''
Order the DAG vertices in topological order using DFS.
        Specifically, the backtrace order of the depth-first search is the
inverse of the topological order. See
https://en.wikipedia.org/wiki/Topological_sorting#Depth-first_search
'''
# The visited layers in the DFS order.
visited = []
# The unseen pending layers.
unseen = set(dag_vertex_set)
# The layers that have been seen, but not visited due to unvisited
# previous layers.
seen = set()
def _dfs(vertex):
assert vertex not in seen
if vertex in visited:
return
unseen.discard(vertex)
seen.add(vertex)
nexts = []
for l in vertex:
for n in self.network.nexts(l):
if n and n not in vertex and n not in nexts:
nexts.append(n)
# Visit next layers in the reversed order, so the reversed visit
# order has the original order.
next_vertices = []
for n in reversed(nexts):
for nv in unseen:
if n in nv:
next_vertices.append(nv)
for nv in next_vertices:
_dfs(nv)
visited.append(vertex)
seen.remove(vertex)
# Start from the first layers.
start_vertices = []
for l in reversed(self.network.firsts()):
for v in unseen:
if l in v:
start_vertices.append(v)
for v in start_vertices:
_dfs(v)
assert not unseen
assert not seen
return list(reversed(visited))
```
#### File: nn_dataflow/core/pipeline_segment.py
```python
from collections import namedtuple, OrderedDict, Counter
import itertools
from sympy import symbols
from sympy import Basic as symbasic
from sympy import Eq as symeq
from sympy.core.containers import Tuple as symtuple
from sympy.functions.elementary.piecewise import Piecewise as sympiecewise
from .. import util
from .layer import ConvLayer
from .network import Network
from .resource import Resource
from .scheduling_constraint import SchedulingConstraintLayerPipeline as Cstr
class PipelineSegment(object):
'''
Inter-layer pipeline segment.
Segment is a two-level layer hierarchy, where the first level is spatially
scheduled and the second level is temporally scheduled.
'''
# pylint: disable=too-many-instance-attributes
# Scheduling index in the segment, as a tuple of spatial and temporal
# scheduling indices.
SchedIndex = namedtuple('SchedIndex', ['sp_idx', 'tm_idx'])
def __init__(self, seg, network, batch_size, resource, max_util_drop=0.05,
with_opt=True):
if not isinstance(seg, tuple):
raise TypeError('PipelineSegment: seg must be a tuple.')
for ltpl in seg:
if not isinstance(ltpl, tuple):
raise TypeError('PipelineSegment: seg must be a tuple '
'of sub-tuples.')
if not isinstance(network, Network):
raise TypeError('PipelineSegment: network must be '
'a Network instance.')
if not isinstance(resource, Resource):
raise TypeError('PipelineSegment: resource must be '
'a Resource instance.')
self.seg = seg
self.network = network
self.batch_size = batch_size
self.resource = resource
self.max_util_drop = max_util_drop
self.with_opt = with_opt
self.valid = self._init_deps()
if not self.valid:
return
# Resource allocation.
self.valid = self._alloc_resource(max_util_drop=max_util_drop)
if not self.valid:
return
# Scheduling constraints.
self.valid = self._init_sym_cstrs()
if not self.valid:
return
def allocation(self):
'''
Get resource allocation, as a tuple of sub-tuples corresponding to the
layers in the segment.
'''
if not self.valid:
return None
return self.alloc
def gen_constraint(self, max_time_overhead=float('inf')):
'''
Generate scheduling constraint for the segment, as a tuple of
sub-tuples of SchedulingConstraint instances, corresponding to the
layers in the segment.
Yield the segment constraint tuple, and hints for pruning.
Pruning hints are the top-level loop blocking factors. Smaller hints
indicate better (lower) cost, and larger hints indicate better segment
timing (with lower time overhead). Constraints with smaller hints are
generated before those with larger hints. So if a constraint results in
a valid scheduling, later constraints whose hints are all larger than
its hints can be pruned.
'''
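# Illustration with hypothetical hint values: hints are compared
# element-wise by the caller. If a constraint yielded with hints (2, 4)
# leads to a valid schedule, a later constraint whose hints are all
# larger, e.g. (3, 6), cannot be cheaper and can be skipped.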
syms = self.cstr_symvals.keys()
vals = self.cstr_symvals.values()
assert syms and vals
# Sort from small to large.
# This is not a strict ordering, but we guarantee that if all values in
# hint A are larger than the corresponding values in hint B, A will be
# generated after B.
vals = [sorted(v) for v in vals]
if self.cstr_topbat_idx is not None:
# Tovhd = (1 + 1/to + 1 + 1/to + ...) / tb
# >= (1 + 1 + ...) / tb = num_sp_fbs / tb
min_topbat = 1. * self.cstr_num_sp_fbs / max_time_overhead
pos = self.cstr_topbat_idx
vals[pos] = [t for t in vals[pos] if t >= min_topbat]
for valp in itertools.product(*vals):
constraint = tuple()
for atpl in self._subs_symargs(self.cstr_symargs, zip(syms, valp)):
ctpl = tuple()
for a in atpl:
# Construct kwargs, adjust the types of the values.
kwargs = {}
kwargs['topbat'] = int(a.get('topbat', 0))
kwargs['fbifm'] = bool(a.get('fbifm', False))
if not kwargs['fbifm']:
kwargs['topifm'] = int(a.get('topifm', 0))
kwargs['fbofm'] = bool(a.get('fbofm', False))
if not kwargs['fbofm']:
kwargs['topofm'] = int(a.get('topofm', 0))
kwargs['update_dict'] = a.get('update_dict')
c = Cstr(**kwargs)
ctpl += (c,)
constraint += (ctpl,)
if None in valp:
assert len(valp) == 1
hints = (1,)
else:
hints = tuple(valp)
yield constraint, hints
def __getitem__(self, index):
return self.seg[index]
def __iter__(self):
return self.seg.__iter__()
def __len__(self):
return len(self.seg)
def __eq__(self, other):
if isinstance(other, self.__class__):
# pylint: disable=protected-access
return self._key_attrs() == other._key_attrs()
return NotImplemented
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(tuple(self._key_attrs()))
def __repr__(self):
return '{}({})'.format(
self.__class__.__name__,
', '.join([
'seg={}'.format(repr(self.seg)),
'network={}'.format(repr(self.network)),
'batch_size={}'.format(repr(self.batch_size)),
'resource={}'.format(repr(self.resource)),
'max_util_drop={}'.format(repr(self.max_util_drop)),
'with_opt={}'.format(repr(self.with_opt))]))
def _key_attrs(self):
''' Used for comparison. '''
return (self.seg, self.network, self.batch_size, self.resource,
self.max_util_drop, self.with_opt)
def _init_deps(self):
'''
Initialize the dependency relationship of the layers in the segment as
a mapping of the scheduling indices, and check validity. Return
whether the segment is valid to schedule.
We categorize dependencies to 3 categories:
- local: with the same spatial index but different temporal indices;
- neighbor: with different spatial indices but in the same segment;
- memory: in different segments, from/to memory.
The values of the src/dst dicts are tuples of indices of the neighbor
dependencies. A layer can have at most one neighbor source (which must
be the last temporally scheduled layer of its spatial scheduling), but
may have multiple neighbor destinations (which can be temporally
scheduled in the middle). Also, all
layers with the same spatial index can have at most one neighbor
source.
Special index `None` means memory dependency, i.e., from/to memory.
Memory sources and neighbor sources must be mutually exclusive, in order
to correctly set the src data regions; memory destinations and neighbor
destinations can co-exist.
Local dependencies are omitted, as by default each layer has its
immediately previous layer as local source and immediately next layer
as local destination.
Construct an ifmap forwarding dict for shared memory source data. It
maps previous layer name tuples to a list of scheduling indices of all
layers in this segment that share these exact previous layers. The
first in the list is responsible for fetching the previous layer data
and forwarding them to the others. We allow shared memory source data
between
two layers only when both layers have memory dependency only (so their
temporal indices must be 0), and their previous layers are exactly the
same.
Construct an ofmap forwarding dict for multiple destinations of both
on-chip and off-chip. It maps the scheduling index of a layer in this
segment that has both memory and neighbor/local destinations (so needs
to store its ofmaps back to memory), to a list of scheduling indices of
all layers in this segment that accept its ofmaps as ifmaps. Neighbor
dependencies are only between the last temporal one and the first
temporal ones; local dependencies are only between adjacent temporal
ones.
'''
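# Hypothetical example: for seg = (('c1', 'p1'), ('c2',)) where 'p1'
# follows 'c1' and 'c2' consumes 'p1', the pair 'c1' -> 'p1' is a local
# dependency (same spatial index), 'p1' -> 'c2' is a neighbor dependency
# (different spatial indices), and any producer or consumer outside the
# segment is a memory dependency (index None).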
self.src_dict = [[None for _ in ltpl] for ltpl in self.seg]
self.dst_dict = [[None for _ in ltpl] for ltpl in self.seg]
self.ifm_fwd_dict = {}
self.ofm_fwd_dict = {}
# Mapping from layer to spatial/temporal indices in the segment.
layer2idx = {l: PipelineSegment.SchedIndex(sp_idx, tm_idx)
for sp_idx, ltpl in enumerate(self.seg)
for tm_idx, l in enumerate(ltpl)}
# Mapping from previous layer tuple to layer.
prevs2layer = {}
for sp_idx, ltpl in enumerate(self.seg):
single_nbr_src = None
for tm_idx, l in enumerate(ltpl):
assert layer2idx[l] == (sp_idx, tm_idx)
# Sources.
src = tuple()
prevs = self.network.prevs(l)
assert all(p not in layer2idx or layer2idx[p] < layer2idx[l]
for p in prevs)
mem_src = [p for p in prevs if p not in layer2idx]
lcl_src = [p for p in prevs if p not in mem_src
and layer2idx[p].sp_idx == sp_idx]
nbr_src = [p for p in prevs if p not in mem_src + lcl_src]
# Ensure the single local source is the immediately previous layer.
# This is checked at the destination, so only assertions are here.
if not lcl_src:
assert tm_idx == 0
else:
assert len(lcl_src) == 1 \
and layer2idx[lcl_src[0]].tm_idx == tm_idx - 1
# Mutual exclusive.
if mem_src and nbr_src:
# We now allow each spatial scheduling (vertex) to have
# both memory source and neighbor source when generating
# segments. But each single layer cannot have both;
# otherwise there would be multiple source data regions.
return False
if mem_src:
# Memory source.
src += (None,)
if nbr_src:
# Neighbor source.
# Single neighbor source to be the last temporal scheduled.
assert len(nbr_src) == 1
prev_idx = layer2idx[nbr_src[0]]
assert prev_idx.tm_idx == len(self.seg[prev_idx.sp_idx]) - 1
# Single neighbor source across this spatial scheduling.
if single_nbr_src is not None:
return False
single_nbr_src = prev_idx
src += (prev_idx,)
# Shared memory source.
if mem_src and not lcl_src:
assert not nbr_src
assert tm_idx == 0
if prevs in prevs2layer:
fet_idx = layer2idx[prevs2layer[prevs]]
self.ifm_fwd_dict.setdefault(prevs, [fet_idx]).append(
layer2idx[l])
else:
prevs2layer[prevs] = l
# Destinations.
dst = tuple()
nexts = self.network.nexts(l)
assert all(n not in layer2idx or layer2idx[n] > layer2idx[l]
for n in nexts)
mem_dst = [n for n in nexts if n not in layer2idx]
lcl_dst = [n for n in nexts if n not in mem_dst
and layer2idx[n].sp_idx == sp_idx]
nbr_dst = [n for n in nexts if n not in mem_dst + lcl_dst]
# Ensure the single local destination is the immediately next layer.
if not lcl_dst:
if tm_idx != len(ltpl) - 1:
# Does not utilize local data; sub-optimal.
return False
else:
if len(lcl_dst) != 1 \
or layer2idx[lcl_dst[0]].tm_idx != tm_idx + 1:
# Local data will not be available if not adjacent.
return False
# Mutual exclusive.
# Now they can co-exist.
# assert not mem_dst or not nbr_dst
if mem_dst and nbr_dst:
assert tm_idx == len(ltpl) - 1
self.ofm_fwd_dict[layer2idx[l]] = [layer2idx[n]
for n in nbr_dst]
if mem_dst and lcl_dst:
assert not nbr_dst
self.ofm_fwd_dict[layer2idx[l]] = [layer2idx[lcl_dst[0]]]
if mem_dst:
# Memory destination.
dst += (None,)
if nbr_dst:
# Neighbor destinations.
# This layer is the last temporally scheduled.
assert tm_idx == len(ltpl) - 1
dst += tuple(layer2idx[n] for n in nbr_dst)
# Basic pipelining requires a linear structure (on-chip).
if not self.with_opt:
if len(nbr_src) + len(lcl_src) > 1 \
or len(nbr_dst) + len(lcl_dst) > 1 \
or ((sp_idx, tm_idx) != (0, 0)
and not nbr_src and not lcl_src):
return False
self.src_dict[sp_idx][tm_idx] = src
self.dst_dict[sp_idx][tm_idx] = dst
return True
def _alloc_resource(self, max_util_drop=0.05):
'''
Decide the resource allocation. Return whether the allocation succeeds.
`max_util_drop` specifies the maximum utilization drop due to mismatch
throughput between layers.
'''
self.alloc = tuple()
# Allocate processing subregions.
subregions = self._alloc_proc(max_util_drop=max_util_drop)
if not subregions:
return False
no_time_mux = len(self.network) == sum(len(ltpl) for ltpl in self.seg)
# All layers that have model filters must be spatially scheduled.
if no_time_mux:
for ltpl in self.seg:
if len([l for l in ltpl
if isinstance(self.network[l], ConvLayer)]) > 1:
no_time_mux = False
break
for sp_idx, ltpl in enumerate(self.seg):
# Resource for the subregion.
rtpl = tuple()
for tm_idx, _ in enumerate(ltpl):
# Processing region.
proc_region = subregions[sp_idx]
# Data source.
src = self.src_dict[sp_idx][tm_idx]
if None in src:
# Data source is memory.
assert src == (None,)
src_data_region = self.resource.src_data_region
for sh_idx_list in self.ifm_fwd_dict.values():
# Find shared memory source to use forwarding.
if (sp_idx, tm_idx) in sh_idx_list[1:]:
src_data_region = subregions[sh_idx_list[0].sp_idx]
break
elif src:
# Data source is neighbor.
assert len(src) == 1
src_data_region = subregions[src[0].sp_idx]
else:
# Data source is all local.
src_data_region = proc_region
# Data destination.
dst = self.dst_dict[sp_idx][tm_idx]
if None in dst:
# Data destination is memory.
# assert dst == (None,)
# Now we can have both memory and neighbor destinations. If
# they co-exist, we need to store them locally and also
# store back to memory. In this case the dst data region is
# set to memory.
dst_data_region = self.resource.dst_data_region
elif dst:
# Data destinations are neighbors.
# Put data in local. The next layers will fetch.
dst_data_region = proc_region
else:
# Data destination is all local.
dst_data_region = proc_region
# Make resource.
# Note that DRAM bandwidth is not split here. We optimistically
# assume each layer can use the full DRAM bandwidth at
# different time. We adjust this assumption when calculating
# the segment timing.
rtpl += (self.resource._replace(
proc_region=proc_region,
src_data_region=src_data_region,
dst_data_region=dst_data_region,
no_time_mux=no_time_mux),)
assert len(rtpl) == len(ltpl)
self.alloc += (rtpl,)
assert len(self.alloc) == len(self.seg)
return True
def _alloc_proc(self, max_util_drop=0.05):
'''
Allocate processing subregions for the segment.
Return a list of processing subregions corresponding to the first-level
(spatial scheduled) layers in the segment. Return None if allocation
failed.
`max_util_drop` specifies the maximum utilization drop due to mismatch
throughput between layers.
'''
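# Worked example with hypothetical numbers: for ops = [300, 100] on a
# 4x4 region (16 nodes), nodes_raw = [12, 4]; with common factor 4 the
# rounded allocation is [12, 4], giving time = max(300/12, 100/4) = 25
# and utilization = 400 / 25 / 16 = 1.0, which satisfies any
# max_util_drop.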
# Spatial allocation.
proc_region = self.resource.proc_region
dim_nodes = proc_region.dim
total_nodes = dim_nodes.size()
# Number of operations of each spatial allocation.
ops = [sum(self.network[l].total_ops() for l in ltpl)
for ltpl in self.seg]
# Enforce a common factor among the numbers of nodes allocated to all
# vertices in the segment. Such a common factor is likely to be the
# common height of the vertex node regions.
common_factor_list = [cf for cf, _ in util.factorize(dim_nodes.h, 2)]
for cf in sorted(common_factor_list, reverse=True):
# Pick the largest common factor within the utilization constraint.
# The number of nodes allocated to each vertex should be roughly
# proportional to the number of ops of the vertex.
nodes_raw = [o * 1. / sum(ops) * total_nodes for o in ops]
# Round to the common factor multiples.
assert total_nodes % cf == 0
nodes = [max(1, int(round(nr / cf))) * cf for nr in nodes_raw]
# Fix margin.
while sum(nodes) != total_nodes:
diff = [n - nr for n, nr in zip(nodes, nodes_raw)]
if sum(nodes) > total_nodes:
# Decrease the nodes for the vertex with the maximum
# positive difference.
idx, _ = max(enumerate(diff), key=lambda tpl: tpl[1])
nodes[idx] -= cf
else:
# Increase the nodes for the vertex with the minimum
# negative difference.
idx, _ = min(enumerate(diff), key=lambda tpl: tpl[1])
nodes[idx] += cf
if 0 in nodes:
continue
# Utilization.
time = max(o * 1. / n for o, n in zip(ops, nodes))
utilization = sum(ops) / time / sum(nodes)
assert utilization < 1 + 1e-6
if utilization >= 1 - max_util_drop:
# Found
break
else:
# Not found.
return None
# Allocate in the processing region according to the number of nodes.
subregions = proc_region.allocate(nodes)
assert subregions
assert len(subregions) == len(self.seg)
if len(subregions) == 1:
assert subregions[0] == proc_region
return subregions
def _init_sym_cstrs(self):
'''
Initialize the symbolic scheduling constraints for the layers in the
segment, by constructing a nested lists of dicts `cstr_symargs` whose
values can be symbolic expressions for the keyword arguments of layers
in the segment, and a dict `cstr_symvals` mapping each symbol to its
possible numerical values.
Rules for constraints.
- Top BAT loop factor.
With a single layer, there is no constraint on the top BAT loop factor.
Otherwise all layers must share the same factor, namely `topbat_shr`.
- Fmap forwarding and fully buffering.
Only CONV layers require fully buffered fmaps. Local-region layers
process data in a streaming manner.
Each CONV layer, and all local-region layers immediately following it
within the same spatial scheduling, are made into a group G.
(initial) if G is both the first spatial and the first temporal
scheduling with a CONV layer, it can choose whether to fully buffer
ofmaps or not. This is a configuration to explore, namely `fbofm_init`.
We decide its value by choosing the one that gives fewer fully
buffered inter-spatial pairs on the critical forwarding path, and the
smaller maximum fully buffered data size.
(within-group) within G, the CONV layer, and all local-region layers,
should use the same top OFM factors (IFM factors are automatically
determined by OFM factors in local-region layers), unless CONV ofmaps
need to be fully buffered, in which case, the CONV layer and the last
layer in G fully buffer ofmaps (top OFM factor is 1), and other layers
still use the same top OFM factors but can be different than 1.
(inter-temporal) if G has a source from G' in the same spatial
scheduling (which must be immediately before G), G should fully buffer
ifmaps, and G' should fully buffer ofmaps.
(inter-spatial) if G has a source from G' in another spatial scheduling
(where the source must be the last temporal scheduling in G' and that
spatial scheduling),
(a) if G' already fully buffers ofmaps, make G fully buffer ifmaps.
(b) otherwise, make G fully buffer ofmaps (do not require G' to fully
buffer ifmaps; leave it to other rules, e.g. inter-temporal, to
decide); forward data between G' and G, by matching their top O/IFM
factors (biasing this case for smaller pipeline filling delay).
Notice the destination can be: (1) the leading CONV layer, whose top
IFM factor is constrained; (2) a local-region layer, where we constrain
the top OFM factors of this group (unless otherwise constrained by
fully buffering ofmaps).
'''
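# Hypothetical example of the inter-spatial rule: if group G1 receives a
# neighbor source from group G0 and G0 does not fully buffer ofmaps,
# then G1 fully buffers ofmaps and its top IFM factor is matched to G0's
# top OFM factor; if G0 does fully buffer ofmaps, G1 fully buffers
# ifmaps instead.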
# pylint: disable=too-many-branches
# Symbolic variables mapping to numerical values.
symvals = dict()
# Top BAT loop factor.
topbat = symbols('topbat_shr', integer=True)
symvals[topbat] = [t for t, _ in util.factorize(self.batch_size, 2)]
# Whether the initial CONV layer fully buffers ofmaps.
fbofm_init = symbols('fbofm_init')
symvals[fbofm_init] = [False, True]
def _layer_topofm_vals(layer_name):
layer = self.network[layer_name]
# We require that each top-level ofmap block (total ofmap size divided
# by the top factor) takes at least 5% of the gbuf capacity of a single
# node, to avoid too fine blocking.
tmax = layer.total_ofmap_size(self.batch_size) \
/ (0.05 * self.resource.size_gbuf)
vals = [t for t, _ in util.factorize(layer.nofm, 2)
if t <= tmax or t == 1]
assert vals
return vals
def _layer_topifm_vals(layer_name):
layer = self.network[layer_name]
# We require that each top-level ifmap block (total ifmap size divided
# by the top factor) takes at least 5% of the gbuf capacity of a single
# node, to avoid too fine blocking.
tmax = layer.total_ifmap_size(self.batch_size) \
/ (0.05 * self.resource.size_gbuf)
vals = [t for t, _ in util.factorize(layer.nifm, 2)
if t <= tmax or t == 1]
assert vals
return vals
# Layer constraint kwargs.
symargs = [[{'topbat': topbat} for _ in ltpl] for ltpl in self.seg]
# Candidates for critical forwarding path between spatial scheduling.
sp_crit_path_cands = set()
sp_crit_path_cands.add((0,)) # init with the first spatial.
# The last CONV layer index.
last_conv = PipelineSegment.SchedIndex(-1, 0)
# Whether the current group needs to fully buffer ofmap. Delayed apply
# to the last layer in the group.
curr_fbofm = False
for sp_idx, ltpl in enumerate(self.seg):
# Initial topofm, in case of a non-CONV starting layer.
curr_topofm = symbols('topofm_{}_s'.format(sp_idx), integer=True)
symvals[curr_topofm] = _layer_topofm_vals(ltpl[0])
for tm_idx, l in enumerate(ltpl):
layer = self.network[l]
curr_sa = symargs[sp_idx][tm_idx]
# Neighbor source dependency.
nsrc_sa = None
src_deps = self.src_dict[sp_idx][tm_idx]
if any(s is not None for s in src_deps):
assert len(src_deps) == 1
nbr_src = src_deps[0]
assert nbr_src.sp_idx < sp_idx
nsrc_sa = symargs[nbr_src.sp_idx][nbr_src.tm_idx]
assert nsrc_sa # not empty, used to test nbr src exists.
# Set critical path candidates.
new_cands = set()
for cand in sp_crit_path_cands:
if cand[-1] == nbr_src.sp_idx:
new_cands.add(cand + (sp_idx,))
sp_crit_path_cands |= new_cands
if isinstance(layer, ConvLayer):
# Conv layer.
# The last group may require to fully buffer ofmaps.
# Delayed apply to the immediate previous layer.
if curr_fbofm is not False:
assert last_conv >= (0, 0)
if last_conv.sp_idx == sp_idx:
assert tm_idx > 0
lsrc_sa = symargs[sp_idx][tm_idx - 1]
else:
lsrc_sa = symargs[last_conv.sp_idx][-1]
lsrc_sa['fbofm'] = curr_fbofm
# Reset.
curr_fbofm = False
# New topofm for a new group.
curr_topofm = symbols('topofm_{}_{}'.format(sp_idx, tm_idx),
integer=True)
symvals[curr_topofm] = _layer_topofm_vals(l)
# Set topofm.
curr_sa['topofm'] = curr_topofm
if sp_idx == last_conv.sp_idx:
# Rule inter-temporal.
assert tm_idx > 0
# Make this group fully buffer ifmaps.
curr_sa['fbifm'] = True
# Make the last group fully buffer ofmaps.
last_sa = symargs[sp_idx][last_conv.tm_idx]
lsrc_sa = symargs[sp_idx][tm_idx - 1]
last_sa['fbofm'] = True
lsrc_sa['fbofm'] = True
elif nsrc_sa:
# Rule inter-spatial.
# We only look at this rule when inter-temporal rule
# does not apply and the ifmaps of this group are not
# yet required to be fully buffered.
if not self.with_opt:
# Basic pipelining requires fully buffering all
# pairs of neighbor src/dst.
nsrc_sa['fbofm'] = True
nsrc_fbofm = nsrc_sa.get('fbofm', False)
# (a): if the source already fully buffers ofmaps.
# Make this group fully buffer ifmaps.
curr_sa['fbifm'] = symeq(nsrc_fbofm, True)
# (b)-(1): otherwise.
# Make this group fully buffer ofmaps.
curr_sa['fbofm'] = symeq(nsrc_fbofm, False)
curr_fbofm = symeq(nsrc_fbofm, False) # delayed apply.
# Match top OFM/IFM factors.
curr_sa['topifm'] = sympiecewise(
(nsrc_sa['topofm'], symeq(nsrc_fbofm, False)),
(curr_sa.get('topifm', 0), True))
elif last_conv < (0, 0):
# The first CONV layer.
# Rule initial.
curr_sa['fbofm'] = fbofm_init
curr_fbofm = fbofm_init
last_conv = PipelineSegment.SchedIndex(sp_idx, tm_idx)
else:
# Non-Conv layer.
if nsrc_sa:
# Rule inter-spatial, (b)-(2).
nsrc_fbofm = nsrc_sa.get('fbofm', False)
curr_topofm = sympiecewise(
(nsrc_sa['topofm'], symeq(nsrc_fbofm, False)),
(curr_topofm, True))
# Also backtrace this group.
for bt_idx in range(last_conv.tm_idx, tm_idx):
symargs[sp_idx][bt_idx]['topofm'] = curr_topofm
# Rule within-group.
curr_sa['topofm'] = curr_topofm
# If this layer has no on-chip destinations, cancel the
# requirement to fully buffer ofmaps.
if all(d is None for d in self.dst_dict[sp_idx][tm_idx]) \
and tm_idx == len(ltpl) - 1:
curr_sa.pop('fbofm', False)
# Simplify.
self._simplify_symargs(symargs, symvals)
# Get critical forwarding path between spatial scheduling.
# The critical path has the longest forwarding chain.
sp_crit_path = max(sp_crit_path_cands, key=len)
# Check maximum fully-buffering size, and decide fbofm_init.
opt_val = None
opt_key = (float('inf'),) * 2 # (num of fb pairs, max fb size)
num_sp_fbs = 0
for val in symvals.get(fbofm_init, [False]):
subs_symargs = self._subs_symargs(symargs, fbofm_init, val)
maxsz = 0
numfb = 0
for sp_idx, (ltpl, atpl) in enumerate(zip(self.seg, subs_symargs)):
ms = max(itertools.chain(
((self.network[l].total_ofmap_size() if a.get('fbofm')
else 0)
+ (self.network[l].total_ifmap_size() if a.get('fbifm')
else 0)
for l, a in zip(ltpl, atpl)),
[0])) # safe max with default.
if ms > self.alloc[sp_idx][0].proc_region.dim.size() \
* self.alloc[sp_idx][0].size_gbuf:
break
maxsz = max(maxsz, ms)
if sp_idx in sp_crit_path and atpl[-1].get('fbofm', False):
numfb += 1
else:
key = (numfb, maxsz)
if key < opt_key:
opt_val, opt_key = val, key
num_sp_fbs = numfb
if opt_val is None:
return False
# Use the optimal value.
symvals[fbofm_init] = [opt_val]
self._simplify_symargs(symargs, symvals)
# Shared memory source must have the same topifm.
for sh_idx_list in self.ifm_fwd_dict.values():
assert len(sh_idx_list) > 1
fet_sp_idx = sh_idx_list[0].sp_idx
sh_symarg_list = [symargs[idx.sp_idx][0] for idx in sh_idx_list]
# Must have no constraint on ifmaps access from memory.
assert all(not sa.get('fbifm', False) and not sa.get('topifm', 0)
for sa in sh_symarg_list)
# Cannot constrain both topifm and topofm.
if any(sa.get('fbofm', False) or sa.get('topofm', 0)
for sa in sh_symarg_list):
sh_kwargs = {'fbifm': True}
else:
topifm = symbols('topifm_{}'.format(fet_sp_idx), integer=True)
symvals[topifm] = _layer_topifm_vals(self.seg[fet_sp_idx][0])
sh_kwargs = {'topifm': topifm}
# Set constraints.
for sa in sh_symarg_list:
sa.update(sh_kwargs)
# Simplify.
self._simplify_symargs(symargs, symvals)
# Turn constraints into lazily updated rules.
self._lazify_topofm_symargs(symargs, symvals)
# Cannot simplify any more as update_dict is not sympifi-able.
# Sort symbol dict.
symvals = OrderedDict(sorted(((s, symvals[s]) for s in symvals),
key=lambda item: str(item[0])))
if not symvals:
# Must add a dummy symbol so iterative substitution can happen.
symvals[symbols('_dummy')] = [None]
self.cstr_symargs = symargs
self.cstr_symvals = symvals
self.cstr_num_sp_fbs = num_sp_fbs
try:
self.cstr_topbat_idx = list(symvals.keys()).index(topbat)
except ValueError:
self.cstr_topbat_idx = None
return True
@staticmethod
def _simplify_symargs_one_pass(symargs, symvals):
'''
Simplify symargs and symvals in-place:
- If fbi/ofm is False, then remove it.
- If fbi/ofm is True, then remove topi/ofm.
- If a symbol can take only one value, then substitute it.
- If a symbol only occurs once, then remove its constraint.
Return whether the symargs and symvals are already simplified.
'''
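# Hypothetical example: if symvals maps s -> [4] only, s is substituted
# by 4 in every kwarg expression and removed from symvals; if a symbol
# occurs at most once across all kwargs, its constraint is dropped
# (substituted with 0 for top* factors or False for fb* flags).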
for a in itertools.chain.from_iterable(symargs):
is_fbifm = a.get('fbifm')
is_fbofm = a.get('fbofm')
# pylint: disable=singleton-comparison
# lhs may be symbolic, see
# docs.sympy.org/latest/modules/logic.html#sympy.logic.boolalg.BooleanTrue
if is_fbifm == True:
a.pop('topifm', 0)
if is_fbifm == False:
a.pop('fbifm', False)
if is_fbofm == True:
a.pop('topofm', 0)
if is_fbofm == False:
a.pop('fbofm', False)
subs_dict = {}
# Possible values for symbols.
subs_dict.update(
(s, symvals[s][0]) for s in symvals if len(symvals[s]) == 1)
# Count the occurrence of symbols in all args (values).
symcnts = Counter(
s for a in itertools.chain.from_iterable(symargs)
for val in a.values() for s in symtuple(val).free_symbols)
assert set(symcnts.keys()).issubset(symvals.keys())
subs_dict.update((s, None)
for s in set(symvals.keys()) - set(symcnts.keys()))
subs_dict.update((s, 0 if str(s).startswith('top') else False)
for s in symcnts if symcnts[s] <= 1)
# Substitute symbols and remove from symbol dict.
for a in itertools.chain.from_iterable(symargs):
for k in a:
a[k] = symtuple(a[k]).subs(subs_dict)[0]
for s in subs_dict:
del symvals[s]
return not subs_dict
def _simplify_symargs(self, symargs, symvals):
''' Simplify symargs and symvals in-place iteratively. '''
while not self._simplify_symargs_one_pass(symargs, symvals):
pass
used_syms = symtuple(
*[symtuple(*a.values())
for a in itertools.chain.from_iterable(symargs)]).free_symbols
assert set(used_syms) == set(symvals.keys())
assert all(val for val in symvals.values())
@staticmethod
def _subs_symargs(symargs, *subs_args):
'''
Substitute symbols. The additional arguments are passed to subs().
Return a new substituted copy without modifying the original one.
'''
# sympify=False is necessary because there may be str in the values.
return [[dict((k, symtuple(a[k], sympify=False).subs(*subs_args)[0])
for k in a) for a in atpl] for atpl in symargs]
class TopOfmUpdateLambda(symbasic):
''' A sympifi-able lambda function to lazily update topofm. '''
def __new__(cls, *args):
return super(PipelineSegment.TopOfmUpdateLambda, cls).__new__(cls)
def __call__(self, arg_s, arg_r):
setattr(arg_s, 'topofm', arg_r.scheme['to'][0])
def _lazify_topofm_symargs(self, symargs, symvals):
'''
Turn qualified topofm constraints into lazily updated rules.
If a symbol is only used as the topofm constraint by a single CONV
layer and some local-region layers, we can turn it into a lazily
updated rule.
'''
sym2conv = {} # symbol --> the only CONV layer using it.
sym2lrs = {} # symbol --> list of local-region layer using it.
unqual_syms = set() # symbols used by two or more CONV layers.
for l, a in zip(itertools.chain.from_iterable(self.seg),
itertools.chain.from_iterable(symargs)):
layer = self.network[l]
if isinstance(layer, ConvLayer):
topofm = a.get('topofm', 0)
topifm = a.get('topifm', 0)
for s in symtuple(topofm, topifm).free_symbols:
if s not in unqual_syms:
if s in sym2conv:
# If a symbol is used in two CONV layers, it cannot
# be lazily updated.
del sym2conv[s]
sym2lrs.pop(s, [])
unqual_syms.add(s)
elif topofm == s:
assert s not in sym2lrs
sym2conv[s] = l
else:
topofm = a.get('topofm', 0)
if topofm in sym2conv:
sym2lrs.setdefault(topofm, []).append(l)
assert 0 not in sym2conv and 0 not in sym2lrs
syms = sym2conv.keys() # symbols to be lazily updated.
lr2conv = {} # local-region layer to the CONV layer constraining it.
for s in syms:
for lr in sym2lrs.get(s, []):
lr2conv[lr] = sym2conv[s]
lconvs = set(lr2conv.values()) # CONV layers whose topofm is to be removed.
for l, a in zip(itertools.chain.from_iterable(self.seg),
itertools.chain.from_iterable(symargs)):
if l in lconvs:
# Remove CONV topofm.
assert sym2conv[a['topofm']] == l
del a['topofm']
elif l in lr2conv:
# Link local-region layer to the CONV layer.
lconv = lr2conv[l]
assert sym2conv[a['topofm']] == lconv
del a['topofm']
a['update_dict'] = {
lconv: PipelineSegment.TopOfmUpdateLambda()}
for s in syms:
del symvals[s]
```
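The constraint generator above is meant to be driven by an outer exploration loop. Below is a minimal sketch, assuming an already-constructed `PipelineSegment` named `segment` and a hypothetical placeholder `schedule_with` for the actual per-segment scheduler; it illustrates how the yielded hints could be used for pruning.
```python
def explore_segment(segment, schedule_with):
    """Try segment constraints in hint order, pruning dominated ones."""
    valid_hints = []
    for constraint, hints in segment.gen_constraint():
        # Per the gen_constraint() docstring, once some hints lead to a
        # valid schedule, later constraints whose hints are all strictly
        # larger cannot be cheaper and may be skipped.
        if any(all(h > v for h, v in zip(hints, vh)) for vh in valid_hints):
            continue
        result = schedule_with(constraint)  # hypothetical scheduler call
        if result is not None:
            valid_hints.append(hints)
            yield result
```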
#### File: nn_dataflow/core/resource.py
```python
from collections import namedtuple
import math
from .node_region import NodeRegion
from .phy_dim2 import PhyDim2
RESOURCE_LIST = ['proc_region',
'dram_region',
'src_data_region',
'dst_data_region',
'dim_array',
'size_gbuf',
'size_regf',
'array_bus_width',
'dram_bandwidth',
'no_time_mux',
]
class Resource(namedtuple('Resource', RESOURCE_LIST)):
'''
Hardware resource specification.
The origins of node region and memory regions are all absolute.
'''
def __new__(cls, *args, **kwargs):
ntp = super(Resource, cls).__new__(cls, *args, **kwargs)
if not isinstance(ntp.proc_region, NodeRegion):
raise TypeError('Resource: proc_region must be '
'a NodeRegion instance.')
if ntp.proc_region.type != NodeRegion.PROC:
raise ValueError('Resource: proc_region must have type PROC.')
if not isinstance(ntp.dram_region, NodeRegion):
raise TypeError('Resource: dram_region must be '
'a NodeRegion instance.')
if ntp.dram_region.type != NodeRegion.DRAM:
raise ValueError('Resource: dram_region must have type DRAM.')
if not isinstance(ntp.src_data_region, NodeRegion):
raise TypeError('Resource: src_data_region must be '
'a NodeRegion instance.')
if not isinstance(ntp.dst_data_region, NodeRegion):
raise TypeError('Resource: dst_data_region must be '
'a NodeRegion instance.')
if not isinstance(ntp.dim_array, PhyDim2):
raise TypeError('Resource: dim_array must be a PhyDim2 object.')
if hasattr(ntp.size_gbuf, '__len__'):
raise TypeError('Resource: size_gbuf must be a scalar')
if hasattr(ntp.size_regf, '__len__'):
raise TypeError('Resource: size_regf must be a scalar')
if not isinstance(ntp.array_bus_width, int) \
and not math.isinf(ntp.array_bus_width):
raise TypeError('Resource: array_bus_width must be an integer '
'or infinity.')
if ntp.array_bus_width <= 0:
raise ValueError('Resource: array_bus_width must be positive.')
if not isinstance(ntp.dram_bandwidth, (float, int)):
raise TypeError('Resource: dram_bandwidth must be a number')
if ntp.dram_bandwidth <= 0:
raise ValueError('Resource: dram_bandwidth must be positive.')
if not isinstance(ntp.no_time_mux, bool):
raise TypeError('Resource: no_time_mux must be boolean')
return ntp
```
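As a reference for the field list above, here is a minimal construction sketch. It assumes `Resource`, `NodeRegion`, and `PhyDim2` can be imported from `nn_dataflow.core` (the unit tests below import `NodeRegion` and `PhyDim2` this way); the numeric sizes are arbitrary placeholders rather than recommended values.
```python
from nn_dataflow.core import NodeRegion, PhyDim2, Resource

# All regions use absolute origins, as stated in the class docstring.
dram = NodeRegion(origin=PhyDim2(0, 0), dim=PhyDim2(4, 1),
                  type=NodeRegion.DRAM)
resource = Resource(
    proc_region=NodeRegion(origin=PhyDim2(0, 0), dim=PhyDim2(4, 4),
                           type=NodeRegion.PROC),
    dram_region=dram,
    src_data_region=dram,
    dst_data_region=dram,
    dim_array=PhyDim2(16, 16),      # PE array dimensions.
    size_gbuf=64 * 1024,            # scalar capacity (placeholder).
    size_regf=512,                  # scalar capacity (placeholder).
    array_bus_width=float('inf'),   # infinity passes the bus-width check.
    dram_bandwidth=float('inf'),    # positive number or infinity.
    no_time_mux=False,
)
```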
#### File: tests/unit_test/test_int_range.py
```python
import unittest
from nn_dataflow.core import IntRange
class TestIntRange(unittest.TestCase):
''' Tests for IntRange. '''
def test_valid_args(self):
''' Valid arguments. '''
ir1 = IntRange(1, 7)
self.assertEqual(ir1.beg, 1)
self.assertEqual(ir1.end, 7)
ir2 = IntRange(-3, 0)
self.assertEqual(ir2.beg, -3)
self.assertEqual(ir2.end, 0)
ir3 = IntRange(4, 4)
self.assertEqual(ir3.beg, 4)
self.assertEqual(ir3.end, 4)
def test_invalid_args(self):
''' Invalid arguments. '''
with self.assertRaisesRegexp(TypeError, 'IntRange: .*beg.*'):
_ = IntRange(7.2, 3)
with self.assertRaisesRegexp(TypeError, 'IntRange: .*end.*'):
_ = IntRange(7, None)
with self.assertRaisesRegexp(ValueError, 'IntRange: .*beg.*end.*'):
_ = IntRange(7, 3)
with self.assertRaisesRegexp(ValueError, 'IntRange: .*beg.*end.*'):
_ = IntRange(-3, -7)
def test_size(self):
''' Get size. '''
ir1 = IntRange(1, 7)
self.assertEqual(ir1.size(), 6)
ir2 = IntRange(-3, 0)
self.assertEqual(ir2.size(), 3)
ir3 = IntRange(4, 4)
self.assertEqual(ir3.size(), 0)
def test_empty(self):
''' Get empty. '''
ir1 = IntRange(1, 7)
self.assertFalse(ir1.empty())
ir2 = IntRange(-3, 0)
self.assertFalse(ir2.empty())
ir3 = IntRange(4, 4)
self.assertTrue(ir3.empty())
def test_range(self):
''' Get range. '''
ir1 = IntRange(1, 7)
self.assertEqual(len(set(ir1.range())), ir1.size())
ir2 = IntRange(-3, 0)
self.assertListEqual(list(ir2.range()), [-3, -2, -1])
ir3 = IntRange(4, 4)
self.assertEqual(len(list(ir3.range())), 0)
def test_overlap(self):
''' Get overlap. '''
ir1 = IntRange(-11, 5)
ir2 = IntRange(3, 8)
ir_ovlp = ir1.overlap(ir2)
self.assertEqual(ir_ovlp, IntRange(3, 5))
self.assertEqual(ir1.overlap(ir2), ir2.overlap(ir1))
ir3 = IntRange(-3, 3)
ir_ovlp = ir1.overlap(ir3)
self.assertEqual(ir_ovlp, IntRange(-3, 3))
ir4 = IntRange(8, 10)
ir_ovlp = ir1.overlap(ir4)
self.assertTrue(ir_ovlp.empty())
def test_overlap_error(self):
''' Get overlap error. '''
ir = IntRange(-11, 5)
with self.assertRaisesRegexp(TypeError, 'IntRange: .*'):
ir.overlap((0, 1))
def test_offset(self):
''' Get offset. '''
ir1 = IntRange(1, 7)
self.assertEqual(ir1.offset(3), IntRange(4, 10))
ir2 = IntRange(-3, 0)
self.assertEqual(ir2.offset(-2), IntRange(-5, -2))
```
#### File: tests/unit_test/test_nn_dataflow_scheme.py
```python
import unittest
from collections import OrderedDict
from nn_dataflow.core import DataLayout
from nn_dataflow.core import FmapPosition, FmapRange
from nn_dataflow.core import InputLayer, ConvLayer, FCLayer, PoolingLayer
from nn_dataflow.core import MemHierEnum as me
from nn_dataflow.core import Network
from nn_dataflow.core import NodeRegion
from nn_dataflow.core import NNDataflowScheme
from nn_dataflow.core import ParallelEnum as pe
from nn_dataflow.core import PartitionScheme
from nn_dataflow.core import PhyDim2
from nn_dataflow.core import SchedulingResult
class TestNNDataflowScheme(unittest.TestCase):
''' Tests for NNDataflowScheme. '''
# pylint: disable=too-many-public-methods
def setUp(self):
self.network = Network('test_net')
self.network.set_input_layer(InputLayer(3, 224))
self.network.add('c1', ConvLayer(3, 64, 224, 3))
self.network.add('p1', PoolingLayer(64, 7, 32), prevs='c1')
self.network.add('p2', PoolingLayer(64, 7, 32), prevs='c1')
self.network.add('f1', FCLayer(128, 1000, 7), prevs=['p1', 'p2'])
self.batch_size = 4
input_layer = self.network.input_layer()
self.input_layout = DataLayout(
frngs=(FmapRange((0, 0, 0, 0),
FmapPosition(b=self.batch_size,
n=input_layer.nofm,
h=input_layer.hofm,
w=input_layer.wofm)),),
regions=(NodeRegion(origin=PhyDim2(0, 0), dim=PhyDim2(2, 1),
type=NodeRegion.DRAM),),
parts=(PartitionScheme(order=range(pe.NUM),
pdims=[(1, 1)] * pe.NUM),))
c1_layer = self.network['c1']
self.c1res = SchedulingResult(
scheme=OrderedDict([('cost', 1.5), ('time', 200.), ('ops', 4.),
('num_nodes', 4),
('cost_op', 0.5), ('cost_access', 1.),
('cost_noc', 0), ('cost_static', 0),
('proc_time', 200), ('bus_time', 0),
('dram_time', 0),
('access', [[7, 8, 9]] * me.NUM),
('remote_gbuf_access', [0] * 3),
('total_nhops', [4, 5, 6]),
('fetch', [[1, 1, 1], [2, 2, 2]]),
('ti', [2, 2, 3]),
('to', [1, 2, 3]),
('tb', [1, 2, 3]),
('tvals', [[2, 1, 1], [2, 2, 2], [3, 3, 3]]),
('orders', [range(3)] * 2),
]),
ofmap_layout=DataLayout(
frngs=(FmapRange((0, 0, 0, 0),
FmapPosition(b=self.batch_size,
n=c1_layer.nofm,
h=c1_layer.hofm,
w=c1_layer.wofm)),),
regions=(NodeRegion(origin=PhyDim2(0, 0), dim=PhyDim2(1, 2),
type=NodeRegion.DRAM),),
parts=(PartitionScheme(order=range(pe.NUM),
pdims=[(1, 1)] * pe.NUM),)),
sched_seq=(0, 0, 0))
p1_layer = self.network['p1']
self.p1res = SchedulingResult(
scheme=OrderedDict([('cost', 0.6), ('time', 5), ('ops', 0.1),
('num_nodes', 2),
('cost_op', 0.1), ('cost_access', 0.5),
('cost_noc', 0), ('cost_static', 0),
('proc_time', 5), ('bus_time', 0),
('dram_time', 0),
('access', [[.7, .8, .9]] * me.NUM),
('remote_gbuf_access', [0] * 3),
('total_nhops', [.4, .5, .6]),
('fetch', [[1, 1, 1], [2, 2, 2]]),
('ti', [2, 2, 3]),
('to', [1, 2, 3]),
('tb', [1, 2, 3]),
('tvals', [[2, 1, 1], [2, 2, 2], [3, 3, 3]]),
('orders', [range(3)] * 2),
]),
ofmap_layout=DataLayout(
frngs=(FmapRange((0, 0, 0, 0),
FmapPosition(b=self.batch_size,
n=p1_layer.nofm,
h=p1_layer.hofm,
w=p1_layer.wofm)),),
regions=(NodeRegion(origin=PhyDim2(0, 0), dim=PhyDim2(1, 2),
type=NodeRegion.DRAM),),
parts=(PartitionScheme(order=range(pe.NUM),
pdims=[(1, 1)] * pe.NUM),)),
sched_seq=(0, 1, 0))
self.p2res = SchedulingResult(
scheme=self.p1res.scheme, ofmap_layout=self.p1res.ofmap_layout,
sched_seq=(0, 2, 0))
self.dtfl = NNDataflowScheme(self.network, self.input_layout)
self.dtfl['c1'] = self.c1res
self.dtfl['p1'] = self.p1res
self.dtfl['p2'] = self.p2res
def test_init(self):
''' Initial. '''
df = NNDataflowScheme(self.network, self.input_layout)
self.assertEqual(df.network, self.network)
self.assertEqual(df.input_layout, self.input_layout)
self.assertDictEqual(df.ext_layout_dict, {})
self.assertEqual(df.total_cost, 0)
self.assertEqual(df.total_time, 0)
self.assertFalse(df.res_dict)
self.assertFalse(df)
self.assertEqual(df.total_ops, 0)
self.assertSequenceEqual(df.total_accesses, [0] * me.NUM)
self.assertEqual(df.total_noc_hops, 0)
def test_init_ext(self):
''' Initial with external layers. '''
self.network.add_ext('e0', InputLayer(3, 224))
self.network.add_ext('e1', InputLayer(6, 224))
e0_layout = DataLayout(
frngs=(FmapRange((0, 0, 0, 0),
FmapPosition(b=self.batch_size,
n=self.network['e0'].nofm,
h=self.network['e0'].hofm,
w=self.network['e0'].wofm)),),
regions=self.input_layout.regions,
parts=self.input_layout.parts)
e1_layout = DataLayout(
frngs=(FmapRange((0, 0, 0, 0),
FmapPosition(b=self.batch_size,
n=self.network['e1'].nofm,
h=self.network['e1'].hofm,
w=self.network['e1'].wofm)),),
regions=self.input_layout.regions,
parts=self.input_layout.parts)
ext_layout_dict = {'e0': e0_layout, 'e1': e1_layout}
df = NNDataflowScheme(self.network, self.input_layout,
ext_layout_dict)
self.assertIn('e0', df.ext_layout_dict)
self.assertIn('e1', df.ext_layout_dict)
self.assertEqual(df.ext_layout_dict['e0'], e0_layout)
self.assertEqual(df.ext_layout_dict['e1'], e1_layout)
def test_init_invalid_network(self):
''' Invalid network. '''
with self.assertRaisesRegexp(TypeError,
'NNDataflowScheme: .*network*'):
_ = NNDataflowScheme(self.network['c1'], self.input_layout)
def test_init_invalid_input_layout(self):
''' Invalid input_layout. '''
with self.assertRaisesRegexp(TypeError,
'NNDataflowScheme: .*input_layout*'):
_ = NNDataflowScheme(self.network, self.input_layout.frngs)
def test_init_invalid_eld_keys(self):
''' Invalid ext_layout_dict keys. '''
with self.assertRaisesRegexp(ValueError,
'NNDataflowScheme: .*ext_layout_dict*'):
_ = NNDataflowScheme(self.network, self.input_layout,
{'e0': self.input_layout})
self.network.add_ext('e0', InputLayer(3, 224))
with self.assertRaisesRegexp(ValueError,
'NNDataflowScheme: .*ext_layout_dict*'):
_ = NNDataflowScheme(self.network, self.input_layout)
def test_init_invalid_eld_type(self):
''' Invalid ext_layout_dict value type. '''
self.network.add_ext('e0', InputLayer(3, 224))
self.network.add_ext('e1', InputLayer(3, 224))
with self.assertRaisesRegexp(TypeError,
'NNDataflowScheme: .*ext_layout*'):
_ = NNDataflowScheme(self.network, self.input_layout,
{'e0': self.input_layout,
'e1': self.input_layout.frngs})
def test_setgetitem(self):
''' __set/getitem__. '''
df = NNDataflowScheme(self.network, self.input_layout)
df['c1'] = self.c1res
self.assertEqual(df['c1'], self.c1res)
def test_getitem_not_in(self):
''' __getitem__ not in. '''
df = NNDataflowScheme(self.network, self.input_layout)
with self.assertRaises(KeyError):
_ = df['c1']
def test_setitem_not_in_network(self):
''' __setitem__ not in network. '''
df = NNDataflowScheme(self.network, self.input_layout)
with self.assertRaisesRegexp(KeyError, 'NNDataflowScheme: .*cc.*'):
df['cc'] = self.c1res
def test_setitem_invalid_value(self):
''' __setitem__ invalid value. '''
df = NNDataflowScheme(self.network, self.input_layout)
with self.assertRaisesRegexp(TypeError,
'NNDataflowScheme: .*SchedulingResult*'):
df['c1'] = self.c1res.scheme
def test_setitem_already_exists(self):
''' __setitem__ already exists. '''
df = NNDataflowScheme(self.network, self.input_layout)
df['c1'] = self.c1res
with self.assertRaisesRegexp(KeyError, 'NNDataflowScheme: .*c1*'):
df['c1'] = self.c1res._replace(sched_seq=(1, 0, 0))
def test_setitem_prev_not_in(self):
''' __setitem__ previous not existing. '''
df = NNDataflowScheme(self.network, self.input_layout)
with self.assertRaisesRegexp(KeyError, 'NNDataflowScheme: .*p1*'):
df['p1'] = self.p1res
def test_setitem_prev_input_ext(self):
''' __setitem__ previous is input or external. '''
df = NNDataflowScheme(self.network, self.input_layout)
df['c1'] = self.c1res
self.assertAlmostEqual(df.total_cost, self.c1res.total_cost)
self.network.add_ext('e0', InputLayer(3, 224))
self.network.add('c2', self.network['c1'], prevs=('e0',))
df = NNDataflowScheme(self.network, self.input_layout,
{'e0': self.input_layout})
df['c2'] = self.c1res
self.assertAlmostEqual(df.total_cost, self.c1res.total_cost)
def test_setitem_invalid_seg_idx(self):
''' __setitem__ invalid segment index. '''
df = NNDataflowScheme(self.network, self.input_layout)
with self.assertRaisesRegexp(ValueError,
'NNDataflowScheme: .*segment index*'):
df['c1'] = self.c1res._replace(sched_seq=(1, 0, 0))
df = NNDataflowScheme(self.network, self.input_layout)
df['c1'] = self.c1res
df['p1'] = self.p1res._replace(sched_seq=(1, 0, 0))
with self.assertRaisesRegexp(ValueError,
'NNDataflowScheme: .*segment index*'):
df['p2'] = self.p2res._replace(sched_seq=(0, 0, 0))
def test_delitem(self):
''' __delitem__. '''
df = NNDataflowScheme(self.network, self.input_layout)
df['c1'] = self.c1res
with self.assertRaisesRegexp(KeyError, 'NNDataflowScheme: .*'):
del df['c1']
def test_iter_len(self):
''' __iter__ and __len__. '''
self.assertEqual(len(self.dtfl), 3)
lst = [l for l in self.dtfl]
self.assertIn('c1', lst)
self.assertIn('p1', lst)
self.assertIn('p2', lst)
self.assertNotIn('f1', lst)
def test_copy(self):
''' copy. '''
df = self.dtfl
df2 = df.copy()
self.assertAlmostEqual(df.total_cost, df2.total_cost)
self.assertAlmostEqual(df.total_time, df2.total_time)
self.assertDictEqual(df.res_dict, df2.res_dict)
# Shallow copy.
for layer_name in df:
self.assertEqual(id(df[layer_name]), id(df2[layer_name]))
def test_copy_ext(self):
''' copy external layers. '''
self.network.add_ext('e0', self.network.input_layer())
self.network.add_ext('e1', self.network.input_layer())
df1 = NNDataflowScheme(self.network, self.input_layout,
{'e0': self.input_layout,
'e1': self.input_layout})
df1['c1'] = self.c1res
df1['p1'] = self.p1res
df1['p2'] = self.p2res
df2 = df1.copy()
self.assertAlmostEqual(df1.total_cost, df2.total_cost)
self.assertAlmostEqual(df1.total_time, df2.total_time)
self.assertDictEqual(df1.res_dict, df2.res_dict)
self.assertDictEqual(df1.ext_layout_dict, df2.ext_layout_dict)
def test_fmap_layout(self):
''' fmap_layout. '''
flayout = self.dtfl.fmap_layout(('c1',))
frng = flayout.complete_fmap_range()
self.assertTrue(flayout.is_in(self.c1res.ofmap_layout.regions[0]))
self.assertEqual(frng, self.c1res.ofmap_layout.frngs[0])
flayout = self.dtfl.fmap_layout((None,))
frng = flayout.complete_fmap_range()
self.assertTrue(flayout.is_in(self.input_layout.regions[0]))
self.assertEqual(frng, self.input_layout.frngs[0])
flayout = self.dtfl.fmap_layout(('p1', 'p2'))
frng = flayout.complete_fmap_range()
self.assertEqual(frng.size('n'),
self.network['p1'].nofm + self.network['p2'].nofm)
flayout = self.dtfl.fmap_layout((None, 'c1'))
frng = flayout.complete_fmap_range()
self.assertEqual(frng.size('n'),
self.network.input_layer().nofm
+ self.network['c1'].nofm)
def test_fmap_layout_ext(self):
''' fmap_layout external layers. '''
self.network.add_ext('e0', self.network.input_layer())
self.network.add_ext('e1', self.network.input_layer())
df = NNDataflowScheme(self.network, self.input_layout,
{'e0': self.input_layout,
'e1': self.input_layout})
df['c1'] = self.c1res
df['p1'] = self.p1res
df['p2'] = self.p2res
flayout = df.fmap_layout(('e0',))
self.assertEqual(flayout, self.input_layout)
flayout = df.fmap_layout(('e1', None))
self.assertTrue(flayout.is_in(self.input_layout.regions[0]))
frng = flayout.complete_fmap_range()
self.assertEqual(frng.size('n'),
self.network['e1'].nofm
+ self.network.input_layer().nofm)
def test_properties(self):
''' Property accessors. '''
self.assertAlmostEqual(self.dtfl.total_cost, 1.5 + 0.6 * 2)
self.assertAlmostEqual(self.dtfl.total_time, 200 + 5)
self.assertAlmostEqual(self.dtfl.total_ops, 4 + 0.1 * 2)
for a in self.dtfl.total_accesses:
self.assertAlmostEqual(a, (7 + 8 + 9) + (.7 + .8 + .9) * 2)
self.assertAlmostEqual(self.dtfl.total_noc_hops,
(4 + 5 + 6) + (.4 + .5 + .6) * 2)
def test_time_full_net_single_seg(self):
''' time() when full network fits in a single segment. '''
dtfl = NNDataflowScheme(self.network, self.input_layout)
dtfl['c1'] = self.c1res
dtfl['p1'] = self.p1res._replace(sched_seq=(0, 1, 0))
dtfl['p2'] = self.p2res._replace(sched_seq=(0, 2, 0))
dtfl['f1'] = self.c1res._replace(sched_seq=(0, 3, 0))
self.assertEqual(dtfl.total_time, 200)
def test_static_cost_adjust(self):
''' Adjust static cost portion. '''
# Add static cost.
idl_unit_cost = 1e-3
c1scheme = self.c1res.scheme
c1static = c1scheme['time'] * idl_unit_cost
c1scheme['cost_static'] += c1static
c1scheme['cost_access'] -= c1static
p1scheme = self.p1res.scheme
p1static = p1scheme['time'] * idl_unit_cost
p1scheme['cost_static'] += p1static
p1scheme['cost_access'] -= p1static
# No adjust.
dtfl = NNDataflowScheme(self.network, self.input_layout)
dtfl['c1'] = self.c1res._replace(scheme=c1scheme)
dtfl['p1'] = self.p1res._replace(scheme=p1scheme, sched_seq=(1, 0, 0))
dtfl['p2'] = self.p2res._replace(scheme=p1scheme, sched_seq=(2, 0, 0))
dtfl['f1'] = self.c1res._replace(scheme=c1scheme, sched_seq=(3, 0, 0))
sum_cost = 1.5 + 0.6 + 0.6 + 1.5
sum_time = 200 + 5 + 5 + 200
self.assertAlmostEqual(dtfl.total_cost, sum_cost)
self.assertAlmostEqual(dtfl.total_time, sum_time)
# With adjust.
dtfl = NNDataflowScheme(self.network, self.input_layout)
dtfl['c1'] = self.c1res._replace(scheme=c1scheme)
dtfl['p1'] = self.p1res._replace(scheme=p1scheme, sched_seq=(0, 1, 0))
dtfl['p2'] = self.p2res._replace(scheme=p1scheme, sched_seq=(0, 2, 0))
dtfl['f1'] = self.c1res._replace(scheme=c1scheme, sched_seq=(1, 0, 0))
diff = (sum_time - dtfl.total_time) * idl_unit_cost
self.assertGreater(diff, 0)
self.assertAlmostEqual(dtfl.total_cost, sum_cost - diff)
# All in one segment.
dtfl = NNDataflowScheme(self.network, self.input_layout)
dtfl['c1'] = self.c1res._replace(scheme=c1scheme)
dtfl['p1'] = self.p1res._replace(scheme=p1scheme, sched_seq=(0, 1, 0))
dtfl['p2'] = self.p2res._replace(scheme=p1scheme, sched_seq=(0, 2, 0))
dtfl['f1'] = self.c1res._replace(scheme=c1scheme, sched_seq=(0, 3, 0))
diff = (sum_time - dtfl.total_time) * idl_unit_cost
self.assertGreater(diff, 0)
self.assertAlmostEqual(dtfl.total_cost, sum_cost - diff)
def test_segment_time_list(self):
''' segment_time_list(). '''
dtfl = NNDataflowScheme(self.network, self.input_layout)
dtfl['c1'] = self.c1res
dtfl['p1'] = self.p1res
dtfl['p2'] = self.p2res._replace(sched_seq=(1, 0, 0))
self.assertListEqual(dtfl.segment_time_list(), [205, 5])
def test_segment_dram_time_list(self):
''' segment_dram_time_list(). '''
c1_scheme = self.c1res.scheme.copy()
c1_scheme['dram_time'] = 180
p1_scheme = self.p1res.scheme.copy()
p1_scheme['dram_time'] = 5
p2_scheme = self.p2res.scheme.copy()
p2_scheme['dram_time'] = 10
dtfl = NNDataflowScheme(self.network, self.input_layout)
dtfl['c1'] = self.c1res._replace(scheme=c1_scheme)
dtfl['p1'] = self.p1res._replace(scheme=p1_scheme)
dtfl['p2'] = self.p2res._replace(sched_seq=(1, 0, 0),
scheme=p2_scheme)
self.assertListEqual(dtfl.segment_dram_time_list(), [185, 10])
self.assertListEqual(dtfl.segment_time_list(), [205, 10])
def test_stats_active_node_pes(self):
''' Per-layer stats: active node PEs. '''
stats = self.dtfl.perlayer_stats('active_node_pes')
self.assertEqual(len(stats), len(self.dtfl))
self.assertAlmostEqual(stats['c1'], 0.005)
self.assertAlmostEqual(stats['p1'], 0.01)
self.assertAlmostEqual(stats['p2'], 0.01)
def test_stats_dram_bandwidth(self):
''' Per-layer stats: DRAM bandwidth. '''
stats = self.dtfl.perlayer_stats('dram_bandwidth')
self.assertEqual(len(stats), len(self.dtfl))
self.assertAlmostEqual(stats['c1'], (7. + 8. + 9.) / 200)
self.assertAlmostEqual(stats['p1'], (.7 + .8 + .9) / 5)
self.assertAlmostEqual(stats['p2'], (.7 + .8 + .9) / 5)
def test_stats_not_supported(self):
''' Per-layer stats: not supported. '''
with self.assertRaisesRegexp(AttributeError,
'NNDataflowScheme: .*not_supported.*'):
_ = self.dtfl.perlayer_stats('not_supported')
```
#### File: tests/unit_test/test_util.py
```python
import math
import unittest
from nn_dataflow import util
class TestUtilHashableDict(unittest.TestCase):
''' Tests for util.HashableDict. '''
def test_fromdict(self):
''' fromdict. '''
d = {'k': 1, 3: 'a'}
hd1 = util.HashableDict.fromdict(d)
self.assertSetEqual(set(d.items()), set(hd1.items()))
hd2 = util.HashableDict.fromdict(d)
self.assertNotEqual(id(hd1), id(hd2))
self.assertEqual(hd1, hd2)
self.assertEqual(hash(hd1), hash(hd2))
hd3 = util.HashableDict.fromdict(
d, keyfunc=str, valfunc=lambda x: frozenset([x]))
self.assertNotEqual(hd1, hd3)
def test_fromdict_error(self):
''' fromdict error. '''
with self.assertRaisesRegexp(TypeError, 'HashableDict: .*dict.*'):
_ = util.HashableDict.fromdict([1, 2])
def test_eq(self):
''' __eq__ and __ne__. '''
hd = util.HashableDict([('k', 1), (3, 'a')])
lst = ['k', 3]
self.assertEqual(hd, hd.copy())
self.assertNotEqual(hd, lst)
def test_copy(self):
''' copy. '''
hd = util.HashableDict([('k', 1), (3, 'a')])
self.assertNotEqual(id(hd), id(hd.copy()))
self.assertEqual(hd, hd.copy())
self.assertEqual(hash(hd), hash(hd.copy()))
def test_setitem_delitem(self):
''' __setitem__ and __delitem__. '''
hd = util.HashableDict([('k', 1), (3, 'a')])
self.assertIn('k', hd)
self.assertEqual(hd[3], 'a')
self.assertEqual(len(hd), 2)
with self.assertRaises(KeyError):
hd[2] = 'b'
with self.assertRaises(KeyError):
hd[3] = 'b'
with self.assertRaises(KeyError):
hd.update([(2, 'b')])
with self.assertRaises(KeyError):
hd.setdefault(2, [])
with self.assertRaises(KeyError):
del hd[3]
with self.assertRaises(KeyError):
hd.pop(3)
with self.assertRaises(KeyError):
hd.popitem()
with self.assertRaises(KeyError):
hd.clear()
class TestUtilIdivc(unittest.TestCase):
''' Tests for util.idivc. '''
def test_int(self):
''' Int. '''
self.assertEqual(util.idivc(8, 3), 3)
self.assertEqual(util.idivc(8, 2), 4)
self.assertEqual(util.idivc(8, 1), 8)
def test_negative(self):
''' Negative. '''
self.assertEqual(util.idivc(34, 4), 9, 'idivc: negative')
self.assertEqual(util.idivc(-34, 4), -8, 'idivc: negative')
self.assertEqual(util.idivc(34, -4), -8, 'idivc: negative')
self.assertEqual(util.idivc(-34, -4), 9, 'idivc: negative')
def test_zero(self):
''' Zero. '''
self.assertEqual(util.idivc(0, 3), 0, 'idivc: zero')
with self.assertRaises(ZeroDivisionError):
_ = util.idivc(3, 0)
def test_float(self):
''' Float. '''
self.assertAlmostEqual(util.idivc(4.3, 3), 2)
self.assertAlmostEqual(util.idivc(34.3, 3), 12)
self.assertAlmostEqual(util.idivc(34, 3.), 12)
def test_inf(self):
''' Inf. '''
self.assertEqual(util.idivc(3, float('inf')), 0, 'idivc: inf')
self.assertTrue(math.isnan(util.idivc(float('inf'), float('inf'))),
'idivc: inf')
class TestUtilProd(unittest.TestCase):
''' Tests for util.prod. '''
def test_int(self):
''' Int. '''
self.assertIsInstance(util.prod([3, 5, 7]), int)
self.assertEqual(util.prod([3, 5, 7]), 105)
self.assertEqual(util.prod([3, 5, -1]), -15)
self.assertEqual(util.prod([3, -5, 7]), -105)
self.assertEqual(util.prod([3, -5, 0]), 0)
self.assertEqual(util.prod((3, 5, 7)), 105)
self.assertEqual(util.prod(set([3, 5, 7])), 105)
self.assertEqual(util.prod({3: 'a', 5: 'b', 7: 'c'}), 105)
def test_float(self):
''' Float. '''
self.assertAlmostEqual(util.prod([1.1, 2, 3]), 6.6)
self.assertAlmostEqual(util.prod([1.1, 2, -3.]), -6.6)
def test_empty(self):
''' Empty. '''
self.assertEqual(util.prod([]), 1)
self.assertEqual(util.prod(tuple()), 1)
self.assertEqual(util.prod(set()), 1)
class TestUtilApproxDividable(unittest.TestCase):
''' Tests for util.approx_dividable. '''
def test_int(self):
''' Int. '''
self.assertTrue(util.approx_dividable(24, 2, overhead=0))
self.assertTrue(util.approx_dividable(24, 3, overhead=0))
self.assertTrue(util.approx_dividable(24, 4, overhead=0))
self.assertTrue(util.approx_dividable(11, 2))
self.assertFalse(util.approx_dividable(9, 2))
self.assertTrue(util.approx_dividable(19, 5))
self.assertTrue(util.approx_dividable(7, 2, overhead=0.2))
self.assertTrue(util.approx_dividable(19, 7, overhead=0.2))
self.assertFalse(util.approx_dividable(22, 7, overhead=0.2))
ovhd = util.idivc(19, 7) * 7 / 19. - 1
self.assertFalse(util.approx_dividable(19, 7, overhead=ovhd - 0.01))
self.assertTrue(util.approx_dividable(19, 7, overhead=ovhd + 0.01))
def test_float(self):
''' Float. '''
self.assertTrue(util.approx_dividable(18.4, 3))
self.assertTrue(util.approx_dividable(21.4, 3))
class TestUtilFactorize(unittest.TestCase):
''' Tests for util.factorize. '''
def test_prod(self):
''' Check prod. '''
for fs in util.factorize(24, 3):
self.assertEqual(util.prod(fs), 24)
for fs in util.factorize(1024, 3):
self.assertEqual(util.prod(fs), 1024)
def test_limits(self):
''' Check limits. '''
for fs in util.factorize(1024, 3, limits=(10, 20)):
self.assertLessEqual(fs[0], 10)
self.assertLessEqual(fs[1], 20)
self.assertEqual(util.prod(fs), 1024)
def test_len(self):
''' Length. '''
# Use 4 prime factors, 2, 3, 5, 7.
val = 2 * 3 * 5 * 7
self.assertEqual(len(list(util.factorize(val, 2))), 2 ** 4)
self.assertEqual(len(list(util.factorize(val, 3))), 3 ** 4)
for val in [24, 1024, (2 ** 4) * (3 ** 5) * (5 ** 2)]:
fs = list(util.factorize(val, 2))
self.assertEqual(len(fs), len(set(fs)))
def test_factors(self):
''' Factors. '''
factors2 = set()
for fs in util.factorize(24, 2):
factors2.update(fs)
self.assertSetEqual(factors2, set([1, 2, 3, 4, 6, 8, 12, 24]))
factors3 = set()
for fs in util.factorize(24, 3):
factors3.update(fs)
self.assertSetEqual(factors2, factors3)
def test_perm(self):
''' Permutations. '''
fs_ord = set()
fs_unord = set()
for fs in util.factorize(512, 3):
fs_ord.add(fs)
fs_unord.add(frozenset(fs))
cnt = 0
for fs in fs_unord:
if len(fs) == 3:
# Permutations.
cnt += math.factorial(3)
elif len(fs) == 2:
# Permutations of a, a, b.
cnt += 3
else:
# Pattern a, a, a.
cnt += 1
self.assertEqual(len(fs_ord), cnt)
class TestUtilClosestFactor(unittest.TestCase):
''' Tests for util.closest_factor. '''
def test_int(self):
''' Int. '''
self.assertTupleEqual(util.closest_factor(24, 5), (4, 6))
self.assertTupleEqual(util.closest_factor(24, 10), (8, 12))
self.assertTupleEqual(util.closest_factor(25, 3), (1, 5))
self.assertTupleEqual(util.closest_factor(25, 20), (5, 25))
def test_exact(self):
''' Exact factor. '''
self.assertTupleEqual(util.closest_factor(24, 6), (6, 6))
self.assertTupleEqual(util.closest_factor(24, 2), (2, 2))
self.assertTupleEqual(util.closest_factor(3, 1), (1, 1))
def test_value_float(self):
''' Value is float. '''
with self.assertRaisesRegexp(TypeError, '.*integers.*'):
_ = util.closest_factor(24.3, 5)
with self.assertRaisesRegexp(TypeError, '.*integers.*'):
_ = util.closest_factor(24., 10)
def test_factor_float(self):
''' Factor is float. '''
self.assertTupleEqual(util.closest_factor(24, 5.3), (4, 6))
self.assertTupleEqual(util.closest_factor(24, 10.2), (8, 12))
def test_zero(self):
''' Zero. '''
self.assertTupleEqual(util.closest_factor(0, 3), (3,))
self.assertTupleEqual(util.closest_factor(24, 0), (1,))
def test_negative(self):
''' Negative. '''
with self.assertRaisesRegexp(ValueError, '.*negative.*'):
_ = util.closest_factor(24, -5)
with self.assertRaisesRegexp(ValueError, '.*negative.*'):
_ = util.closest_factor(-24, -5)
with self.assertRaisesRegexp(ValueError, '.*negative.*'):
_ = util.closest_factor(-24, 5)
def test_missing(self):
''' Missing one or both. '''
fs = util.closest_factor(4, 5)
self.assertTupleEqual(fs, (4,))
fs = util.closest_factor(4, 0.2)
self.assertTupleEqual(fs, (1,))
def test_random(self):
''' Random test. '''
for val in range(1, 11):
for f in range(1, 11):
fs = util.closest_factor(val, f)
string = 'closest_factor: {} {} {}'.format(val, f, fs)
if len(fs) == 2:
self.assertEqual(val % fs[0], 0, string)
self.assertGreaterEqual(f, fs[0], string)
self.assertEqual(val % fs[1], 0, string)
self.assertLessEqual(f, fs[1], string)
elif len(fs) == 1:
self.assertEqual(val % fs[0], 0, string)
class TestUtilGetIthRange(unittest.TestCase):
''' Tests for util.get_ith_range. '''
def setUp(self):
self.test_list = [((0, 16), 4),
((0, 44), 5),
((5, 39), 7),
((10, 41), 8),
((10, 43), 8),
]
def test_coverage(self):
''' Coverage. '''
for rng, num in self.test_list:
last_end = rng[0]
for idx in range(num):
beg, end = util.get_ith_range(rng, idx, num)
self.assertEqual(beg, last_end)
last_end = end
self.assertEqual(last_end, rng[1])
def test_equal_size(self):
''' Equal size. '''
for rng, num in self.test_list:
min_size = float('inf')
max_size = -float('inf')
for idx in range(num):
beg, end = util.get_ith_range(rng, idx, num)
min_size = min(min_size, end - beg)
max_size = max(max_size, end - beg)
self.assertLessEqual(max_size - min_size, 1)
class TestUtilGCD(unittest.TestCase):
''' Tests for util.gcd. '''
def test_int(self):
''' Integers. '''
self.assertEqual(util.gcd(3, 4), 1)
self.assertEqual(util.gcd(8, 4), 4)
self.assertEqual(util.gcd(3, 9), 3)
self.assertEqual(util.gcd(15, 12), 3)
self.assertEqual(util.gcd(300, 410), 10)
def test_multi(self):
''' Multiple values. '''
self.assertEqual(util.gcd(4, 8, 10), 2)
self.assertEqual(util.gcd(*range(6, 21, 3)), 3)
def test_single(self):
''' Single value. '''
for v in range(1, 10):
self.assertEqual(util.gcd(v), v)
def test_no_arg(self):
''' No argument. '''
with self.assertRaises(ValueError):
_ = util.gcd()
def test_float(self):
''' Float. '''
with self.assertRaisesRegexp(TypeError, '.*integers.*'):
_ = util.gcd(1., 2)
with self.assertRaisesRegexp(TypeError, '.*integers.*'):
_ = util.gcd(1, 2.2)
with self.assertRaisesRegexp(TypeError, '.*integers.*'):
_ = util.gcd(1, 2, 3, 4.2)
def test_non_positive(self):
''' Non-positive values. '''
with self.assertRaisesRegexp(ValueError, '.*positive.*'):
_ = util.gcd(-1, 2)
with self.assertRaisesRegexp(ValueError, '.*positive.*'):
_ = util.gcd(1, -2)
with self.assertRaisesRegexp(ValueError, '.*positive.*'):
_ = util.gcd(3, 6, 9, 12, -21)
with self.assertRaisesRegexp(ValueError, '.*positive.*'):
_ = util.gcd(3, 0)
with self.assertRaisesRegexp(ValueError, '.*positive.*'):
_ = util.gcd(0, 3)
with self.assertRaisesRegexp(ValueError, '.*positive.*'):
_ = util.gcd(0, 5, 10, 15, 20)
with self.assertRaisesRegexp(ValueError, '.*positive.*'):
_ = util.gcd(5, 10, 0, 15, 20)
class TestUtilLCM(unittest.TestCase):
''' Tests for util.lcm. '''
def test_int(self):
''' Integers. '''
self.assertEqual(util.lcm(3, 4), 12)
self.assertEqual(util.lcm(8, 4), 8)
self.assertEqual(util.lcm(3, 9), 9)
self.assertEqual(util.lcm(15, 12), 60)
self.assertEqual(util.lcm(300, 410), 12300)
def test_multi(self):
''' Multiple values. '''
self.assertEqual(util.lcm(4, 8, 10), 40)
self.assertEqual(util.lcm(*range(6, 21, 3)), 180)
def test_single(self):
''' Single value. '''
for v in range(1, 10):
self.assertEqual(util.lcm(v), v)
def test_no_arg(self):
''' No argument. '''
with self.assertRaises(ValueError):
_ = util.lcm()
def test_float(self):
''' Float. '''
with self.assertRaisesRegexp(TypeError, '.*integers.*'):
_ = util.lcm(1., 2)
with self.assertRaisesRegexp(TypeError, '.*integers.*'):
_ = util.lcm(1, 2.2)
with self.assertRaisesRegexp(TypeError, '.*integers.*'):
_ = util.lcm(1, 2, 3, 4.2)
def test_non_positive(self):
''' Non-positive values. '''
with self.assertRaisesRegexp(ValueError, '.*positive.*'):
_ = util.lcm(-1, 2)
with self.assertRaisesRegexp(ValueError, '.*positive.*'):
_ = util.lcm(1, -2)
with self.assertRaisesRegexp(ValueError, '.*positive.*'):
_ = util.lcm(3, 6, 9, 12, -21)
with self.assertRaisesRegexp(ValueError, '.*positive.*'):
_ = util.lcm(3, 0)
with self.assertRaisesRegexp(ValueError, '.*positive.*'):
_ = util.lcm(0, 3)
with self.assertRaisesRegexp(ValueError, '.*positive.*'):
_ = util.lcm(0, 5, 10, 15, 20)
with self.assertRaisesRegexp(ValueError, '.*positive.*'):
_ = util.lcm(5, 10, 0, 15, 20)
class TestUtilIsclose(unittest.TestCase):
''' Tests for util.isclose. '''
def test_default_tol(self):
        ''' Default tolerances. '''
self.assertTrue(util.isclose(14, 14))
self.assertTrue(util.isclose(-19, -19))
self.assertFalse(util.isclose(14, -14))
self.assertFalse(util.isclose(2, 3))
self.assertFalse(util.isclose(2, 2.01))
def test_rel_tol(self):
        ''' Relative tolerance. '''
self.assertTrue(util.isclose(14., 14.001, rel_tol=1e-3))
self.assertTrue(util.isclose(0.001, 0.001001, rel_tol=1e-3))
self.assertFalse(util.isclose(-14., 14.001, rel_tol=1e-3))
self.assertFalse(util.isclose(0.001, 0.0011, rel_tol=1e-3))
def test_abs_tol(self):
        ''' Absolute tolerance. '''
self.assertTrue(util.isclose(14., 16, abs_tol=3))
self.assertTrue(util.isclose(14., 14.001, abs_tol=2e-3))
self.assertTrue(util.isclose(0.001, 0.001001, abs_tol=2e-6))
self.assertTrue(util.isclose(0.001, 0.0011, abs_tol=2e-4))
self.assertFalse(util.isclose(-14., 14.001, abs_tol=1))
def test_both_tol(self):
        ''' Both tolerances. '''
self.assertTrue(util.isclose(14., 14.001, rel_tol=1e-3, abs_tol=2e-6))
self.assertTrue(util.isclose(14., 14.001, rel_tol=1e-6, abs_tol=2e-3))
self.assertTrue(util.isclose(14., 14.001, rel_tol=1e-3, abs_tol=2e-3))
self.assertFalse(util.isclose(14., 14.001, rel_tol=1e-6, abs_tol=2e-6))
class TestUtilAssertFloatEqInt(unittest.TestCase):
''' Tests for util.assert_float_eq_int. '''
def test_success(self):
''' Success. '''
# pylint: disable=no-self-use
util.assert_float_eq_int(12., 12)
util.assert_float_eq_int(12.3, 12)
util.assert_float_eq_int(12.99, 12)
util.assert_float_eq_int(11.01, 12)
util.assert_float_eq_int(-11.8, -12)
util.assert_float_eq_int(.01, 0)
util.assert_float_eq_int(-.01, 0)
def test_fail(self):
''' Fail. '''
with self.assertRaisesRegexp(AssertionError, '.*12.*'):
util.assert_float_eq_int(13.01, 12)
with self.assertRaisesRegexp(AssertionError, '.*12.*'):
util.assert_float_eq_int(10.99, 12)
with self.assertRaisesRegexp(AssertionError, '.*12.*'):
util.assert_float_eq_int(12., -12)
``` |
{
"source": "joeshow79/WiderFace-Evaluation",
"score": 2
} |
#### File: joeshow79/WiderFace-Evaluation/test_ocv_face_dt_on_wider.py
```python
import os
import tqdm
import argparse
import cv2 as cv
def do_test(imgs_dir):
events = os.listdir(imgs_dir)
ibar = tqdm.tqdm(events)
for event in ibar:
ibar.set_description('Processing image ')
event_dir = os.path.join(imgs_dir, event)
event_images = os.listdir(event_dir)
for img in event_images:
img_name = os.path.join(event_dir, img)
# TODO: inference on this image
print('img_name: {}'.format(img_name))
img_data = cv.imread(img_name)
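            # Create a YuNet face detector from a local ONNX model (path is
            # environment-specific). create() expects the input size as
            # (width, height), while img_data.shape[:2] is (height, width);
            # setInputSize() below sets the correct size before detect().
            # For speed, the detector could also be created once outside the loop.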
faceDetector = cv.FaceDetectorYN.create("/workspace/src/github/libfacedetection.train/tasks/task1/onnx/yunet.onnx", "", img_data.shape[:2])
faceDetector.setInputSize((img_data.shape[1], img_data.shape[0]))
ret, faces = faceDetector.detect(img_data)
#print('faces: {}'.format(faces))
# The output is as below
# faces: (1, array([[ 411.48217773, 361.2265625 , 125.81674194, 129.13446045,
# 460.88241577, 389.42178345, 490.09075928, 423.27612305,
# 451.96624756, 423.83236694, 422.34909058, 429.95529175,
# 447.07940674, 458.84817505, 0.99273545]], dtype=float32))
# Write the inference result into txt
            img_txt = os.path.splitext(img_name)[0]  # rstrip('.jpg') would strip any trailing '.', 'j', 'p', 'g' characters, not the suffix
f = open(img_txt, 'w')
f.write('{}\r\n'.format(img))
f.write('bboxes:\r\n')
if faces is not None:
for idx, face in enumerate(faces):
f.write('{} {} {} {} {}\r\n'.format(face[0], face[1], face[2], face[3], face[14]))
f.close()
# break
# break
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--imgs', default='./WIDER_t/WIDER_val/images/')
args = parser.parse_args()
do_test(args.imgs)
``` |
{
"source": "joeShuff/ha-york-binday",
"score": 3
} |
#### File: joeShuff/ha-york-binday/api_hit.py
```python
import requests
import json
from datetime import datetime, timezone
def timestamp_from_api_response(resp):
try:
time = datetime.fromisoformat(resp)
return time.strftime('%d/%m/%Y')
except Exception as e:
return "ERROR (converting time)"
def days_until_collection(resp):
try:
time = datetime.fromisoformat(resp)
        if time.tzinfo is None:
            time = time.replace(tzinfo=timezone.utc)
now = datetime.now(timezone.utc)
diff = time - now
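        # timedelta.days is floored, so +1 makes a collection due tomorrow
        # count as 1 day away and one due earlier today count as 0.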
return diff.days + 1
except Exception as e:
return "ERROR (calc days)"
def get_from_json(obj, key, default="N/F"):
try:
return obj[key]
except:
return default
property_id = "PROPERTY_ID_HERE"
endpoint = "https://cyc-myaccount-live.azurewebsites.net/api/bins/GetCollectionDetails/" + str(property_id)
print(days_until_collection("2020-12-15T00:00:00+00:00"))
result = requests.request('GET', endpoint)
if result.status_code != 200:
print("Error making get request" + str(result.status_code))
else:
json_response = json.loads(result.content)['services']
bins = len(json_response)
if bins > 0:
print(str(bins) + " bins found.")
for bin in json_response:
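            # Normalize the service name into a snake_case key
            # (lowercase, spaces and '/' replaced with '_').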
waste_type = str(bin['service'].lower().replace(" ", "_").replace("/", "_"))
days_until = days_until_collection(bin['nextCollection'])
print("days is " + str(days_until))
print("bin is " + str(bin))
``` |
{
"source": "joeshull/twosigma_kaggle",
"score": 3
} |
#### File: bin/core/aapl_model.py
```python
import os
import json
import time
import math
import pandas as pd
import matplotlib.pyplot as plt
from src.data_processor import DataLoader
# from src.model import Model
from sklearn.metrics import roc_curve, roc_auc_score
from sklearn.preprocessing import StandardScaler
import matplotlib as mpl
import numpy as np
import tensorflow, tensorboard
from keras.callbacks import TensorBoard
from keras.layers import Input, Dense, Activation, Dropout, LSTM
from keras.models import Model
from keras.optimizers import Adam
def plot_results(predicted_data, true_data):
fig = plt.figure(facecolor='white')
ax = fig.add_subplot(111)
ax.plot(true_data, label='True Data')
plt.plot(predicted_data, label='Prediction')
plt.legend()
plt.show()
def plot_results_multiple(predicted_data, true_data, prediction_len):
fig = plt.figure(facecolor='white')
ax = fig.add_subplot(111)
ax.plot(true_data, label='True Data')
    # Pad the list of predictions to shift it in the graph to its correct start
for i, data in enumerate(predicted_data):
padding = [None for p in range(i * prediction_len)]
plt.plot(padding + data, label='Prediction')
plt.legend()
plt.show()
def plot_roc(probs, y_test,ax):
fpr, tpr, thresholds = roc_curve(y_test,probs)
auc_score = round(roc_auc_score(y_test, probs),4)
ax.plot(fpr, tpr, label=f'Initial LSTM = {auc_score} AUC')
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='k',
label='Luck')
ax.set_xlabel("False Positive Rate (1-Specificity)")
ax.set_ylabel("True Positive Rate (Sensitivity, Recall)")
ax.set_title("ROC/AUC: AAPL - Trained AAPL Only - 5day sequence_length")
ax.legend()
def build_model():
neurons = 300
n_steps = 5
n_features = 24
#real input is n_iterations, n_timesteps, n_features
#cat input is n_iterations, n_timesteps, 1
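    # Stacked LSTM -> dropout -> dense ReLU -> sigmoid output: a binary
    # classifier for whether the next 10-day return is positive
    # (see get_test_windows, which thresholds returnsOpenNextMktres10 at 0).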
real_input = Input(shape=(n_steps, n_features,))
rnn = LSTM(neurons, input_shape=(n_steps, n_features),return_sequences=True)(real_input)
drop = Dropout(.2)(rnn)
rnn = LSTM(neurons, input_shape=(n_steps, n_features),return_sequences=True)(drop)
rnn = LSTM(neurons, input_shape=(n_steps, n_features),return_sequences=False)(rnn)
drop = Dropout(.2)(rnn)
dense = Dense(neurons, activation='relu')(drop)
dense = Dense(1, activation='sigmoid')(dense)
M = Model(inputs=[real_input], outputs=[dense])
adam = Adam(lr=0.0005)
M.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
return M
def get_test_windows(data_test, testcols, assetNames, seq_len, normalize):
x_test = []
y_test = []
for asset in assetNames:
        window = data_test.loc[data_test.assetName == asset, testcols].tail(seq_len).values
window = np.array(window).astype(float)
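        # Zero-pad assets with fewer than seq_len rows so every window is
        # (seq_len, len(testcols)); column 0 is the target return, the rest
        # are features.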
if window.shape[0] < seq_len:
pad = np.zeros((seq_len-window.shape[0],len(testcols)))
window = np.vstack((pad,window))
x_test.append(window[:,1:])
y_test.append(window[-1,0])
x_test = np.array(x_test).astype(float)
y_test = np.where(np.array(y_test).astype(float)>0,1,0)
x_test = normalize_windows(x_test, single_window=False) if normalize else x_test
return np.array(x_test), np.array(y_test)
def normalize_windows(window_data, single_window=False):
    '''Standardize each window: StandardScaler gives zero mean, unit variance per feature.'''
normalized_data = []
window_data = [window_data] if single_window else window_data
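    # The scaler is fit on each window independently, so values are scaled
    # relative to that window only (no information shared across windows).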
for window in window_data:
scaler = StandardScaler()
normalized_window = scaler.fit_transform(window)
normalized_data.append(normalized_window)
return np.array(normalized_data)
if __name__ == '__main__':
df = pd.read_pickle('../data/init_train_data.pkl')
test_cols = ["returnsOpenNextMktres10","returnsClosePrevRaw1",
"returnsOpenPrevRaw1", "returnsClosePrevMktres1",
"returnsOpenPrevMktres1",
"returnsClosePrevMktres10",
"returnsOpenPrevMktres10", "dailychange",
"dailyaverage","companyCount", "relevance",
"sentimentNegative", "sentimentNeutral", "sentimentPositive",
"noveltyCount12H", "noveltyCount24H", "noveltyCount3D",
"noveltyCount5D", "noveltyCount7D", "volumeCounts12H",
"volumeCounts24H", "volumeCounts3D", "volumeCounts5D",
"volumeCounts7D", "coverage"
]
dflate = df.loc[((df.time<20160601) & (df.time>20160501))]
xall, yall = get_test_windows(dflate, test_cols, dflate.assetName.unique(),5,normalize=True)
configs = json.load(open('config.json', 'r'))
# if not os.path.exists(configs['model']['save_dir']): os.makedirs(configs['model']['save_dir'])
data = DataLoader(
os.path.join('data', configs['data']['filename']),
configs['data']['train_test_split'],
configs['data']['columns']
)
# #Get Embedded X,y for each company
# #Configs
config_aapl = json.load(open('config_aapl.json', 'r'))
config_advance = json.load(open('config_advance.json', 'r'))
config_allstate = json.load(open('config_allstate.json', 'r'))
data_aapl = DataLoader(
os.path.join('data', config_aapl['data']['filename']),
config_aapl['data']['train_test_split'],
config_aapl['data']['columns']
)
data_adv = DataLoader(
os.path.join('data', config_advance['data']['filename']),
config_advance['data']['train_test_split'],
config_advance['data']['columns']
)
data_alls = DataLoader(
os.path.join('data', config_allstate['data']['filename']),
config_allstate['data']['train_test_split'],
config_allstate['data']['columns']
)
#AAPL Data
xapl, yapl = data_aapl.get_train_data(
seq_len=config_aapl['data']['sequence_length'],
normalize=config_aapl['data']['normalize']
)
Xapl = [xapl,np.zeros((xapl.shape[0],xapl.shape[1],1))]
#Advance Data
xadv, yadv = data_adv.get_train_data(
seq_len=config_advance['data']['sequence_length'],
normalize=config_advance['data']['normalize']
)
Xadv = [xadv,np.ones((xadv.shape[0],xadv.shape[1],1))]
#Allstate Data
xalls, yalls = data_alls.get_train_data(
seq_len=config_allstate['data']['sequence_length'],
normalize=config_allstate['data']['normalize']
)
allemb = np.ones((xalls.shape[0],xalls.shape[1],1))+1
    Xalls = [xalls, allemb]
# #Test With Embedding
# #AAPL
x_test_apl, y_test_apl = data_aapl.get_test_data(
seq_len=config_aapl['data']['sequence_length'],
normalize=config_aapl['data']['normalize']
)
em_apl = np.zeros((x_test_apl.shape[0], x_test_apl.shape[1],1))
X_test_apl = [x_test_apl, em_apl]
#Advance
x_test_adv, y_test_adv = data_adv.get_test_data(
seq_len=config_advance['data']['sequence_length'],
normalize=config_advance['data']['normalize']
)
em_adv = np.ones((x_test_adv.shape[0], x_test_adv.shape[1],1))
X_test_adv = [x_test_adv, em_adv]
# #Allstate
x_test_alls, y_test_alls = data_alls.get_test_data(
seq_len=config_allstate['data']['sequence_length'],
normalize=config_allstate['data']['normalize']
)
em_alls = np.ones((x_test_alls.shape[0], x_test_alls.shape[1],1)) + 1
X_test_alls = [x_test_alls, em_alls]
#Build Model for Embedding
tbCallBack = TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)
model = build_model()
model.fit(xapl, yapl, epochs=5, batch_size=50, validation_data=[x_test_apl, y_test_apl], callbacks=[tbCallBack])
# for X,y in zip([Xapl, Xadv, Xalls],[yapl, yadv, yalls]):
# model.fit(X,y,
# epochs=config_allstate['training']['epochs'],
# batch_size=config_allstate['training']['batch_size'],
# validation_data=(X_test_alls, y_test_alls))
##NO EMBED
# # out-of memory generative training
# data = DataLoader(
# os.path.join('data', configs['data']['filename']),
# configs['data']['train_test_split'],
# configs['data']['columns']
# )
# model = Model()
# model.build_model(configs)
# steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
# model.train_generator(
# data_gen=data.generate_train_batch(
# seq_len=configs['data']['sequence_length'],
# batch_size=configs['training']['batch_size'],
# normalize=configs['data']['normalize']
# ),
# epochs=configs['training']['epochs'],
# batch_size=configs['training']['batch_size'],
# steps_per_epoch=steps_per_epoch,
# save_dir=configs['model']['save_dir']
# )
# x_test, y_test = data.get_test_data(
# seq_len=configs['data']['sequence_length'],
# normalize=configs['data']['normalize']
# )
#no embedding
# predictions = model.predict_point_by_point(x_test)
predictions = model.predict(xall)
predictions = np.reshape(predictions, (predictions.size,))
fig, ax = plt.subplots(figsize=(12,12))
plot_roc(predictions, yall, ax)
plt.title('ROC/AUC on Final LSTM, All Companies, 06/01/2016')
plt.show()
```
#### File: bin/core/data_processor.py
```python
import math
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
class DataPrepper():
"""A class for loading and merging the stock and news data
Instantiates with:
INPUTS:
Columns to drop from the market df - List of strings
Columns to drop from the news df - List of strings
A Split date for Cross Validation data - integer YYYYMMDD
"""
def __init__(self,train_cutoff=20100101):
self.train_cutoff = train_cutoff
self.train_data = None
def load_data(self, market_file, news_file):
"""
Load data into class for processing:
Inputs:
market_file - string location of pickled df
news_file - string location of pickled df
Outputs:
None
"""
self.market_train = pd.read_pickle(market_file)
self.news_train = pd.read_pickle(news_file)
def make_price_diff(self, market_train):
"""eda function to find outliers
Inputs:
market_train - df of financial data
Output:
Dataframe with new columns:
'closeOverOpen' - Close/Open
'priceDiff' - Close minus Open
"""
market_train['closeOverOpen'] = market_train['Close']/market_train['Open']
market_train['priceDiff'] = np.abs(market_train['Close'] - market_train['Open'])
return market_train
def _replace_price_outliers(self, market_train=None):
"""
Hidden Function to replace outlier/incorrect open and close data
"""
        if market_train is None:
            market_train = self.market_train
            trainprep = True
        else:
            trainprep = False
market_train['dailychange'] = market_train['close']/market_train['open']
market_train.loc[market_train['dailychange'] < .33,'open'] = market_train['close']
market_train.loc[market_train['dailychange'] > 2, 'close'] = market_train['open']
if trainprep:
self.market_train = market_train
else:
return market_train
def prepare_market(self, market_train=None):
"""
Prepares the market_train dataframe for merging.
Performs all aggregation and datacleaning functions
Input:
market_train - (optional) Dataframe
Output:
(optional) Dataframe of prepared data (or stored to object)
"""
if market_train is None:
market_train = self.market_train
self._replace_price_outliers()
trainprep = True
        else:
            market_train = self._replace_price_outliers(market_train)
            trainprep = False
market_train['time'] = market_train['time'].dt.strftime("%Y%m%d").astype(int)
market_train = market_train[market_train.time >= self.train_cutoff]
market_train['todayreturnraw'] = market_train['close']/market_train['open']
market_train['pricevolume'] = market_train['volume']/market_train['close']
self.tradingdays = market_train['time'].unique()
if trainprep:
self.market_train = market_train
else:
return market_train
def prepare_news(self, news_train=None, market_train=None):
"""
Prepares the news_train dataframe for merging.
Performs all aggregation and datacleaning functions
Input:
news_train - (optional) Dataframe
market_train - (optional) Dataframe. If news_train, is passed, market_train must also be passed
for trading-day news merge to work.
Output:
(optional) Dataframe of prepared data (or stored to object)
"""
if news_train is None:
news_train = self.news_train
self.tradingdays = self.market_train['time'].unique()
            trainprep = True
        else:
            self.tradingdays = market_train['time'].unique()
            trainprep = False
news_train['time'] = news_train['time'].dt.strftime("%Y%m%d").astype(int)
news_train = news_train[news_train.time >= self.train_cutoff]
news_train['time'] = news_train['time'].apply(self._map_trading_day)
news_train['coverage'] = news_train['sentimentWordCount']/news_train['wordCount']
if trainprep:
self.news_train = news_train
else:
            return news_train
def _map_trading_day(self, news_date):
"""
Hidden function for datafame.map.
Maps the news_date to its respective trading day.
"""
if news_date in self.tradingdays:
return news_date
else:
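            # Map to the next trading day on or after the news date. This
            # assumes self.tradingdays is in ascending (chronological) order;
            # news dated after the final trading day falls back to 0.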
values = self.tradingdays - news_date
mask = values >= 0
try:
return self.tradingdays[mask][0]
except:
return 0
def merge_data(self, market_df=None, news_df=None):
"""
Merges Market and News Data
Input:
market_df - (optional) previously prepared market dataframe
news_df - (optional) previously prepared news dataframe
Output:
Dataframe
"""
if market_df is None and news_df is None:
market_df = self.market_train
news_df = self.news_train
            trainprep = True
        else:
            trainprep = False
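        # Aggregate news by (time, assetName) with a mean, then left-merge onto
        # the market rows; days/assets without news become zeros via fillna(0).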
newsgroup = news_df.groupby(['time', 'assetName'], sort=False).agg(np.mean).reset_index()
merged = pd.merge(market_df, newsgroup, how='left', on=['time', 'assetName'], copy=False)
merged.fillna(value=0, inplace=True)
if trainprep:
self.train_data = merged
else:
return merged
def prepare_train_data(self):
"""
If data is training data, run this after calling load_data()
"""
self.prepare_market()
self.prepare_news()
self.merge_data()
def train_test_split(self, X, y, split_date=20151231):
"""
Splitting function to create a validation set from Training Data,
Inputs:
X - Dataframe of feature data including 'time' as integer
y - Np.array or Dataframe - The target of the same length as X
split_date - (Integer) Date to make split
Outputs:
X_train, X_test, y_train, y_test
"""
mask = X['time'] <= split_date
return X[mask], X[~mask], y[mask], y[~mask]
if __name__ == '__main__':
env_market = '../data/market_train_env.pkl'
env_news = '../data/news_train_env.pkl'
data = DataPrepper()
data.load_data(env_market, env_news)
data.prepare_train_data()
data.merge_data()
# data.train_data.to_pickle('../data/original_merged_train.pkl')
```
#### File: bin/core/evaluator.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.dates import date2num
class ReturnsEvaluator():
'''A stock prediction evaluator.
Requires a dataframe with returns and predictions in the form of the
Kaggle/Two-Sigma Competition circa fall 2018.
'''
def __init__(self,
timecol = 'time',
targetcol = 'returnsOpenNextMktres10',
universecol = 'universe',
predcol='confidenceValue',
rawcol='returnsOpenNextRaw10'):
self.timecol = timecol
self.targetcol = targetcol
self.universecol = universecol
self.predcol = predcol
self.rawcol = rawcol
def get_kaggle_mean_variance(self, df, model=True, universe=False):
'''Returns the Mean-variance metric used in the Kaggle competition.
Input: Dataframe with columns defined in object instantiation
Output: (float) The model's performance as evaluated by Kaggle
'''
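        # Kaggle's score: mean of the daily (prediction-weighted) returns
        # divided by their standard deviation, i.e. a Sharpe-like ratio.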
if universe:
if model:
df = self._create_model_returns(df, self.targetcol)
daily_returns = df.groupby(self.timecol).model_returns.mean()
else:
df = self._create_market_returns(df, self.targetcol)
daily_returns = df.groupby(self.timecol).market_returns.mean()
else:
if model:
                df['model_returns'] = df[self.predcol] * df[self.targetcol]
daily_returns = df.groupby(self.timecol).model_returns.mean()
else:
                df['market_returns'] = df[self.targetcol]
daily_returns = df.groupby(self.timecol).market_returns.mean()
mean_return = daily_returns.mean()
std_return = daily_returns.std()
return mean_return/std_return
def get_returns(self,df):
'''
Input: Dataframe with columns defined in object instantiation
Output: (dict)
1. 'model_raw' - (array) Cumulative Raw Return over the evaluation period
2. 'model_res' - (array) Cumulative Market Residualized return over the evaluation period
            3. 'market_raw' - (array) Cumulative raw return of the entire market
            4. 'market_res' - (array) Cumulative market-residualized return of the entire market
5. 'dates' - (pd.Series) The series of dates for prediction time period
'''
model_raw = self._calc_raw(df)
model_res = self._calc_res(df)
market_raw = self._calc_market_raw(df)
market_res = self._calc_market_res(df)
dates = self._get_date_series(df)
return {'model_raw' : model_raw, 'model_res' : model_res,
'market_raw' : market_raw, 'market_res' : market_res,
'dates' : dates}
def _create_model_returns(self, df, returnscol):
df['model_returns'] = df[self.predcol] * df[returnscol] * df[self.universecol]
return df
def _create_market_returns(self, df, returnscol):
df['market_returns'] = df[returnscol] * df[self.universecol]
return df
def groupby_time(self, df):
df = df.groupby(self.timecol).mean()
df.reset_index(level=self.timecol, inplace=True)
df.sort_values(self.timecol, inplace=True)
return df
def get_cumulative_return(self, df, returnscol):
model_returns = df[returnscol].values
invest = np.ones(len(model_returns))
principal_return = np.zeros((len(model_returns)))
raw_returns = np.zeros(len(model_returns))
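        # Capital is rolled through 11 overlapping tranches: each position is
        # funded by the tranche opened 11 steps earlier, and the daily
        # portfolio return is averaged across tranches (the /11 below).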
for i in range(len(model_returns)):
if i-11 < 0:
raw_returns[i] = model_returns[i]
continue
invest[i] = invest[i-11] + ((invest[i-11] - principal_return[i-11]) * model_returns[i-11])
raw_returns[i] = invest[i] * model_returns[i]
principal_return[i] = invest[i-11]
portfolio_return = raw_returns/11
portfolio_return[:11] = 0
return portfolio_return.cumsum()
def _calc_raw(self, df):
'''
Hidden Function that calculates the cumulative return of the model.
'''
df = self._create_model_returns(df, self.rawcol)
df = self.groupby_time(df)
return self.get_cumulative_return(df, 'model_returns')
def _calc_res(self, df):
'''
Hidden Function that calculates the cumulative return of the model.
'''
df = self._create_model_returns(df, self.targetcol)
df = self.groupby_time(df)
return self.get_cumulative_return(df, 'model_returns')
def _calc_market_raw(self, df):
'''
Hidden Function that calculates the cumulative return of the market.
'''
df = self._create_market_returns(df, self.rawcol)
df = self.groupby_time(df)
return self.get_cumulative_return(df, 'market_returns')
def _calc_market_res(self, df):
'''
Hidden Function that calculates the cumulative return of the market.
'''
df = self._create_market_returns(df, self.targetcol)
df = self.groupby_time(df)
return self.get_cumulative_return(df, 'market_returns')
def _get_date_series(self, df):
'''
Hidden function that returns the series of dates for prediction time-period
'''
df = self.groupby_time(df)
df['DateTime'] = pd.to_datetime(df[self.timecol].astype(str), format='%Y%m%d')
return df['DateTime']
def plot_model_vs_market(dates, model_returns, market_returns, ax, title='Model vs Market'):
X = date2num(dates.values)
ax.plot_date(X, model_returns,
linestyle='-',
linewidth=2,
markersize=.1,
label=f'Model Returns : {round(model_returns[-1],2)}')
ax.plot_date(X, market_returns,
linestyle='-',
linewidth=2,
markersize=.2,
label=f'Market Returns : {round(market_returns[-1],2)}')
for tick in ax.get_xticklabels():
tick.set_rotation(90)
ax.set_xlabel('Dates')
ax.set_ylabel('Cumulative Return')
ax.set_title(title)
ax.legend()
if __name__ == '__main__':
#test the class
np.random.seed(31)
df_test = pd.read_pickle('../data/5dayapple_pred.pkl')
df_test = df_test.loc[(df_test.time>=20170101) & (df_test.time<=20181101)]
test_data = {'time': np.arange(60),
'confidenceValue' : np.ones(60),
'universe' : np.ones(60),
'returnsOpenNextRaw10' : (np.ones(60)*.03),
'returnsOpenNextMktres10' : (np.ones(60)*.03)
}
test = pd.DataFrame.from_dict(test_data, orient='columns')
evaluator = ReturnsEvaluator()
print(evaluator.get_kaggle_mean_variance(test))
portfolio_return = evaluator._calc_raw(test)
metrics_dict = evaluator.get_returns(df_test)
dates = metrics_dict['dates']
model_returns = metrics_dict['model_raw']
market_returns = metrics_dict['market_raw']
fig, ax = plt.subplots()
plot_model_vs_market(dates, model_returns, market_returns, ax)
plt.show()
```
#### File: bin/plot_helpers/classifier_performance_plothelper.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import itertools
from matplotlib.colors import ListedColormap
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegression as LR
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.ensemble import GradientBoostingClassifier as GBC
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve, roc_auc_score
import matplotlib as mpl
font_size = 24
mpl.rcParams.update({'font.size': font_size})
mpl.rcParams['xtick.labelsize'] = font_size-5
mpl.rcParams['ytick.labelsize'] = font_size-5
plt.style.use('fivethirtyeight')
def plot_coefficient_importances(fitted_model, features, ax):
sort_idx = np.flip(fitted_model.coef_[0].argsort())
    coefs = [fitted_model.coef_[0][i] for i in sort_idx]
features = [features[i] for i in sort_idx]
pos = np.arange(1, len(coefs)+1, 1)
ax.bar(pos,coefs)
    ax.set_xticks(pos)
    ax.set_xticklabels(features, rotation=90)
ax.set_title('LogisticRegression Coefficients')
def plot_confusion_matrix(cm, ax, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
"""
    # Normalize before plotting so the heatmap and the text annotations agree.
    if normalize:
        cm = np.round(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], 2)
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    p = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.set_title(title, fontsize=24)
    plt.colorbar(p)
    tick_marks = np.arange(len(classes))
    ax.set_xticks(tick_marks)
    ax.set_xticklabels(classes, rotation=0)
    ax.set_yticks(tick_marks)
    ax.set_yticklabels(classes)
thresh = cm.max() / 1.5
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
ax.text(j, i, cm[i, j],
horizontalalignment="center", size = 24,
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
ax.set_ylabel('True label',fontsize=24)
ax.set_xlabel('Predicted label',fontsize=24)
def plot_roc(fitted_model, X, y, ax):
probs = fitted_model.predict_proba(X)
fpr, tpr, thresholds = roc_curve(y, probs[:,1])
auc_score = round(roc_auc_score(y,probs[:,1]), 4)
ax.plot(fpr, tpr, label= f'{fitted_model.__class__.__name__} = {auc_score} AUC')
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='k',
label='Luck')
# ax.set_xlabel("False Positive Rate (1-Specificity)")
# ax.set_ylabel("True Positive Rate (Sensitivity, Recall)")
# ax.set_title("ROC plot of 'Churn, Not Churn'")
def standard_confusion_matrix(y_true, y_pred):
"""Make confusion matrix with format:
-----------
| TP | FP |
-----------
| FN | TN |
-----------
Parameters
----------
y_true : ndarray - 1D
y_pred : ndarray - 1D
Returns
-------
ndarray - 2D
"""
[[tn, fp], [fn, tp]] = confusion_matrix(y_true, y_pred)
return np.array([[tp, fp], [fn, tn]])
def profit_curve(cost_benefit, predicted_probs, labels):
"""Function to calculate list of profits based on supplied cost-benefit
    matrix and predicted probabilities of data points and their true labels.
Parameters
----------
cost_benefit : ndarray - 2D, with profit values corresponding to:
-----------
| TP | FP |
-----------
| FN | TN |
-----------
predicted_probs : ndarray - 1D, predicted probability for each datapoint
in labels, in range [0, 1]
labels : ndarray - 1D, true label of datapoints, 0 or 1
Returns
-------
profits : ndarray - 1D
thresholds : ndarray - 1D
"""
n_obs = float(len(labels))
# Make sure that 1 is going to be one of our thresholds
maybe_one = [] if 1 in predicted_probs else [1]
thresholds = maybe_one + sorted(predicted_probs, reverse=True)
profits = []
for threshold in thresholds:
y_predict = predicted_probs >= threshold
confusion_matrix = standard_confusion_matrix(labels, y_predict)
threshold_profit = np.sum(confusion_matrix * cost_benefit) / n_obs
profits.append(threshold_profit)
return np.array(profits), np.array(thresholds)
def get_model_profits(model, cost_benefit, X_train, X_test, y_train, y_test):
"""Fits passed model on training data and calculates profit from cost-benefit
matrix at each probability threshold.
Parameters
----------
model : sklearn model - need to implement fit and predict
cost_benefit : ndarray - 2D, with profit values corresponding to:
-----------
| TP | FP |
-----------
| FN | TN |
-----------
X_train : ndarray - 2D
X_test : ndarray - 2D
y_train : ndarray - 1D
y_test : ndarray - 1D
Returns
-------
model_profits : model, profits, thresholds
"""
model.fit(X_train, y_train)
predicted_probs = model.predict_proba(X_test)[:, 1]
profits, thresholds = profit_curve(cost_benefit, predicted_probs, y_test)
return profits, thresholds
def plot_model_profits(model_profits, save_path=None):
"""Plotting function to compare profit curves of different models.
Parameters
----------
model_profits : list((model, profits, thresholds))
save_path : str, file path to save the plot to. If provided plot will be
saved and not shown.
"""
for model, profits, _ in model_profits:
percentages = np.linspace(0, 100, profits.shape[0])
plt.plot(percentages, profits, label=model.__class__.__name__)
plt.title("Profit Curves")
plt.xlabel("Percentage of test instances (decreasing by score)")
plt.ylabel("Profit")
plt.legend(loc='best')
if save_path:
plt.savefig(save_path)
else:
plt.show()
def find_best_threshold(model_profits):
"""Find model-threshold combo that yields highest profit.
Parameters
----------
model_profits : list((model, profits, thresholds))
Returns
-------
max_model : str
max_threshold : float
max_profit : float
"""
max_model = None
max_threshold = None
max_profit = None
for model, profits, thresholds in model_profits:
max_index = np.argmax(profits)
if not max_model or profits[max_index] > max_profit:
max_model = model
max_threshold = thresholds[max_index]
max_profit = profits[max_index]
return max_model, max_threshold, max_profit
def profit_curve_main(filepath, cost_benefit):
"""Main function to test profit curve code.
Parameters
----------
filepath : str - path to find churn.csv
cost_benefit : ndarray - 2D, with profit values corresponding to:
-----------
| TP | FP |
-----------
| FN | TN |
-----------
"""
X_train, X_test, y_train, y_test = get_train_test(filepath)
models = [RF(), LR(), GBC(), SVC(probability=True)]
model_profits = []
for model in models:
profits, thresholds = get_model_profits(model, cost_benefit,
X_train, X_test,
y_train, y_test)
model_profits.append((model, profits, thresholds))
plot_model_profits(model_profits)
max_model, max_thresh, max_profit = find_best_threshold(model_profits)
max_labeled_positives = max_model.predict_proba(X_test) >= max_thresh
proportion_positives = max_labeled_positives.mean()
reporting_string = ('Best model:\t\t{}\n'
'Best threshold:\t\t{:.2f}\n'
'Resulting profit:\t{}\n'
'Proportion positives:\t{:.2f}')
print(reporting_string.format(max_model.__class__.__name__, max_thresh,
max_profit, proportion_positives))
def plot_decision_boundary(clf, X, y, n_classes):
"""Plot the decision boundary of a kNN classifier.
Plots decision boundary for up to 4 classes.
Colors have been specifically chosen to be color blindness friendly.
    Assumes the classifier, clf, has a .predict() method that follows
    scikit-learn conventions.
    X must contain only 2 continuous features.
    Function modeled on a scikit-learn example.
Parameters
----------
clf: instance of classifier object
A fitted classifier with a .predict() method.
X: numpy array, shape = [n_samples, n_features]
Test data.
y: numpy array, shape = [n_samples,]
Target labels.
n_classes: int
The number of classes in the target labels.
"""
mesh_step_size = .1
# Colors are in the order [red, yellow, blue, cyan]
cmap_light = ListedColormap(['#FFAAAA', '#FFFFAA', '#AAAAFF', '#AAFFFF'])
cmap_bold = ListedColormap(['#FF0000', '#FFFF00', '#0000FF', '#00CCCC'])
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max] x [y_min, y_max].
feature_1 = X[:, 0]
feature_2 = X[:, 1]
x_min, x_max = feature_1.min() - 1, feature_1.max() + 1
y_min, y_max = feature_2.min() - 1, feature_2.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, mesh_step_size),
np.arange(y_min, y_max, mesh_step_size))
dec_boundary = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
dec_boundary = dec_boundary.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, dec_boundary, cmap=cmap_light)
# Plot also the training points
plt.scatter(feature_1, feature_2, c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title(
"{0}-Class classification (k = {1}, metric = '{2}')"
.format(n_classes, clf.k, clf.distance))
plt.show()
``` |