content (stringlengths 7 to 928k) | avg_line_length (float64 3.5 to 33.8k) | max_line_length (int64 6 to 139k) | alphanum_fraction (float64 0.08 to 0.96) | licenses (sequence) | repository_name (stringlengths 7 to 104) | path (stringlengths 4 to 230) | size (int64 7 to 928k) | lang (stringclasses 1 value)
---|---|---|---|---|---|---|---|---|
# -*- coding: UTF-8 -*-
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torchvision import datasets, models, transforms
from tensorboardX import SummaryWriter
import sys
import json
import scipy
import os, time
import argparse
import numpy as np
import torchvision
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from PIL import Image
from shutil import copyfile
from model import ft_net
from test_eval_cython import get_test_acc, extr_fea_train
from utils import *
import loader, loss
import pdb
version = torch.__version__
# #####################################################################
# Options
# --------
parser = argparse.ArgumentParser(description='Training')
parser.add_argument('--gpu',default='0', type=str,help='gpu ids: e.g. 0 0,1,2 0,2')
parser.add_argument('--seed', default=1, type=int, help='rng seed')
parser.add_argument('--model_dir',default='.checkpoint/', type=str, help='output model name')
parser.add_argument('--data_dir',default='/home/comp/mangye/dataset/', type=str, help='data dir')
parser.add_argument('--dataset',default='duke',type=str, help='training data: Market1501, DukeMTMCreID')
parser.add_argument('--pretrained',default='',type=str, help='path of a pretrained model, e.g. "./model/baseline/net_8.pth"')
parser.add_argument('--batchsize', default=32, type=int, help='batchsize')
parser.add_argument('--noise_ratio', default=0.2, type=float, help='percentage of noise data in the training')
parser.add_argument('--lr', default=0.01, type=float, help='initial learning rate')
parser.add_argument('--alpha', default=2, type=float, help='beta distribution: alpha')
parser.add_argument('--beta', default=6, type=float, help='beta distribution: beta')
parser.add_argument('--LabelWt', default=60, type=int, help='label refinement weight')
parser.add_argument('--weighttype', default=0, type=int, help='weight type: instance weight, class weight')
parser.add_argument('--stage2', action='store_true', help='training stage 2')
args = parser.parse_args()
torch.manual_seed(args.seed)
start_epoch = 0
if args.stage2:
start_epoch = start_epoch + 20
best_acc = 0
test_epoch = 2
lr = args.lr
data_dir = args.data_dir + args.dataset
suffix = args.dataset + '_noise_{}_'.format(args.noise_ratio)
if args.LabelWt > 0 or args.stage2:
suffix = suffix + 'batch_{}_wt_{}'.format(args.batchsize,args.LabelWt)
else:
suffix = suffix + 'batch_{}_baseline'.format(args.batchsize)
if args.stage2:
suffix = suffix + '_beta_{}_{}_lr_{:1.1e}'.format(args.alpha, args.beta, args.lr)
suffix = suffix + '_w_st2_new'
else:
suffix = suffix + '_lr_{:1.1e}'.format(args.lr)
suffix = suffix + '_w_st1'
print ('model: ' + suffix)
# define the log path
log_dir = './new_res/' + args.dataset + '_log/'
checkpoint_path = './res/checkpoint/'
vis_log_dir = log_dir + suffix + '/'
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
if not os.path.isdir(vis_log_dir):
os.makedirs(vis_log_dir)
writer = SummaryWriter(vis_log_dir)
test_log_file = open(log_dir + suffix + '.txt', "w")
sys.stdout = Logger(log_dir + suffix + '_os.txt')
# define the gpu id
str_ids = args.gpu.split(',')
gpu_ids = []
for str_id in str_ids:
gid = int(str_id)
if gid >=0:
gpu_ids.append(gid)
# set gpu ids
if len(gpu_ids)>0:
torch.cuda.set_device(gpu_ids[0])
print ('using gpu: {}'.format(gpu_ids))
# #####################################################################
# Load Data
train_transform = transforms.Compose([
#transforms.RandomResizedCrop(size=128, scale=(0.75,1.0), ratio=(0.75,1.3333), interpolation=3), #Image.BICUBIC)
transforms.Resize((288,144), interpolation=3),
transforms.RandomCrop((256,128)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
test_transform = transforms.Compose([
transforms.Resize((256,128), interpolation=3),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# load training data with DatasetFolder
print('Starting loading training data: ', args.dataset )
train_dataset = loader.DatasetFolder(os.path.join(data_dir, 'train'), transform=train_transform)
class_names = train_dataset.classes
dataset_sizes_train = len(train_dataset)
use_gpu = torch.cuda.is_available()
# Define a model
model = ft_net(len(class_names))
if use_gpu:
model = model.cuda()
# Load a pretrained model
if args.pretrained or args.stage2:
# model_name = 'market_noise_0.2_batch_32_lambda_0.4_lr_1.0e-02_st1_epoch_best.t'
model_name = '{}_noise_{}_batch_32_wt_60_lr_1.0e-02_w_st1_epoch_best.t'.format(args.dataset, args.noise_ratio)
    print('Initializing weights with {}'.format(model_name))
model_path = checkpoint_path + model_name
model.load_state_dict(torch.load(model_path))
else:
    print('Initializing weights with ImageNet')
# generate noisy label
if args.noise_ratio >= 0:
trainLabels = torch.LongTensor([y for (p, y, w) in train_dataset.imgs])
trainLabels_nsy, if_truelbl = gen_nosiy_lbl(trainLabels, args.noise_ratio, len(class_names))
print('Finish adding noisy label')
# generate instance weight
if args.stage2:
    print('Generating self-generated weights...')
weight_file = './new_res/' + 'new_{}_{}_weights.npy'.format(args.dataset, args.noise_ratio)
label_file = './new_res/' + 'new_{}_{}_label.npy'.format(args.dataset, args.noise_ratio)
# if os.path.exists(weight_file):
# all_weights = np.load(weight_file)
# pre_pids = np.load(label_file)
# else:
tansform_bak = train_transform
train_dataset.transform = test_transform
temploader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batchsize, shuffle=False, num_workers=8)
model.eval() # Set model to evaluate mode
print('Start extract features...')
start = time.time()
train_feas, pre_pids = extr_fea_train(model, train_dataset, temploader, use_gpu)
print('Evaluation time: {}'.format(time.time()-start))
indexs, ori_weight = gen_weights_dist(train_feas, trainLabels_nsy, class_names, args.alpha, args.beta)
order = np.argsort(indexs)
all_weights = ori_weight[order]
np.save(weight_file, all_weights)
np.save(label_file, pre_pids)
train_dataset.transform = tansform_bak
all_weights = all_weights.astype(np.float32)
for i in range(len(trainLabels_nsy)):
train_dataset.imgs[i] = (train_dataset.imgs[i][0], int(pre_pids[i]), all_weights[i])
else:
print('Setting same weights for all the instances...')
for i in range(len(trainLabels_nsy)):
train_dataset.imgs[i] = (train_dataset.imgs[i][0], trainLabels_nsy[i],1)
dataloaders_train = torch.utils.data.DataLoader(train_dataset, batch_size=args.batchsize, shuffle=True, num_workers=8) # 8 workers may work faster
# load testing data with DatasetFolder
test_dataset = {x: datasets.ImageFolder( os.path.join(data_dir,x) ,test_transform) for x in ['gallery','query']}
dataloaders_test = {x: torch.utils.data.DataLoader(test_dataset[x], batch_size=args.batchsize, shuffle=False, num_workers=8) for x in ['gallery','query']}
# Define loss functions
# if args.LabelWt>0:
# criterion = loss.LabelRefineLoss(lambda1=args.LabelWt)
if args.stage2:
criterion = loss.InstanceWeightLoss(weighted = 1)
else:
criterion = nn.CrossEntropyLoss()
# optimizer
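# Two parameter groups: the backbone keeps the base learning rate, while the
# newly added fc/classifier layers below are trained at 10x the base rate.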
ignored_params = list(map(id, model.model.fc.parameters() )) + list(map(id, model.classifier.parameters() ))
base_params = filter(lambda p: id(p) not in ignored_params, model.parameters())
optimizer_ft = optim.SGD([
{'params': base_params, 'lr': lr},
{'params': model.model.fc.parameters(), 'lr': lr*10},
{'params': model.classifier.parameters(), 'lr': lr*10}
], weight_decay=5e-4, momentum=0.9, nesterov=True)
# Decay LR by a factor of 0.1 every 40 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=20, gamma=0.1)
def save_network(network, epoch_label, is_best = False):
if is_best:
save_path = checkpoint_path + suffix + '_epoch_best.t'
else:
save_path = checkpoint_path + suffix + '_epoch_{}.t'.format(epoch_label)
torch.save(network.state_dict(), save_path)
def sigmoid_rampup(current, rampup_length):
"""Exponential rampup from https://arxiv.org/abs/1610.02242"""
if rampup_length == 0:
return 1.0
else:
current = np.clip(current, 0.0, rampup_length)
phase = 1.0 - current / rampup_length
w = float(np.exp(-2.0 * phase * phase))
return min(w,0.5)
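# For intuition (rounded values, assuming rampup_length=60 as set by --LabelWt):
#   sigmoid_rampup(0, 60)  ~ 0.135  (exp(-2))
#   sigmoid_rampup(30, 60) = 0.5    (exp(-0.5) ~ 0.61, capped at 0.5)
#   sigmoid_rampup(60, 60) = 0.5    (exp(0) = 1, capped at 0.5)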
def train_model(model, criterion, optimizer_ft, scheduler, epoch):
scheduler.step()
lambda1 = sigmoid_rampup(epoch, args.LabelWt)
train_loss = AverageMeter()
data_time = AverageMeter()
batch_time = AverageMeter()
model.train()
correct = 0
total = 0
end = time.time()
for batch_idx, (inputs, targets, weights) in enumerate(dataloaders_train):
if use_gpu:
inputs = Variable(inputs.cuda())
targets = Variable(targets.cuda())
weights = Variable(weights.cuda())
data_time.update(time.time() - end)
optimizer_ft.zero_grad()
outputs = model(inputs)
if args.stage2:
loss = criterion(outputs, targets, weights)
else:
loss = criterion(outputs, targets, lambda1)
loss.backward()
optimizer_ft.step()
train_loss.update(loss.item(), inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
_, predicted = outputs.max(1)
correct += predicted.eq(targets).sum().item()
total += inputs.size(0)
if batch_idx%10==0:
print('Epoch: [{}][{}/{}] '
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
'Data: {data_time.val:.3f} ({data_time.avg:.3f}) '
'Loss: {train_loss.val:.4f} ({train_loss.avg:.4f}) '
'Accu: {:.2f}'.format(
epoch, batch_idx, len(dataloaders_train),100.*correct/total, batch_time=batch_time, data_time=data_time, train_loss=train_loss))
writer.add_scalar('training acc (train)', 100.*correct/total, epoch)
writer.add_scalar('loss', train_loss.avg, epoch)
for epoch in range(start_epoch, start_epoch+41):
# training
print('Start Training..........')
train_model(model, criterion, optimizer_ft, exp_lr_scheduler, epoch)
# evaluation
if epoch%test_epoch ==0:
model.eval() # Set model to evaluate mode
start = time.time()
cmc, mAP = get_test_acc(model, test_dataset, dataloaders_test, use_gpu, max_rank=10)
if cmc[0] > best_acc:
best_epoch = epoch
best_acc = cmc[0]
save_network(model, epoch, is_best = True)
print('Epoch {}: R1:{:.4%} R5:{:.4%} R10:{:.4%} mAP:{:.4%} (Best Epoch[{}])'.format(
epoch, cmc[0],cmc[4],cmc[9], mAP ,best_epoch))
print('Epoch {}: R1:{:.4%} R5:{:.4%} R10:{:.4%} mAP:{:.4%} (Best Epoch[{}])'.format(
epoch, cmc[0],cmc[4],cmc[9], mAP ,best_epoch), file = test_log_file)
test_log_file.flush()
print('Evaluation time: {}'.format(time.time()-start))
# if epoch%20==0:
# save_network(model, epoch, is_best = False) | 38.755776 | 154 | 0.664311 | ["MIT"] | mangye16/ReID-Label-Noise | PNet/train_pnet.py | 11,743 | Python |
from abc import abstractmethod
from collections import deque
from copy import deepcopy
from libcheckers import BoardConfig, InvalidMoveException
from libcheckers.enum import Player, PieceClass, GameOverReason
from libcheckers.utils import (
index_to_coords,
coords_to_index,
get_indexes_between,
get_lines_of_sight,
is_black_home_row,
is_white_home_row,
)
class BaseMove(object):
"""
Represents a move a player can make in the checkers game.
"""
@abstractmethod
def apply(self, board):
"""
Apply a move to a board and retrieve the board produced by the move.
Parameters
----------
board
The board to apply the move to.
Returns
-------
Board
A new board that will be produced after applying this move.
"""
return board
@abstractmethod
def __eq__(self, other):
return False
@abstractmethod
def __repr__(self):
return super(BaseMove, self).__repr__()
class ForwardMove(BaseMove):
"""
Represents a free movement action (the one that does not capture any opponent pieces).
"""
def __init__(self, start_index, end_index):
self.start_index = start_index
self.end_index = end_index
def apply(self, board):
if not board.owner[self.start_index]:
msg = 'Cannot move from an empty square ({0})'.format(self.start_index)
raise InvalidMoveException(msg)
if board.owner[self.end_index]:
msg = 'Cannot move to a non-empty square ({0})'.format(self.end_index)
raise InvalidMoveException(msg)
is_backward_move = (
(board.owner[self.start_index] == Player.WHITE and self.end_index > self.start_index) or
(board.owner[self.start_index] == Player.BLACK and self.end_index < self.start_index)
)
if is_backward_move and board.piece_class[self.start_index] != PieceClass.KING:
msg = 'Cannot freely move backwards unless the piece is a king'
raise InvalidMoveException(msg)
new_board = board.clone()
new_board.move_piece(self.start_index, self.end_index)
return new_board
def __eq__(self, other):
return (isinstance(other, ForwardMove) and
self.start_index == other.start_index and
self.end_index == other.end_index)
def __repr__(self):
return 'Move: {0} -> {1}'.format(self.start_index, self.end_index)
class CaptureMove(BaseMove):
"""
Represents a move that captures a single opponent piece.
"""
def __init__(self, start_index, end_index):
self.start_index = start_index
self.end_index = end_index
def find_opponent_square(self, board):
"""
Retrieve the index of the square that contains the enemy piece to be captured.
"""
path_indexes = get_indexes_between(self.start_index, self.end_index)
own_color = board.owner[self.start_index]
own_path_squares = [
index
for index in path_indexes
if board.owner[index] == own_color
]
opponent_path_squares = [
index
for index in path_indexes
if board.owner[index] and board.owner[index] != own_color
]
if len(own_path_squares) > 0:
msg = 'Cannot capture when own pieces are in the way: {0}'
raise InvalidMoveException(msg.format(', '.join(str(index) for index in own_path_squares)))
if len(opponent_path_squares) != 1:
msg = 'Cannot capture: must have exactly one opponent piece along the way'
raise InvalidMoveException(msg)
if not board.owner[self.start_index]:
msg = 'Cannot move from an empty square ({0})'.format(self.start_index)
raise InvalidMoveException(msg)
if board.owner[self.end_index]:
msg = 'Cannot move to a non-empty square ({0})'.format(self.end_index)
raise InvalidMoveException(msg)
return opponent_path_squares[0]
def apply(self, board):
opponent_square = self.find_opponent_square(board)
new_board = board.clone()
new_board.move_piece(self.start_index, self.end_index)
new_board.remove_piece(opponent_square)
return new_board
def __eq__(self, other):
return (isinstance(other, CaptureMove) and
self.start_index == other.start_index and
self.end_index == other.end_index)
def __repr__(self):
return 'Capture: {0} -> {1}'.format(self.start_index, self.end_index)
class ComboCaptureMove(BaseMove):
"""
Represents a chain of capture moves.
"""
def __init__(self, moves):
self.moves = moves
def apply(self, board):
new_board = board
zombies_to_clear = []
for i, move in enumerate(self.moves):
# According to the rules, men should not be promoted when merely passing through
# the home row. They actually need to finish the move there to be promoted.
old_class = new_board.piece_class[move.start_index]
# Remove captured pieces only after the move is finished. Otherwise king moves
# like "forward, capture right, then capture left" would be allowed.
opponent_square = move.find_opponent_square(new_board)
zombies_to_clear.append(opponent_square)
new_board = move.apply(new_board)
new_board.owner[opponent_square] = Player.ZOMBIE
# Restore the piece class if it was "accidentally" promoted in between the moves.
if i < len(self.moves) - 1:
new_board.piece_class[move.end_index] = old_class
# Wipe the zombies.
for zombie in zombies_to_clear:
new_board.remove_piece(zombie)
return new_board
def __eq__(self, other):
return (isinstance(other, ComboCaptureMove) and
len(self.moves) == len(other.moves) and
all(self.moves[i] == other.moves[i] for i in range(len(self.moves))))
def __repr__(self):
return 'Combo x{0}: [{1}]'.format(len(self.moves), ', '.join(str(move) for move in self.moves))
class Board(object):
"""
Represents an international checkers game board and
contains the movement logic of the game pieces.
"""
def __init__(self):
self.owner = [None] * (BoardConfig.total_squares + 1)
self.piece_class = [None] * (BoardConfig.total_squares + 1)
def move_piece(self, start_index, end_index):
"""
Move an existing game piece from point A to point B.
"""
self.owner[end_index] = self.owner[start_index]
self.owner[start_index] = None
self.piece_class[end_index] = self.piece_class[start_index]
self.piece_class[start_index] = None
# Promote the piece if it has reached the opponent's home row.
if self.owner[end_index] == Player.WHITE and is_black_home_row(end_index):
self.piece_class[end_index] = PieceClass.KING
if self.owner[end_index] == Player.BLACK and is_white_home_row(end_index):
self.piece_class[end_index] = PieceClass.KING
def add_piece(self, index, player, piece_class):
"""
Place a new piece on the board with the specified owner and class.
"""
self.owner[index] = player
self.piece_class[index] = piece_class
def remove_piece(self, index):
"""
Clear the specified square from the board.
"""
self.owner[index] = None
self.piece_class[index] = None
def get_player_squares(self, player):
"""
Get all squares on the board owned by the specified player.
"""
return [
index
for index in range(1, BoardConfig.total_squares + 1)
if self.owner[index] == player
]
def get_free_movement_destinations(self, index):
"""
Get all allowed destinations for free movement for the piece at the specified square.
"""
own_color = self.owner[index]
own_class = self.piece_class[index]
visibility_range = BoardConfig.board_dim if own_class == PieceClass.KING else 1
lines_of_sight = get_lines_of_sight(index, visibility_range)
# Men can only move forward, and the direction of forward depends on the color.
if own_class == PieceClass.MAN and own_color == Player.WHITE:
lines_of_sight = lines_of_sight[:2]
if own_class == PieceClass.MAN and own_color == Player.BLACK:
lines_of_sight = lines_of_sight[-2:]
result = []
for line in lines_of_sight:
for i in range(0, len(line)):
# Cannot move beyond another piece if not capturing.
if self.owner[line[i]]:
break
result.append(line[i])
return result
def get_capturable_pieces(self, index):
"""
Get all squares that contain opponent's pieces capturable from the specified position.
"""
own_color = self.owner[index]
own_class = self.piece_class[index]
visibility_range = BoardConfig.board_dim if own_class == PieceClass.KING else 2
lines_of_sight = get_lines_of_sight(index, visibility_range)
result = []
for line in lines_of_sight:
for i in range(0, len(line) - 1):
# Cannot jump over own pieces or previously captured pieces.
if self.owner[line[i]] in (own_color, Player.ZOMBIE):
break
# Cannot capture protected pieces.
if self.owner[line[i]] and self.owner[line[i + 1]]:
break
# Can only capture if the square following the piece is empty.
if self.owner[line[i]] and self.owner[line[i]] != own_color and not self.owner[line[i + 1]]:
result.append(line[i])
break
return result
def get_available_capture_landing_positions(self, attacker_index, capture_index):
"""
If the specified square is captured by the specified attacker,
get all possible squares the attacker can land on.
"""
own_class = self.piece_class[attacker_index]
attacker_row, attacker_col = index_to_coords(attacker_index)
capture_row, capture_col = index_to_coords(capture_index)
# Calculate the unit movement vector.
movement_row = (capture_row - attacker_row) // abs(capture_row - attacker_row)
movement_col = (capture_col - attacker_col) // abs(capture_col - attacker_col)
result = []
current_row = capture_row + movement_row
current_col = capture_col + movement_col
if own_class == PieceClass.MAN:
return [coords_to_index(current_row, current_col)]
# Kings can make arbitrarily long jumps as long as they capture only one piece.
while 1 <= current_row <= BoardConfig.board_dim and 1 <= current_col <= BoardConfig.board_dim:
current_index = coords_to_index(current_row, current_col)
if not self.owner[current_index]:
result.append(current_index)
current_row += movement_row
current_col += movement_col
else:
break
return result
def get_capture_sequence_candidates(self, player):
"""
Get all possible capture move sequences (not necessarily maximum ones)
starting from every piece owned by the specified player.
"""
player_squares = self.get_player_squares(player)
# Check if there are any pieces in our line of sight that can be captured.
attack_options = []
for attacker in player_squares:
attack_options.extend([
(attacker, target)
for target in self.get_capturable_pieces(attacker)
])
# Run a tree traversal (BFS) to find all capture sequences, and choose the longest ones.
capture_sequences = []
# Each item in the queue is a 3-tuple: (board, move, previous moves).
queue = deque()
# Initial queue items: first step in each possible sequence.
for attacker, target in attack_options:
queue.extend([
(self, CaptureMove(attacker, landing), [])
for landing in self.get_available_capture_landing_positions(attacker, target)
])
# Main search queue.
while queue:
board_before, move, prev_moves = queue.popleft()
            # Do not allow promoting the piece if it does not finish the move on the home row.
class_before = board_before.piece_class[move.start_index]
# Keep the captured pieces because they cannot be removed till the end of turn.
            opponent_square = move.find_opponent_square(board_before)
            board_after = move.apply(board_before)
            board_after.owner[opponent_square] = Player.ZOMBIE
board_after.piece_class[move.end_index] = class_before
next_attack_options = [
(move.end_index, target)
for target in board_after.get_capturable_pieces(move.end_index)
]
# Terminal position, nothing more to capture.
if not next_attack_options:
capture_sequences.append(prev_moves + [move])
# Search deeper for the consecutive captures.
for attacker, target in next_attack_options:
queue.extend([
(board_after, CaptureMove(attacker, landing), prev_moves + [move])
for landing in board_after.get_available_capture_landing_positions(attacker, target)
])
return capture_sequences
def get_available_moves(self, player):
"""
For the specified player, get the list of all allowed moves that are applicable
to this board according to the game rules.
"""
result = []
capture_sequences = self.get_capture_sequence_candidates(player)
if not capture_sequences:
# There are no pieces we must capture. Free movement is allowed.
for source in self.get_player_squares(player):
result.extend([
ForwardMove(source, destination)
for destination in self.get_free_movement_destinations(source)
])
else:
# There's a piece we must capture. Rules demand we capture as many as possible.
max_seq_length = max(len(seq) for seq in capture_sequences)
result.extend([
ComboCaptureMove(seq) if len(seq) > 1 else seq[0]
for seq in capture_sequences
if len(seq) == max_seq_length
])
return result
def check_game_over(self, player_turn):
"""
Check if the game board is in a terminal state from the specified player's point of view.
(e.g. a certain player has won or lost, or there is a draw).
"""
white_moves = self.get_available_moves(Player.WHITE)
black_moves = self.get_available_moves(Player.BLACK)
# If a player is unable to move, they lose.
if player_turn == Player.WHITE and not white_moves:
return GameOverReason.BLACK_WON
if player_turn == Player.BLACK and not black_moves:
return GameOverReason.WHITE_WON
# If both players have only one king left, the game is a draw.
white_squares = self.get_player_squares(Player.WHITE)
black_squares = self.get_player_squares(Player.BLACK)
only_one_king_each = (
len(white_squares) == 1 and
len(black_squares) == 1 and
self.piece_class[white_squares[0]] == PieceClass.KING and
self.piece_class[black_squares[0]] == PieceClass.KING and
not self.get_capturable_pieces(white_squares[0]) and
not self.get_capturable_pieces(black_squares[0])
)
if only_one_king_each:
return GameOverReason.DRAW
return None
def clone(self):
"""
Create an independent copy of this board.
"""
return deepcopy(self)
def __repr__(self):
return 'White: {0} | Black: {1}'.format(
', '.join(str(idx) for idx in self.get_player_squares(Player.WHITE)),
', '.join(str(idx) for idx in self.get_player_squares(Player.BLACK)),
)
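# Minimal usage sketch (illustrative; assumes the standard 1..50 square numbering
# used by BoardConfig — the exact move list depends on the squares chosen):
#
#   board = Board()
#   board.add_piece(28, Player.WHITE, PieceClass.MAN)
#   board.add_piece(19, Player.BLACK, PieceClass.MAN)
#   moves = board.get_available_moves(Player.WHITE)   # ForwardMove/CaptureMove objects
#   new_board = moves[0].apply(board)                 # apply() returns a new Board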
| 36.308026 | 108 | 0.623193 | ["MIT"] | YuriyGuts/libcheckers | libcheckers/movement.py | 16,738 | Python |
# This utility function comes up with which racers to use in the next race
def racerCalculator(raceNum, numCars):
print("RaceNum",raceNum)
print("numCars",numCars)
if raceNum is None:
raceNum = 1
f = lambda x : (raceNum*3+x) % numCars
return f(0),f(1),f(2)
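# Example (illustrative): with 7 cars, race 2 picks racers (6, 0, 1),
# i.e. (2*3+0) % 7, (2*3+1) % 7, (2*3+2) % 7.
# print(racerCalculator(2, 7))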
| 28.6 | 74 | 0.657343 | ["MIT"] | Sam-Gram/PiWood-Derby | racerCalculator.py | 286 | Python |
from diversity_filters import NoFilter, NoFilterWithPenalty
from diversity_filters.base_diversity_filter import BaseDiversityFilter
from diversity_filters.diversity_filter_parameters import DiversityFilterParameters
class DiversityFilter:
def __new__(cls, parameters: DiversityFilterParameters) -> BaseDiversityFilter:
all_filters = dict(NoFilter=NoFilter,
NoFilterWithPenalty=NoFilterWithPenalty)
div_filter = all_filters.get(parameters.name)
return div_filter(parameters)
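# Illustrative usage (the constructor fields of DiversityFilterParameters other
# than the 'name' attribute used above are assumptions):
#   params = DiversityFilterParameters(name="NoFilter", ...)
#   div_filter = DiversityFilter(params)   # returns a NoFilter instance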
| 40.923077 | 83 | 0.789474 | ["MIT"] | MolecularAI/Lib-INVENT | diversity_filters/diversity_filter.py | 532 | Python |
"""
The ``transaction`` submodule contains a wrapper class to simplify the usage of transactions::
t = revitron.Transaction()
...
    t.commit()
"""
from pyrevit import script
class Transaction:
"""
A transaction helper class.
"""
def __init__(self):
"""
Inits a new transaction.
"""
import revitron
bundle = script.get_bundle_name().replace('.pushbutton', '')
self.transaction = revitron.DB.Transaction(revitron.DOC, bundle)
self.transaction.Start()
def commit(self):
"""
Commits the open transaction.
"""
self.transaction.Commit()
def rollback(self):
"""
Rolls back the open transaction.
"""
self.transaction.RollBack()
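# Typical usage sketch (illustrative): wrap model changes and roll back on failure.
#
#   t = revitron.Transaction()
#   try:
#       ...  # modify the Revit model here
#       t.commit()
#   except Exception:
#       t.rollback()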
| 21.325 | 95 | 0.532239 | ["MIT"] | YKato521/revitron-for-RevitPythonShell | revitron/transaction.py | 853 | Python |
import requests
import json
from pprint import pprint as print
def getCode(res:str) :
return str(res).split("[")[1].split("]")[0]
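# getCode parses the status code out of the textual repr of a requests.Response,
# e.g. str(home) == "<Response [200]>" -> getCode(home) == "200".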
url = 'http://localhost:4042'
guid = '2012491924' # get guid from connexion.json()
guid2 = '0'
gurl = f"{url}/{guid}"
home = requests.post(url)
print (getCode(home))
print (home.json())
print ("\n\n##################\n\n")
connexion = requests.post('http://localhost:4042/connect')
print (getCode(connexion))
print (connexion.json())
print ("\n\n##################\n\n")
# regarder = requests.get(f"{gurl}/regarder")
# print (getCode(regarder))
# print (regarder.json())
# print ("\n\n##################\n\n")
# myobj = {"direction": "N"}
# deplacement = requests.post(f"{gurl}/deplacement", json=myobj)
# print (getCode(deplacement))
# print (deplacement.json())
# print ("\n\n##################\n\n")
# examiner = requests.get(f"{gurl}/examiner/{guid2}")
# print (getCode(examiner))
# print (examiner.json())
# print ("\n\n##################\n\n")
# taper = requests.get(f"{gurl}/taper/{guid2}")
# print (getCode(taper))
# print (taper.json())
| 25.418605 | 65 | 0.601098 | ["MIT"] | jonelleamio/AdventureGameServer | bin/test_server.py | 1,093 | Python |
import configargparse as cfargparse
import os
import torch
import onmt.opts as opts
from onmt.utils.logging import logger
class ArgumentParser(cfargparse.ArgumentParser):
def __init__(
self,
config_file_parser_class=cfargparse.YAMLConfigFileParser,
formatter_class=cfargparse.ArgumentDefaultsHelpFormatter,
**kwargs):
super(ArgumentParser, self).__init__(
config_file_parser_class=config_file_parser_class,
formatter_class=formatter_class,
**kwargs)
@classmethod
def defaults(cls, *args):
"""Get default arguments added to a parser by all ``*args``."""
dummy_parser = cls()
for callback in args:
callback(dummy_parser)
defaults = dummy_parser.parse_known_args([])[0]
return defaults
@classmethod
def update_model_opts(cls, model_opt):
if model_opt.word_vec_size > 0:
model_opt.src_word_vec_size = model_opt.word_vec_size
model_opt.tgt_word_vec_size = model_opt.word_vec_size
if model_opt.layers > 0:
model_opt.enc_layers = model_opt.layers
model_opt.dec_layers = model_opt.layers
if model_opt.rnn_size > 0:
model_opt.enc_rnn_size = model_opt.rnn_size
model_opt.dec_rnn_size = model_opt.rnn_size
model_opt.brnn = model_opt.encoder_type == "brnn"
if model_opt.copy_attn_type is None:
model_opt.copy_attn_type = model_opt.global_attention
@classmethod
def validate_model_opts(cls, model_opt):
assert model_opt.model_type in ["text", "img", "audio", "vec"], \
"Unsupported model type %s" % model_opt.model_type
# this check is here because audio allows the encoder and decoder to
# be different sizes, but other model types do not yet
same_size = model_opt.enc_rnn_size == model_opt.dec_rnn_size
assert model_opt.model_type == 'audio' or same_size, \
"The encoder and decoder rnns must be the same size for now"
assert model_opt.rnn_type != "SRU" or model_opt.gpu_ranks, \
"Using SRU requires -gpu_ranks set."
if model_opt.share_embeddings:
if model_opt.model_type != "text":
raise AssertionError(
"--share_embeddings requires --model_type text.")
@classmethod
def ckpt_model_opts(cls, ckpt_opt):
# Load default opt values, then overwrite with the opts in
# the checkpoint. That way, if there are new options added,
# the defaults are used.
opt = cls.defaults(opts.model_opts)
opt.__dict__.update(ckpt_opt.__dict__)
return opt
@classmethod
def validate_train_opts(cls, opt):
if opt.epochs:
raise AssertionError(
"-epochs is deprecated please use -train_steps.")
if opt.truncated_decoder > 0 and max(opt.accum_count) > 1:
raise AssertionError("BPTT is not compatible with -accum > 1")
if opt.gpuid:
raise AssertionError(
"gpuid is deprecated see world_size and gpu_ranks")
if torch.cuda.is_available() and not opt.gpu_ranks:
logger.info("WARNING: You have a CUDA device, \
should run with -gpu_ranks")
if opt.world_size < len(opt.gpu_ranks):
raise AssertionError(
"parameter counts of -gpu_ranks must be less or equal "
"than -world_size.")
if opt.world_size == len(opt.gpu_ranks) and \
min(opt.gpu_ranks) > 0:
raise AssertionError(
"-gpu_ranks should have master(=0) rank "
"unless -world_size is greater than len(gpu_ranks).")
assert len(opt.data_ids) == len(opt.data_weights), \
"Please check -data_ids and -data_weights options!"
assert len(opt.dropout) == len(opt.dropout_steps), \
"Number of dropout values must match accum_steps values"
assert len(opt.attention_dropout) == len(opt.dropout_steps), \
"Number of attention_dropout values must match accum_steps values"
@classmethod
def validate_translate_opts(cls, opt):
if opt.beam_size != 1 and opt.random_sampling_topk != 1:
raise ValueError('Can either do beam search OR random sampling.')
@classmethod
def validate_preprocess_args(cls, opt):
assert opt.max_shard_size == 0, \
"-max_shard_size is deprecated. Please use \
-shard_size (number of examples) instead."
assert opt.shuffle == 0, \
"-shuffle is not implemented. Please shuffle \
your data before pre-processing."
assert len(opt.train_src) == len(opt.train_tgt), \
"Please provide same number of src and tgt train files!"
assert len(opt.train_src) == len(opt.train_ids), \
"Please provide proper -train_ids for your data!"
for file in opt.train_src + opt.train_tgt:
assert os.path.isfile(file), "Please check path of %s" % file
assert not opt.valid_src or os.path.isfile(opt.valid_src), \
"Please check path of your valid src file!"
assert not opt.valid_tgt or os.path.isfile(opt.valid_tgt), \
"Please check path of your valid tgt file!"
assert not opt.src_vocab or os.path.isfile(opt.src_vocab), \
"Please check path of your src vocab!"
assert not opt.tgt_vocab or os.path.isfile(opt.tgt_vocab), \
"Please check path of your tgt vocab!"
| 41.307143 | 79 | 0.6175 | ["MIT"] | ACL2020-Submission/ACL2020 | onmt/utils/parse.py | 5,783 | Python |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import HotelGroupViewServiceTransport
from .grpc import HotelGroupViewServiceGrpcTransport
# Compile a registry of transports.
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[HotelGroupViewServiceTransport]]
_transport_registry["grpc"] = HotelGroupViewServiceGrpcTransport
__all__ = (
"HotelGroupViewServiceTransport",
"HotelGroupViewServiceGrpcTransport",
)
| 31.441176 | 74 | 0.77362 | ["Apache-2.0"] | FoIIon/google-ads-python | google/ads/googleads/v7/services/services/hotel_group_view_service/transports/__init__.py | 1,069 | Python |
import kanu
while True:
print('\n Select one:')
print('\t1 -> Solve a linear equation')
print('\t2 -> Simplify any expression')
print('\t3 -> Is this number a Perfect Square?')
print('\t4 -> Get Prime Numbers')
print('\t5 -> START A NUCLEAR WAR :)')
print('\t6 -> Factor Integers')
choice = input()
if choice == '1':
print('Enter the equation:', end=' ')
try:
print(kanu.solve_single_linear_equation(input()))
except kanu.NonLinearEquationError:
print('You entered a non-linear equation.')
elif choice == '2':
print('Enter the expression:', end=' ')
print(kanu.all_together_now(input()))
elif choice == '3':
import math
def is_perfect_square(y):
sqrt_value = math.sqrt(y)
return int(sqrt_value) ** 2 == y
number = int(input('Enter a number: '))
if is_perfect_square(number):
print("It is a perfect square!")
else:
print("It is NOT a perfect square!")
elif choice == '4':
number = int(input("Input an integer:"))
factors = []
while number % 2 == 0:
factors.append(2)
number //= 2
divisor = 3
while number != 1 and divisor <= number:
if number % divisor == 0:
factors.append(divisor)
number //= divisor
else:
divisor += 2
print("The Prime Factors are: ")
for i in range(len(factors)):
print(factors[i], end=',')
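        # Worked example: 84 factors out 2, 2 (84 -> 42 -> 21), then 3 and 7,
        # so this branch prints "2,2,3,7,".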
elif choice == '5':
print('Executing "GETTING THE FOOTBALL" ')
if choice == '5':
from tqdm import tqdm
x = 1
for i in tqdm(range(0, 1000000)):
for x in range(0, 100):
x *= 4
print("DONE")
print("HERE ARE THE NUCLEAR LAUNCH CODES...")
print(" 56 58 10 62 11 1 25 29 55 62")
print(" 5 8 1 9 6 7 4 3 10 20")
print(" 41 16 18 50 9 51 48 5 37 30")
print(" 40 3 34 61 59 2 39 46 28 47")
print(" 38 7 42 26 63 45 17 27 60 21")
print("Launch Nukes?")
print("\t1 -> YES")
print('\t2 -> NO')
choice = input()
if choice == '1':
print('Please Wait...')
from tqdm import tqdm
x = 1
for i in tqdm(range(0, 100000)):
for x in range(0, 95):
x *= 4
print('BYE BYE WORLD')
input('press ENTER to continue')
elif choice == '2':
print('Maybe Another Day.')
input('press ENTER to continue')
elif choice == '6':
import math
number = int(input("Enter a number: "))
factors = []
for i in range(1, int(math.sqrt(number)) + 1):
if number % i == 0:
factors.append(i)
factor_pair = number // i
if factor_pair != i:
factors.append(factor_pair)
factors.sort()
print(factors)
| 27.849558 | 61 | 0.479504 | ["MIT"] | ThiccTT/nivek-maths | Nivek maths/nivek-maths.py | 3,147 | Python |
"""Custom data update coordinators for the GitHub integration."""
from __future__ import annotations
from typing import Literal, TypedDict
from aiogithubapi import (
GitHubAPI,
GitHubCommitModel,
GitHubException,
GitHubReleaseModel,
GitHubRepositoryModel,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, T
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import DEFAULT_UPDATE_INTERVAL, DOMAIN, LOGGER, IssuesPulls
CoordinatorKeyType = Literal["information", "release", "issue", "commit"]
class GitHubBaseDataUpdateCoordinator(DataUpdateCoordinator[T]):
"""Base class for GitHub data update coordinators."""
def __init__(
self,
hass: HomeAssistant,
entry: ConfigEntry,
client: GitHubAPI,
repository: str,
) -> None:
"""Initialize GitHub data update coordinator base class."""
self.config_entry = entry
self.repository = repository
self._client = client
super().__init__(
hass,
LOGGER,
name=DOMAIN,
update_interval=DEFAULT_UPDATE_INTERVAL,
)
async def fetch_data(self) -> T:
"""Fetch data from GitHub API."""
async def _async_update_data(self) -> T:
try:
return await self.fetch_data()
except GitHubException as exception:
LOGGER.exception(exception)
raise UpdateFailed(exception) from exception
class RepositoryInformationDataUpdateCoordinator(
GitHubBaseDataUpdateCoordinator[GitHubRepositoryModel]
):
"""Data update coordinator for repository information."""
async def fetch_data(self) -> GitHubRepositoryModel:
"""Get the latest data from GitHub."""
result = await self._client.repos.get(self.repository)
return result.data
class RepositoryReleaseDataUpdateCoordinator(
GitHubBaseDataUpdateCoordinator[GitHubReleaseModel]
):
"""Data update coordinator for repository release."""
async def fetch_data(self) -> GitHubReleaseModel | None:
"""Get the latest data from GitHub."""
result = await self._client.repos.releases.list(
self.repository, **{"params": {"per_page": 1}}
)
if not result.data:
return None
for release in result.data:
if not release.prerelease:
return release
# Fall back to the latest release if no non-prerelease release is found
return result.data[0]
class RepositoryIssueDataUpdateCoordinator(
GitHubBaseDataUpdateCoordinator[IssuesPulls]
):
"""Data update coordinator for repository issues."""
async def fetch_data(self) -> IssuesPulls:
"""Get the latest data from GitHub."""
base_issue_response = await self._client.repos.issues.list(
self.repository, **{"params": {"per_page": 1}}
)
pull_response = await self._client.repos.pulls.list(
self.repository, **{"params": {"per_page": 1}}
)
pulls_count = pull_response.last_page_number or 0
issues_count = (base_issue_response.last_page_number or 0) - pulls_count
issue_last = base_issue_response.data[0] if issues_count != 0 else None
if issue_last is not None and issue_last.pull_request:
issue_response = await self._client.repos.issues.list(self.repository)
for issue in issue_response.data:
if not issue.pull_request:
issue_last = issue
break
return IssuesPulls(
issues_count=issues_count,
issue_last=issue_last,
pulls_count=pulls_count,
pull_last=pull_response.data[0] if pulls_count != 0 else None,
)
class RepositoryCommitDataUpdateCoordinator(
GitHubBaseDataUpdateCoordinator[GitHubCommitModel]
):
"""Data update coordinator for repository commit."""
async def fetch_data(self) -> GitHubCommitModel | None:
"""Get the latest data from GitHub."""
result = await self._client.repos.list_commits(
self.repository, **{"params": {"per_page": 1}}
)
return result.data[0] if result.data else None
class DataUpdateCoordinators(TypedDict):
"""Custom data update coordinators for the GitHub integration."""
information: RepositoryInformationDataUpdateCoordinator
release: RepositoryReleaseDataUpdateCoordinator
issue: RepositoryIssueDataUpdateCoordinator
commit: RepositoryCommitDataUpdateCoordinator
| 32.577465 | 88 | 0.676827 | ["Apache-2.0"] | Arquiteto/core | homeassistant/components/github/coordinator.py | 4,626 | Python |
from typing import Union

def add(a: int, b: int) -> int:
return a + b
def subtract(a: int, b: int) -> int:
return a - b
def multiply(a: int, b: Union[int, str]) -> int:
return a * b
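# The actual tests are not shown in this snippet; a minimal sketch that would
# produce the "Ran 3 tests ... OK" output quoted below might look like this
# (class and method names are assumptions):
import unittest


class TestOperations(unittest.TestCase):
    def test_add(self):
        self.assertEqual(add(2, 3), 5)

    def test_subtract(self):
        self.assertEqual(subtract(5, 3), 2)

    def test_multiply(self):
        self.assertEqual(multiply(3, 2), 6)
        self.assertEqual(multiply(3, "ab"), "ababab")


if __name__ == "__main__":
    unittest.main()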
"""
...
----------------------------------------------------------------------
Ran 3 tests in 0.000s
OK
"""
| 13.95 | 70 | 0.387097 | ["Apache-2.0"] | MahanBi/python-tests | unittest/code.py | 279 | Python |
# coding=utf-8
VERSION = '1.8.1'
__VERSION__ = VERSION
def increase():
import os
import re
filename = os.path.abspath(__file__)
with open(filename, encoding='utf8') as file:
content = file.read()
match = re.search(r"VERSION = '(\d+).(\d+).(\d+)'", content)
old = f'{match.group(1)}.{match.group(2)}.{match.group(3)}'
new = f'{match.group(1)}.{match.group(2)}.{int(match.group(3)) + 1}'
content = content.replace(old, new)
with open(filename, 'w', encoding='utf8') as file:
file.write(content)
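# Example: with VERSION = '1.8.1', calling increase() rewrites this file so that
# VERSION = '1.8.2' (only the patch component is incremented).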
| 25.045455 | 72 | 0.598911 | ["MIT"] | StevenBaby/chess | src/version.py | 551 | Python |
import os
import time
from WebKit.URLParser import ServletFactoryManager
from WebUtils.Funcs import htmlEncode
from AdminSecurity import AdminSecurity
class ServletCache(AdminSecurity):
"""Display servlet cache.
This servlet displays, in a readable form, the internal data
structure of the cache of all servlet factories.
This can be useful for debugging WebKit problems and the
information is interesting in general.
"""
def title(self):
return 'Servlet Cache'
def writeContent(self):
wr = self.writeln
factories = [factory for factory in ServletFactoryManager._factories
if factory._classCache]
if not factories:
wr('<h4>No caching servlet factories found.</h4>')
wr('<p>Caching can be activated by setting'
' <code>CacheServletClasses = True</code>.</p>')
return
if len(factories) > 1:
factories.sort()
wr('<h3>Servlet Factories:</h3>')
wr('<table>')
for factory in factories:
wr('<tr><td><a href="#%s">%s</a></td></tr>'
% ((factory.name(),)*2))
wr('</table>')
req = self.request()
wr('<form action="ServletCache" method="post">')
for factory in factories:
name = factory.name()
wr('<a id="%s"></a><h4>%s</h4>' % ((name,)*2))
if req.hasField('flush_' + name):
factory.flushCache()
wr('<p style="color:green">'
'The servlet cache has been flushed. '
'<input type="submit" name="reload" value="Reload"></p>')
continue
wr(htCache(factory))
wr('</form>')
def htCache(factory):
"""Output the cache of a servlet factory."""
html = []
wr = html.append
cache = factory._classCache
keys = sorted(cache)
wr('<p>Uniqueness: %s</p>' % factory.uniqueness())
wr('<p>Extensions: %s</p>' % ', '.join(map(repr, factory.extensions())))
wr('<p>Unique paths in the servlet cache: <strong>%d</strong>'
' <input type="submit" name="flush_%s" value="Flush"></p>'
% (len(keys), factory.name()))
wr('<p>Click any link to jump to the details for that path.</p>')
wr('<h5>Filenames:</h5>')
wr('<table class="NiceTable">')
wr('<tr><th>File</th><th>Directory</th></tr>')
paths = []
for key in keys:
head, tail = os.path.split(key)
path = dict(dir=head, base=tail, full=key)
paths.append(path)
paths.sort(key=lambda p: (p['base'].lower(), p['dir'].lower()))
# At this point, paths is a list where each element is a dictionary
# with directory name, base name, full path name sorted first by
# base name and second by dir name.
for path in paths:
wr('<tr><td><a href="#id%s">%s</a></td><td>%s</td></tr>'
% (id(path['full']), path['base'], path['dir']))
wr('</table>')
wr('<h5>Full paths:</h5>')
wr('<table class="NiceTable">')
wr('<tr><th>Servlet path</th></tr>')
for key in keys:
wr('<tr><td><a href="#%s">%s</a></td></tr>' % (id(key), key))
wr('</table>')
wr('<h5>Details:</h5>')
wr('<table class="NiceTable">')
for path in paths:
wr('<tr class="NoTable"><td colspan="2">'
'<a id="id%s"></a><strong>%s</strong> - %s</td></tr>'
% (id(path['full']), path['base'], path['dir']))
record = cache[path['full']].copy()
record['path'] = path['full']
if path['full'] in factory._threadsafeServletCache:
record['instances'] = 'one servlet instance (threadsafe)'
else:
record['instances'] = ('free reusable servlets: %d'
% len(factory._servletPool))
wr(htRecord(record))
wr('</table>')
return '\n'.join(html)
def htRecord(record):
html = []
wr = html.append
for key in sorted(record):
htKey = htmlEncode(key)
# determine the HTML for the value
value = record[key]
htValue = None
# check for special cases where we want a custom display
if hasattr(value, '__name__'):
htValue = value.__name__
if key == 'mtime':
htValue = '%s (%s)' % (time.asctime(time.localtime(value)), value)
# the general case:
if not htValue:
htValue = htmlEncode(str(value))
wr('<tr><th>%s</th><td>%s</td></tr>' % (htKey, htValue))
return '\n'.join(html)
| 37.254098 | 78 | 0.551155 | ["MIT"] | Cito/w4py | WebKit/Admin/ServletCache.py | 4,545 | Python |
n=(input("Enter a number"))
a=len(n)
s=int(n)
sum=0
p=s
while s>0:
b=s%10
sum=sum+b**a
s=s//10
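# Example: for 153 (3 digits), 1**3 + 5**3 + 3**3 = 1 + 125 + 27 = 153,
# so 153 is an Armstrong number.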
if sum==p:
    print("It is an Armstrong Number")
else:
    print("It is Not an Armstrong Number") | 15.615385 | 41 | 0.591133 | ["Apache-2.0"] | nikhilsamninan/python-files | day2/amstrongnospl.py | 203 | Python |
# Generated by Django 2.1 on 2019-01-12 15:12
import django.db.models.deletion
from django.db import migrations, models
import pretix.base.models.fields
class Migration(migrations.Migration):
dependencies = [
('pretixbase', '0104_auto_20181114_1526'),
]
operations = [
migrations.AddField(
model_name='invoiceaddress',
name='beneficiary',
field=models.TextField(blank=True, verbose_name='Beneficiary'),
),
migrations.AddField(
model_name='invoice',
name='invoice_to_beneficiary',
field=models.TextField(blank=True, null=True, verbose_name='Beneficiary'),
),
]
| 25.703704 | 86 | 0.635447 | ["Apache-2.0"] | Janfred/pretix | src/pretix/base/migrations/0105_auto_20190112_1512.py | 694 | Python |
import sys
sys.path.append('../')
import torch
import numpy as np
import random
import math
import time
import argparse
from data_tlp_cite import DataHelper_t
from torch.utils.data import DataLoader
from model import Model
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, accuracy_score, f1_score
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
FType = torch.FloatTensor
LType = torch.LongTensor
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
random.seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
def main(args):
setup_seed(args.seed)
Data = DataHelper_t(args.file_path, args.node_feature_path, args.neg_size, args.hist_len, args.directed,
tlp_flag=args.tlp_flag)
loader = DataLoader(Data, batch_size=args.batch_size, shuffle=False, num_workers=5)
model = Model(args).to(device)
model.load_state_dict(torch.load('../res/cite/model.pkl'))
s_emb_list = []
t_emb_list = []
dup_s_emb_list = []
neg_embs_list = []
loss_list = []
model.eval()
for i_batch, sample_batched in enumerate(loader):
loss, s_emb, t_emb, dup_s_emb, neg_embs = model.forward(
sample_batched['s_self_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['s_one_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['s_two_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['t_self_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['t_one_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['t_two_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['neg_self_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['neg_one_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['neg_two_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['event_time'].type(FType).to(device),
sample_batched['s_history_times'].type(FType).to(device),
sample_batched['s_his_his_times_list'].type(FType).to(device),
sample_batched['t_history_times'].type(FType).to(device),
sample_batched['t_his_his_times_list'].type(FType).to(device),
sample_batched['neg_his_times_list'].type(FType).to(device),
sample_batched['neg_his_his_times_list'].type(FType).to(device),
sample_batched['s_edge_rate'].type(FType).to(device),
training=False
)
s_emb_list.append(s_emb)
t_emb_list.append(t_emb)
dup_s_emb_list.append(dup_s_emb.reshape(-1, args.out_dim))
neg_embs_list.append(neg_embs.reshape(-1, args.out_dim))
loss_list.append(loss)
s_emb_list = torch.cat(s_emb_list, dim=0)
t_emb_list = torch.cat(t_emb_list, dim=0)
dup_s_emb_list = torch.cat(dup_s_emb_list, dim=0)
neg_embs_list = torch.cat(neg_embs_list, dim=0)
truth = torch.ones(s_emb_list.size(0), dtype=torch.int)
truth_neg = torch.zeros(neg_embs_list.size(0), dtype=torch.int)
s_list = torch.cat((s_emb_list, dup_s_emb_list), dim=0)
t_list = torch.cat((t_emb_list, neg_embs_list), dim=0)
truth_list = torch.cat((truth, truth_neg), dim=0)
dif_list = torch.abs(s_list - t_list)
x_train, x_test, y_train, y_test = train_test_split(dif_list, truth_list, test_size=1 - args.train_ratio,
random_state=args.seed, stratify=truth_list)
lr = LogisticRegression(max_iter=10000)
lr.fit(x_train, y_train)
y_test_pred = lr.predict(x_test)
acc = accuracy_score(y_test, y_test_pred)
f1 = f1_score(y_test, y_test_pred)
print('acc:{}'.format(round(acc, 4)))
print('f1:{}'.format(round(f1, 4)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--file_path', type=str, default='./data/cite/emb_edges.pt')
parser.add_argument('--node_feature_path', type=str, default='./data/cite/sorted_emb_feat.pt')
parser.add_argument('--neg_size', type=int, default=1)
parser.add_argument('--hist_len', type=int, default=10)
parser.add_argument('--directed', type=bool, default=False)
parser.add_argument('--epoch_num', type=int, default=10, help='epoch number')
parser.add_argument('--tlp_flag', type=bool, default=True)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--hid_dim', type=int, default=16)
parser.add_argument('--feat_dim', type=int, default=128)
parser.add_argument('--out_dim', type=int, default=16)
parser.add_argument('--seed', type=int, default=4)
parser.add_argument('--ncoef', type=float, default=0.01)
parser.add_argument('--l2_reg', type=float, default=0.001)
parser.add_argument('--train_ratio', type=float, default=0.8)
args = parser.parse_args()
start = time.perf_counter()
main(args) | 41.418605 | 109 | 0.688564 | ["MIT"] | WenZhihao666/TREND | main_test.py | 5,343 | Python |
import numpy as np
import random
random.seed(200)
# Create Sigmoid Function
def sig(inp):
return (1/(1+np.exp(-1*inp)))
# For Back Propagation, make Desigmoid function
def dsig(inp):
return (1.0-inp)*inp
# Define class for neuron
class Neuron:
def __init__(self,weights,func,dfunc):
# member variables for class
self.weights = weights
self.output = None
self.func = func
# dfunc is the derivative of the function
self.dfunc = dfunc
# No delta yet because we haven't defined anything
self.delta = None
def agr(self,x):
bias = self.weights[-1]
out = np.inner(self.weights.copy()[:-1],x) + bias
return out
def activation(self,inp):
self.output = self.func(inp)
return self.output
# Definition for weights
def gen_weights(dim):
# Add 1 to the dimension for the bias
return np.random.uniform(-0.1,0.1,dim+1)
# Definition of the actual network
# Activations correspond to the activation functions used (and their derivatives)
def gen_net(structure, activations):
# Create empty list
net = []
for i in range(1,len(structure)):
layer = []
for j in range(structure[i]):
# feed in neuron weights from last layer
weights = gen_weights(structure[i-1])
layer.append(Neuron(weights, activations[0][i-1], activations[1][i-1]))
net.append(layer)
return net
# Define feed forward
def feed_fwd(net, inp):
# It stores the current input associated with the given layer
inp_store = inp
for layer in net:
out_of_curr_layer = []
for neuron in layer:
# Calculate accumulated output value
accum = neuron.agr(inp_store)
output = neuron.activation(accum)
# Store output for later use
out_of_curr_layer.append(output)
inp_store = out_of_curr_layer
return inp_store
# Define back propagation
def back_prop(net, target):
back_len = len(net)
for i in range(back_len):
ind = back_len-i-1
layer = net[ind]
errors = []
if ind == back_len-1:
j=0
for neuron in layer:
errors.append(target[j]-neuron.output)
j+=1
else:
for j in range(len(layer)):
error = 0.0
# For neuron in front of current neuron, check deltas
for neuron in net[ind+1]:
error+=(neuron.weights[j]*neuron.delta)
errors.append(error)
j=0
for neuron in layer:
neuron.delta = errors[j]*neuron.dfunc(neuron.output)
j+=1
return net
# Define how much to update the weights by on each step
# Alpha is the learning rate; if it is too high, the updates may overshoot
def update_weights(net,inp,alpha):
for i in range(len(net)):
if i==0:
inputs = inp
else:
inputs = []
prev_layer = net[i-1]
for neuron in prev_layer:
inputs.append(neuron.output)
curr_layer = net[i]
for neuron in curr_layer:
for j in range(len(inputs)):
neuron.weights[j] += alpha*neuron.delta*inputs[j]
neuron.weights[-1]+=alpha*neuron.delta
#Define training approach
def train(net,train_data,alpha,epoch):
for curr_epoch_no in range(epoch):
sums = 0
sample_no = 0
# Accuracy Count (number of samples that are right)
acc_cnt = 0
for sample in train_data:
outputs = feed_fwd(net,sample[0])
expected = sample[1]
sums+=sum([(expected[i]-outputs[i])**2 for i in range(len(expected))])
            if expected.index(max(expected)) == outputs.index(max(outputs)):
acc_cnt += 1
back_prop(net,expected)
update_weights(net,sample[0],alpha)
# Metadata on how well it's doing
print('epoch_no:', curr_epoch_no,'loss:', sums, 'accuracy:', acc_cnt)
net = gen_net([2,100,100,2],[(sig,sig,sig),[dsig,dsig,dsig]])
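# The training set below appears to encode XOR with one-hot targets:
# [1, 0] = "XOR is true", [0, 1] = "XOR is false" for inputs (0,0), (0,1), (1,0), (1,1).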
train(net,[[[0,0],[0,1]],
[[0,1],[1,0]],
[[1,0],[1,0]],
[[1,1],[0,1]]],
2, 100)
# Code to test out neural network output
# net = gen_net([2,2,2],[(sig,sig),[dsig,dsig]])
# print(feed_fwd(net,[0.2,0.3]))
# for i in range(len(net)):
# for j in range(len(net[i])):
# print(net[i][j].weights)
# print("--------------------------")
# net = back_prop(net,[1,0])
# net = update_weights(net,[0.2,0.3],0.2)
# for i in range(len(net)):
# for j in range(len(net[i])):
# print(net[i][j].weights) | 31.90411 | 83 | 0.572349 | ["MIT"] | Henrynaut/ML | scripts/neural_net_workshop.py | 4,658 | Python |
# License: MIT License
import unittest
import glob
import os
import networkx as nx
import numpy as np
import itertools
from ...PyCTBN.structure_graph.sample_path import SamplePath
from ...PyCTBN.structure_graph.network_graph import NetworkGraph
from ...PyCTBN.utility.json_importer import JsonImporter
class TestNetworkGraph(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.read_files = glob.glob(os.path.join('./PyCTBN/test_data', "*.json"))
cls.importer = JsonImporter(cls.read_files[2], 'samples', 'dyn.str', 'variables', 'Time', 'Name')
cls.importer.import_data(0)
cls.s1 = SamplePath(cls.importer)
cls.s1.build_trajectories()
cls.s1.build_structure()
def test_init(self):
g1 = NetworkGraph(self.s1.structure)
self.assertEqual(self.s1.structure, g1._graph_struct)
self.assertIsInstance(g1._graph, nx.DiGraph)
self.assertIsNone(g1.time_scalar_indexing_strucure)
self.assertIsNone(g1.transition_scalar_indexing_structure)
self.assertIsNone(g1.transition_filtering)
self.assertIsNone(g1.p_combs)
def test_add_nodes(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
for n1, n2 in zip(g1.nodes, self.s1.structure.nodes_labels):
self.assertEqual(n1, n2)
def test_add_edges(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_edges(self.s1.structure.edges)
for e in self.s1.structure.edges:
self.assertIn(tuple(e), g1.edges)
def test_fast_init(self):
g1 = NetworkGraph(self.s1.structure)
for node in self.s1.structure.nodes_labels:
g1.fast_init(node)
self.assertIsNotNone(g1._graph.nodes)
self.assertIsNotNone(g1._graph.edges)
self.assertIsInstance(g1._time_scalar_indexing_structure, np.ndarray)
self.assertIsInstance(g1._transition_scalar_indexing_structure, np.ndarray)
self.assertIsInstance(g1._time_filtering, np.ndarray)
self.assertIsInstance(g1._transition_filtering, np.ndarray)
self.assertIsInstance(g1._p_combs_structure, np.ndarray)
self.assertIsInstance(g1._aggregated_info_about_nodes_parents, tuple)
def test_get_ordered_by_indx_set_of_parents(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node in self.s1.structure.nodes_labels:
aggr_info = g1.get_ordered_by_indx_set_of_parents(node)
for indx in range(len(aggr_info[0]) - 1 ):
self.assertLess(g1.get_node_indx(aggr_info[0][indx]), g1.get_node_indx(aggr_info[0][indx + 1]))
for par, par_indx in zip(aggr_info[0], aggr_info[1]):
self.assertEqual(g1.get_node_indx(par), par_indx)
for par, par_val in zip(aggr_info[0], aggr_info[2]):
self.assertEqual(g1._graph_struct.get_states_number(par), par_val)
def test_build_time_scalar_indexing_structure_for_a_node(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node in self.s1.structure.nodes_labels:
aggr_info = g1.get_ordered_by_indx_set_of_parents(node)
self.aux_build_time_scalar_indexing_structure_for_a_node(g1, node, aggr_info[1],
aggr_info[0], aggr_info[2])
def aux_build_time_scalar_indexing_structure_for_a_node(self, graph, node_id, parents_indxs, parents_labels, parents_vals):
node_states = graph.get_states_number(node_id)
time_scalar_indexing = NetworkGraph.build_time_scalar_indexing_structure_for_a_node(node_states, parents_vals)
self.assertEqual(len(time_scalar_indexing), len(parents_indxs) + 1)
merged_list = parents_labels[:]
merged_list.insert(0, node_id)
vals_list = []
for node in merged_list:
vals_list.append(graph.get_states_number(node))
t_vec = np.array(vals_list)
t_vec = t_vec.cumprod()
self.assertTrue(np.array_equal(time_scalar_indexing, t_vec))
def test_build_transition_scalar_indexing_structure_for_a_node(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node in self.s1.structure.nodes_labels:
aggr_info = g1.get_ordered_by_indx_set_of_parents(node)
self.aux_build_transition_scalar_indexing_structure_for_a_node(g1, node, aggr_info[1],
aggr_info[0], aggr_info[2])
def aux_build_transition_scalar_indexing_structure_for_a_node(self, graph, node_id, parents_indxs, parents_labels,
parents_values):
node_states = graph.get_states_number(node_id)
transition_scalar_indexing = graph.build_transition_scalar_indexing_structure_for_a_node(node_states,
parents_values)
self.assertEqual(len(transition_scalar_indexing), len(parents_indxs) + 2)
merged_list = parents_labels[:]
merged_list.insert(0, node_id)
merged_list.insert(0, node_id)
vals_list = []
for node_id in merged_list:
vals_list.append(graph.get_states_number(node_id))
m_vec = np.array([vals_list])
m_vec = m_vec.cumprod()
self.assertTrue(np.array_equal(transition_scalar_indexing, m_vec))
def test_build_time_columns_filtering_structure_for_a_node(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node in self.s1.structure.nodes_labels:
aggr_info = g1.get_ordered_by_indx_set_of_parents(node)
self.aux_build_time_columns_filtering_structure_for_a_node(g1, node, aggr_info[1])
def aux_build_time_columns_filtering_structure_for_a_node(self, graph, node_id, p_indxs):
graph.build_time_columns_filtering_for_a_node(graph.get_node_indx(node_id), p_indxs)
single_filter = []
single_filter.append(graph.get_node_indx(node_id))
single_filter.extend(p_indxs)
self.assertTrue(np.array_equal(graph.build_time_columns_filtering_for_a_node(graph.get_node_indx(node_id),
p_indxs),np.array(single_filter)))
def test_build_transition_columns_filtering_structure(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node in self.s1.structure.nodes_labels:
aggr_info = g1.get_ordered_by_indx_set_of_parents(node)
            self.aux_build_transition_columns_filtering_structure(g1, node, aggr_info[1])
def aux_build_transition_columns_filtering_structure(self, graph, node_id, p_indxs):
single_filter = []
single_filter.append(graph.get_node_indx(node_id) + graph._graph_struct.total_variables_number)
single_filter.append(graph.get_node_indx(node_id))
single_filter.extend(p_indxs)
self.assertTrue(np.array_equal(graph.build_transition_filtering_for_a_node(graph.get_node_indx(node_id),
p_indxs), np.array(single_filter)))
def test_build_p_combs_structure(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node in self.s1.structure.nodes_labels:
aggr_info = g1.get_ordered_by_indx_set_of_parents(node)
self.aux_build_p_combs_structure(g1, aggr_info[2])
def aux_build_p_combs_structure(self, graph, p_vals):
p_combs = graph.build_p_comb_structure_for_a_node(p_vals)
p_possible_vals = []
for val in p_vals:
vals = [v for v in range(val)]
p_possible_vals.extend(vals)
comb_struct = set(itertools.product(p_possible_vals,repeat=len(p_vals)))
for comb in comb_struct:
self.assertIn(np.array(comb), p_combs)
def test_get_parents_by_id(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node in g1.nodes:
self.assertListEqual(g1.get_parents_by_id(node), list(g1._graph.predecessors(node)))
def test_get_states_number(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node, val in zip(g1.nodes, g1.nodes_values):
self.assertEqual(val, g1.get_states_number(node))
def test_get_node_indx(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node, indx in zip(g1.nodes, g1.nodes_indexes):
self.assertEqual(indx, g1.get_node_indx(node))
if __name__ == '__main__':
unittest.main()
| 48.989691 | 127 | 0.675295 | [
"MIT"
] | madlabunimib/PyCTBN | PyCTBN/tests/structure_graph/test_networkgraph.py | 9,504 | Python |
import os
import urllib
import torch
from torch.utils import model_zoo
class CheckpointIO(object):
''' CheckpointIO class.
It handles saving and loading checkpoints.
Args:
checkpoint_dir (str): path where checkpoints are saved
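
    Example (a minimal usage sketch; ``model`` and ``optimizer`` stand for any
    torch modules/optimizers supplied by the caller):

        ckpt_io = CheckpointIO('./chkpts', model=model, optimizer=optimizer)
        ckpt_io.save('model.pt', epoch=10)
        scalars = ckpt_io.load('model.pt')  # -> {'epoch': 10}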
'''
def __init__(self, checkpoint_dir='./chkpts', **kwargs):
self.module_dict = kwargs
self.checkpoint_dir = checkpoint_dir
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
def register_modules(self, **kwargs):
''' Registers modules in current module dictionary.
'''
self.module_dict.update(kwargs)
def save(self, filename, **kwargs):
''' Saves the current module dictionary.
Args:
filename (str): name of output file
'''
if not os.path.isabs(filename):
filename = os.path.join(self.checkpoint_dir, filename)
outdict = kwargs
for k, v in self.module_dict.items():
outdict[k] = v.state_dict()
torch.save(outdict, filename)
def load(self, filename):
'''Loads a module dictionary from local file or url.
Args:
filename (str): name of saved module dictionary
'''
if is_url(filename):
return self.load_url(filename)
else:
return self.load_file(filename)
def load_file(self, filename):
'''Loads a module dictionary from file.
Args:
filename (str): name of saved module dictionary
'''
if not os.path.isabs(filename):
filename = os.path.join(self.checkpoint_dir, filename)
if os.path.exists(filename):
print(filename)
print('=> Loading checkpoint from local file...')
state_dict = torch.load(filename)
scalars = self.parse_state_dict(state_dict)
return scalars
else:
raise FileNotFoundError
def load_url(self, url):
'''Load a module dictionary from url.
Args:
url (str): url to saved model
'''
print(url)
print('=> Loading checkpoint from url...')
state_dict = model_zoo.load_url(url, progress=True)
scalars = self.parse_state_dict(state_dict)
return scalars
def parse_state_dict(self, state_dict):
'''Parse state_dict of model and return scalars.
Args:
state_dict (dict): State dict of model
'''
for k, v in self.module_dict.items():
if k in state_dict:
v.load_state_dict(state_dict[k])
else:
print('Warning: Could not find %s in checkpoint!' % k)
scalars = {k: v for k, v in state_dict.items()
if k not in self.module_dict}
return scalars
def is_url(url):
scheme = urllib.parse.urlparse(url).scheme
return scheme in ('http', 'https') | 29.346535 | 70 | 0.584345 | [
"MIT"
] | 1ucky40nc3/mednerf | graf-main/submodules/GAN_stability/gan_training/checkpoints.py | 2,964 | Python |
from axelrod.action import Action
from axelrod.player import Player
C, D = Action.C, Action.D
class Appeaser(Player):
"""A player who tries to guess what the opponent wants.
    Switches its action every time the opponent plays D.
Start with C, switch between C and D when opponent plays D.
Names:
- Appeaser: Original Name by Jochen Müller
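
    For example, against an opponent that defects on every turn this player
    alternates C, D, C, D, ... (an illustrative consequence of the rule above).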
"""
name = "Appeaser"
classifier = {
"memory_depth": float("inf"), # Depends on internal memory.
"stochastic": False,
"makes_use_of": set(),
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def strategy(self, opponent: Player) -> Action:
if not len(opponent.history):
return C
else:
if opponent.history[-1] == D:
if self.history[-1] == C:
return D
else:
return C
return self.history[-1]
| 25.923077 | 68 | 0.575668 | [
"MIT"
] | DPros/Axelrod | axelrod/strategies/appeaser.py | 1,012 | Python |
from django.contrib import admin
from .models import Sport, Activity, Settings, Traces, Lap
admin.site.register(Sport)
admin.site.register(Activity)
admin.site.register(Settings)
admin.site.register(Traces)
admin.site.register(Lap)
| 23.4 | 58 | 0.807692 | [
"MIT"
] | lucasace/workoutizer | wizer/admin.py | 234 | Python |
import uuid
from turkit2.common import TextClassification
from turkit2.qualifications import Unique, Locale, AcceptRate
from utils import get_client
client = get_client()
quals = [Locale(), AcceptRate()]
task = TextClassification(client, 'Test3', '0.01', 'test test', 600, 6000, ['positive', 'negative'], question='Which class does this text match, positive or negative?', qualifications=quals)
documents = [f'test{i}' for i in range(5)]
def proc(text):
for answer, assignment in task.ask(verbosity=100, text=text):
print(answer)
print(assignment)
def main():
tasks = []
for text in documents:
proc(text)
main()
| 26.2 | 190 | 0.700763 | [
"MIT"
] | anthliu/turkit2 | tests/text_classification_nonsync.py | 655 | Python |
from typing import Dict
from exaslct_src.lib.export_container_task import ExportContainerTask
from exaslct_src.lib.data.required_task_info import RequiredTaskInfo
from exaslct_src.lib.docker.docker_create_image_task import DockerCreateImageTask
class ExportContainerTasksCreator():
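    """Creates an ExportContainerTask for every (flavor path, release type)
    combination produced by the corresponding DockerCreateImageTask build tasks."""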
def __init__(self, export_path:str, release_name:str):
self.release_name = release_name
self.export_path = export_path
def create_export_tasks_for_flavors(
self, build_tasks: Dict[str, Dict[str, DockerCreateImageTask]]):
return {flavor_path: self.create_export_tasks(flavor_path, build_task)
for flavor_path, build_task in build_tasks.items()}
def create_export_tasks(self, flavor_path: str,
build_tasks: Dict[str, DockerCreateImageTask]):
return {release_type: self.create_export_task(release_type, flavor_path, build_task)
for release_type, build_task in build_tasks.items()}
def create_export_task(self, release_type: str, flavor_path: str,
build_task: DockerCreateImageTask):
required_task_info = self.create_required_task_info(build_task)
return \
ExportContainerTask(
required_task_info_json=required_task_info.to_json(indent=None),
export_path=self.export_path,
release_name=self.release_name,
release_type=release_type,
flavor_path=flavor_path)
def create_required_task_info(self, build_task):
required_task_info = \
RequiredTaskInfo(module_name=build_task.__module__,
class_name=build_task.__class__.__name__,
params=build_task.param_kwargs)
return required_task_info | 45.275 | 92 | 0.692435 | [
"MIT"
] | mace84/script-languages | exaslct_src/lib/export_container_tasks_creator.py | 1,811 | Python |
from subprocess import check_output
def isrunning(processName):
tasklist = check_output('tasklist', shell=False)
    # check_output returns bytes; decode so the substring check works on text
    tasklist = tasklist.decode(errors='ignore')
return(processName in tasklist) | 26.142857 | 52 | 0.759563 | [
"MIT"
] | ShaderLight/autochampselect | modules/isrunning.py | 183 | Python |
from lithops.multiprocessing import Process, JoinableQueue
def worker(q):
working = True
while working:
x = q.get()
# Do work that may fail
assert x < 10
# Confirm task
q.task_done()
if x == -1:
working = False
if __name__ == '__main__':
q = JoinableQueue()
p = Process(target=worker, args=(q,))
p.start()
for x in range(10):
q.put(x)
# uncomment to hang on the q.join
#q.put(11)
q.join()
q.put(-1) # end loop
p.join()
| 16.484848 | 58 | 0.527574 | [
"Apache-2.0"
] | GEizaguirre/lithops | examples/multiprocessing/joinable_queue.py | 544 | Python |
# Python program to draw smile
# face emoji using turtle
import turtle
# turtle object
pen = turtle.Turtle()
# function for creation of eye
def eye(col, rad):
pen.down()
pen.fillcolor(col)
pen.begin_fill()
pen.circle(rad)
pen.end_fill()
pen.up()
# draw face
pen.fillcolor('yellow')
pen.begin_fill()
pen.circle(100)
pen.end_fill()
pen.up()
# draw eyes
pen.goto(-40, 120)
eye('white', 15)
pen.goto(-37, 125)
eye('black', 5)
pen.goto(40, 120)
eye('white', 15)
pen.goto(40, 125)
eye('black', 5)
# draw nose
pen.goto(0, 75)
eye('black', 8)
# draw mouth
pen.goto(-40, 85)
pen.down()
pen.right(90)
pen.circle(40, 180)
pen.up()
# draw tongue
pen.goto(-10, 45)
pen.down()
pen.right(180)
pen.fillcolor('red')
pen.begin_fill()
pen.circle(10, 180)
pen.end_fill()
pen.hideturtle()
| 15.672727 | 31 | 0.616009 | [
"MIT"
] | ShashankShenoy21/Matlab-and-Python | Smiley_face.py | 862 | Python |
from __future__ import annotations
from typing import NoReturn
from . import LinearRegression
from ...base import BaseEstimator
import numpy as np
# import linear_regression
class PolynomialFitting(BaseEstimator):
"""
Polynomial Fitting using Least Squares estimation
"""
def __init__(self, k: int) -> PolynomialFitting:
"""
Instantiate a polynomial fitting estimator
Parameters
----------
k : int
Degree of polynomial to fit
"""
super().__init__()
self.deg_ = k
self.vander_, self.vander_linear_ = None, LinearRegression(False)
# raise NotImplementedError()
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
"""
Fit Least Squares model to polynomial transformed samples
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to fit an estimator for
y : ndarray of shape (n_samples, )
Responses of input data to fit to
"""
# self.vander_ = np.vander(X, self.deg_, increasing=True)
self.vander_linear_.fit(self.__transform(X), y)
def _predict(self, X: np.ndarray) -> np.ndarray:
"""
Predict responses for given samples using fitted estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
"""
return self.vander_linear_.predict(self.__transform(X))
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
"""
Evaluate performance under MSE loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
Returns
-------
loss : float
Performance under MSE loss function
"""
return self.vander_linear_.loss(self.__transform(X), y)
def __transform(self, X: np.ndarray) -> np.ndarray:
"""
Transform given input according to the univariate polynomial transformation
Parameters
----------
X: ndarray of shape (n_samples,)
Returns
-------
transformed: ndarray of shape (n_samples, k+1)
Vandermonde matrix of given samples up to degree k
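
        For example, with ``k = 2`` and ``X = [1, 2, 3]`` the returned matrix is
        ``[[1, 1, 1], [1, 2, 4], [1, 3, 9]]`` (powers 0..k of each sample).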
"""
X_vander = np.vander(X, self.deg_ + 1, increasing=True)
return X_vander
| 28.451613 | 83 | 0.586546 | [
"MIT"
] | shirlevy007/IML.HUJI | IMLearn/learners/regressors/polynomial_fitting.py | 2,646 | Python |
"""BackPACK extensions/hooks for computing low-rank factors of the GGN."""
| 37.5 | 74 | 0.76 | [
"MIT"
] | PwLo3K46/vivit | vivit/extensions/secondorder/__init__.py | 75 | Python |
import math
import warnings
from functools import reduce
import numpy as np
import torch
from backpack import backpack, extend
from backpack.extensions import BatchGrad
from gym.utils import seeding
from torchvision import datasets, transforms
from dacbench import AbstractEnv
warnings.filterwarnings("ignore")
class SGDEnv(AbstractEnv):
"""
Environment to control the learning rate of adam
"""
def __init__(self, config):
"""
Initialize SGD Env
Parameters
-------
config : objdict
Environment configuration
"""
super(SGDEnv, self).__init__(config)
self.batch_size = config.training_batch_size
self.validation_batch_size = config.validation_batch_size
self.no_cuda = config.no_cuda
self.current_batch_size = config.training_batch_size
self.env_seed = config.seed
self.seed(self.env_seed)
self.use_cuda = not self.no_cuda and torch.cuda.is_available()
self.device = torch.device("cuda" if self.use_cuda else "cpu")
self.training_validation_ratio = 0.8
# self.test_dataset = None
self.train_dataset = None
self.validation_dataset = None
self.train_loader = None
# self.test_loader = None
self.validation_loader = None
self.train_loader_it = None
self.validation_loader_it = None
self.train_batch_index = 0
self.epoch_index = 0
self.current_training_loss = None
self.loss_batch = None
self.model = None
self.parameter_count = 0
self.layer_sizes = []
self.loss_function = torch.nn.NLLLoss(reduction="none")
self.loss_function = extend(self.loss_function)
self.initial_lr = config.lr * torch.ones(
1, device=self.device, requires_grad=False
)
self.current_lr = config.lr * torch.ones(
1, device=self.device, requires_grad=False
)
# Adam parameters
self.beta1 = config.beta1
self.beta2 = config.beta2
self.m = 0
self.v = 0
self.epsilon = 1.0e-08
self.t = 0
self.step_count = torch.zeros(1, device=self.device, requires_grad=False)
self.prev_descent = None
self.learning_rate = 0.001
self.predictiveChangeVarDiscountedAverage = torch.zeros(
1, device=self.device, requires_grad=False
)
self.predictiveChangeVarUncertainty = torch.zeros(
1, device=self.device, requires_grad=False
)
self.lossVarDiscountedAverage = torch.zeros(
1, device=self.device, requires_grad=False
)
self.lossVarUncertainty = torch.zeros(
1, device=self.device, requires_grad=False
)
self.discount_factor = 0.9
self.firstOrderMomentum = torch.zeros(
1, device=self.device, requires_grad=False
)
self.secondOrderMomentum = torch.zeros(
1, device=self.device, requires_grad=False
)
self.writer = None
if "reward_function" in config.keys():
self.get_reward = config["reward_function"]
else:
self.get_reward = self.get_default_reward
if "state_method" in config.keys():
self.get_state = config["state_method"]
else:
self.get_state = self.get_default_state
def seed(self, seed=None):
"""
Set rng seed
Parameters
----------
seed:
seed for rng
"""
_, seed = seeding.np_random(seed)
if seed is not None:
torch.manual_seed(seed)
np.random.seed(seed)
return [seed]
def step(self, action):
"""
Execute environment step
Parameters
----------
action : list
action to execute
Returns
-------
np.array, float, bool, dict
state, reward, done, info
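
        Notes
        -----
        The single-element action is interpreted as the negative base-10
        exponent of the new learning rate, i.e. ``lr = 10 ** (-action)``;
        an action of ``3.0`` therefore sets the learning rate to ``1e-3``.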
"""
done = super(SGDEnv, self).step_()
self.step_count += 1
index = 0
if not isinstance(action, float):
action = action[0]
action = torch.Tensor([action]).to(self.device)
new_lr = 10 ** (-action)
self.current_lr = new_lr
delta_w = torch.mul(
new_lr,
self.firstOrderMomentum
/ (torch.sqrt(self.secondOrderMomentum) + self.epsilon),
)
for i, p in enumerate(self.model.parameters()):
layer_size = self.layer_sizes[i]
p.data = p.data - delta_w[index: index + layer_size].reshape(
shape=p.data.shape
)
index += layer_size
self._set_zero_grad()
reward = self.get_reward(self)
return self.get_state(self), reward, done, {}
def reset(self):
"""
Reset environment
Returns
-------
np.array
Environment state
"""
super(SGDEnv, self).reset_()
dataset = self.instance[0]
instance_seed = self.instance[1]
construct_model = self.instance[2]
self.seed(instance_seed)
self.model = construct_model().to(self.device)
self.training_validation_ratio = 0.8
train_dataloader_args = {"batch_size": self.batch_size}
validation_dataloader_args = {"batch_size": self.validation_batch_size}
if self.use_cuda:
param = {"num_workers": 1, "pin_memory": True, "shuffle": True}
train_dataloader_args.update(param)
validation_dataloader_args.update(param)
if dataset == "MNIST":
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
train_dataset = datasets.MNIST(
"../data", train=True, download=True, transform=transform
)
# self.test_dataset = datasets.MNIST('../data', train=False, transform=transform)
else:
raise NotImplementedError
training_dataset_limit = math.floor(
len(train_dataset) * self.training_validation_ratio
)
validation_dataset_limit = len(train_dataset)
self.train_dataset = torch.utils.data.Subset(
train_dataset, range(0, training_dataset_limit - 1)
)
self.validation_dataset = torch.utils.data.Subset(
train_dataset, range(training_dataset_limit, validation_dataset_limit)
)
self.train_loader = torch.utils.data.DataLoader(
self.train_dataset, **train_dataloader_args
)
# self.test_loader = torch.utils.data.DataLoader(self.test_dataset, **train_dataloader_args)
self.validation_loader = torch.utils.data.DataLoader(
self.validation_dataset, **validation_dataloader_args
)
self.train_batch_index = 0
self.epoch_index = 0
self.train_loader_it = iter(self.train_loader)
self.validation_loader_it = iter(self.validation_loader)
self.parameter_count = 0
self.layer_sizes = []
for p in self.model.parameters():
layer_size = reduce(lambda x, y: x * y, p.shape)
self.layer_sizes.append(layer_size)
self.parameter_count += layer_size
self.model = extend(self.model)
self._set_zero_grad()
self.model.train()
self.current_training_loss = None
self.loss_batch = None
# Adam parameters
self.m = 0
self.v = 0
self.t = 0
self.step_count = torch.zeros(1, device=self.device, requires_grad=False)
self.current_lr = self.initial_lr
self.prev_descent = torch.zeros(
(self.parameter_count,), device=self.device, requires_grad=False
)
self.get_default_reward(self)
return self.get_state(self)
def set_writer(self, writer):
self.writer = writer
def close(self):
"""
No additional cleanup necessary
Returns
-------
bool
Cleanup flag
"""
return True
def render(self, mode: str = "human"):
"""
Render env in human mode
Parameters
----------
mode : str
Execution mode
"""
if mode != "human":
raise NotImplementedError
pass
def get_default_state(self, _):
"""
Gather state description
Returns
-------
dict
Environment state
"""
gradients = self._get_gradients()
self.firstOrderMomentum, self.secondOrderMomentum = self._get_momentum(
gradients
)
(
predictiveChangeVarDiscountedAverage,
predictiveChangeVarUncertainty,
) = self._get_predictive_change_features(
self.current_lr, self.firstOrderMomentum, self.secondOrderMomentum
)
lossVarDiscountedAverage, lossVarUncertainty = self._get_loss_features()
state = {
"predictiveChangeVarDiscountedAverage": predictiveChangeVarDiscountedAverage,
"predictiveChangeVarUncertainty": predictiveChangeVarUncertainty,
"lossVarDiscountedAverage": lossVarDiscountedAverage,
"lossVarUncertainty": lossVarUncertainty,
"currentLR": self.current_lr,
"trainingLoss": self.current_training_loss,
"validationLoss": self.current_validation_loss,
}
return state
def _set_zero_grad(self):
index = 0
for i, p in enumerate(self.model.parameters()):
if p.grad is None:
continue
layer_size = self.layer_sizes[i]
p.grad.zero_()
index += layer_size
def _train_batch_(self):
(data, target) = self.train_loader_it.next()
data, target = data.to(self.device), target.to(self.device)
self.current_batch_size = data.size()[0]
output = self.model(data)
loss = self.loss_function(output, target)
with backpack(BatchGrad()):
loss.mean().backward()
loss_value = loss.mean()
reward = self._get_validation_loss()
self.loss_batch = loss
self.current_training_loss = torch.unsqueeze(loss_value.detach(), dim=0)
self.train_batch_index += 1
return reward
def get_default_reward(self, _):
try:
reward = self._train_batch_()
except StopIteration:
self.train_batch_index = 0
self.epoch_index += 1
self.train_loader_it = iter(self.train_loader)
reward = self._train_batch_()
return reward
def _get_val_loss(self):
self.model.eval()
validation_loss = torch.zeros(1, device=self.device, requires_grad=False)
with torch.no_grad():
for data, target in self.validation_loader:
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
validation_loss += self.loss_function(output, target).mean()
validation_loss /= len(self.validation_loader.dataset)
self.model.train()
return validation_loss
def _get_validation_loss_(self):
self.model.eval()
(data, target) = self.validation_loader_it.next()
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
validation_loss = self.loss_function(output, target).mean()
validation_loss = torch.unsqueeze(validation_loss.detach(), dim=0)
self.current_validation_loss = validation_loss
self.model.train()
return -validation_loss.item() # negative because it is the reward
def _get_validation_loss(self):
try:
validation_loss = self._get_validation_loss_()
except StopIteration:
self.validation_loader_it = iter(self.validation_loader)
validation_loss = self._get_validation_loss_()
return validation_loss
def _get_gradients(self):
gradients = []
for p in self.model.parameters():
if p.grad is None:
continue
gradients.append(p.grad.flatten())
gradients = torch.cat(gradients, dim=0)
return gradients
def _get_momentum(self, gradients):
self.t += 1
self.m = self.beta1 * self.m + (1 - self.beta1) * gradients
self.v = self.beta2 * self.v + (1 - self.beta2) * torch.square(gradients)
bias_corrected_m = self.m / (1 - self.beta1 ** self.t)
bias_corrected_v = self.v / (1 - self.beta2 ** self.t)
return bias_corrected_m, bias_corrected_v
def _get_adam_feature(self, learning_rate, m, v):
epsilon = 1.0e-8
return torch.mul(learning_rate, m / (torch.sqrt(v) + epsilon))
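    # Editor's note: this returns the magnitude of the Adam parameter update,
    # delta_w = lr * m_hat / (sqrt(v_hat) + eps); step() applies the same
    # expression to the parameters, and _get_predictive_change_features
    # projects it onto the per-sample gradients.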
def _get_loss_features(self):
with torch.no_grad():
loss_var = torch.log(torch.var(self.loss_batch))
self.lossVarDiscountedAverage = (
self.discount_factor * self.lossVarDiscountedAverage
+ (1 - self.discount_factor) * loss_var
)
self.lossVarUncertainty = (
self.discount_factor * self.lossVarUncertainty
+ (1 - self.discount_factor)
* (loss_var - self.lossVarDiscountedAverage) ** 2
)
return self.lossVarDiscountedAverage, self.lossVarUncertainty
def _get_predictive_change_features(self, lr, m, v):
batch_gradients = []
for i, (name, param) in enumerate(self.model.named_parameters()):
grad_batch = param.grad_batch.reshape(
self.current_batch_size, self.layer_sizes[i]
)
batch_gradients.append(grad_batch)
batch_gradients = torch.cat(batch_gradients, dim=1)
update_value = self._get_adam_feature(lr, m, v)
predictive_change = torch.log(
torch.var(-1 * torch.matmul(batch_gradients, update_value))
)
self.predictiveChangeVarDiscountedAverage = (
self.discount_factor * self.predictiveChangeVarDiscountedAverage
+ (1 - self.discount_factor) * predictive_change
)
self.predictiveChangeVarUncertainty = (
self.discount_factor * self.predictiveChangeVarUncertainty
+ (1 - self.discount_factor)
* (predictive_change - self.predictiveChangeVarDiscountedAverage) ** 2
)
return (
self.predictiveChangeVarDiscountedAverage,
self.predictiveChangeVarUncertainty,
)
| 31.41189 | 100 | 0.59973 | [
"Apache-2.0"
] | goktug97/DACBench | dacbench/envs/sgd.py | 14,795 | Python |
from __future__ import print_function
import sys
import os
import re
import subprocess
from .simsalabim import __version__, __copyright__
from . import add_quant_info as quant
from . import helpers
def main(argv):
print('dinosaur-adapter version %s\n%s' % (__version__, __copyright__))
print('Issued command:', os.path.basename(__file__) + " " + " ".join(map(str, sys.argv[1:])))
args, params = parseArgs()
run_dinosaur(args.dinosaur_jar_path, args.mzml_fns, args.output_folder, args.spectrum_output_format, params)
def parseArgs():
import argparse
apars = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
requiredNamed = apars.add_argument_group('required arguments')
requiredNamed.add_argument('--dinosaur_jar_path', metavar = "JAR", required = True,
help='''Path to the Dinosaur .jar file.
''')
apars.add_argument('--mzml_fns', default=None, metavar = "M", nargs='*',
help='''mzML file(s). To easily specify multiple files one can use wildcards, e.g. my_spectrum_files/*.mzML
''')
apars.add_argument('--file_list_file', default=None, metavar = "L",
help='''Text file with paths to mzML files, one per line.
''')
apars.add_argument('--output_folder', default="./dinosaur/", metavar='O',
help='''Output folder.
''')
apars.add_argument('--dinosaur_mem', default=8.0, metavar='M', type=float,
help='''Memory for allocated for Dinosaur in GB.
''')
apars.add_argument('--dinosaur_flags', default="", metavar='O',
help='''Extra command line flags to pass to Dinosaur, as indicated in Dinosaur's help text.
''')
apars.add_argument('--spectrum_output_format', default=None, metavar='F',
help='''If you want updated spectrum files with the new MS1 features assigned to the MS2 spectra, set this to the desired output format (ms2, mgf or mzML).
''')
apars.add_argument('--split_precursors',
help='''for .mzML or .ms2 output this creates a new spectrum for each precursor, e.g.
if spectrum with scan number 132 matches two precursors, we generate two spectra
with scan numbers 13201 and 13202. This can be useful if your downstream
analysis includes tools that do not support multiple precursors per spectrum,
such as MSGF+. For MGF output this flag is always set, as it does not support
multiple precursors per spectrum.
''',
action='store_true')
# ------------------------------------------------
args = apars.parse_args()
if not args.mzml_fns:
if args.file_list_file and len(args.file_list_file) > 0:
with open(args.file_list_file, 'r') as f:
args.mzml_fns = list(filter(lambda x : len(x) > 0, map(lambda x : re.sub(r"[\n\t\s]*", "", x), f.read().splitlines())))
else:
sys.exit("No input mzML files specified. Use either --mzml_fns or --file_list_file.")
elif args.file_list_file and len(args.file_list_file) > 0:
sys.exit("Ambiguous mzML input. Use either --mzml_fns or --file_list_file, not both.")
params = dict()
params['splitPrecursors'] = args.split_precursors
params['dinosaurMemory'] = args.dinosaur_mem
params['dinosaurFlags'] = args.dinosaur_flags
return args, params
def run_dinosaur(dinosaur_jar_path, mzml_fns, output_folder, spectrum_output_format, params):
dinosaur_binary = "java -Xmx%dM -jar %s --seed=1" % (int(params['dinosaurMemory']*1000), dinosaur_jar_path)
helpers.createDir(output_folder)
for mzml_fn in mzml_fns:
baseFN = helpers.getBase(helpers.getFileName(mzml_fn))
dinosaur_output_file = os.path.join(output_folder, baseFN + ".features.tsv")
if not os.path.isfile(dinosaur_output_file):
cmd_dinosaur = "%s --force --outDir=%s %s %s;" % (dinosaur_binary, output_folder, params['dinosaurFlags'], mzml_fn)
helpers.executeCmd(cmd_dinosaur)
else:
print("Found dinosaur output file at %s, remove this file to re-run Dinosaur on this file" % (dinosaur_output_file))
output_fn = os.path.join(output_folder, baseFN + ".dummy.txt")
if spectrum_output_format:
output_fn = os.path.join(output_folder, baseFN + ".recalibrated." + spectrum_output_format)
params['specPrecMapFile'] = os.path.join(output_folder, baseFN + ".feature_map.tsv")
if not os.path.isfile(params['specPrecMapFile']):
quant.add_accurate_precursors(dinosaur_output_file, mzml_fn, output_fn, params)
if output_fn.endswith(".dummy.txt"):
os.remove(output_fn)
else:
print("Found dinosaur mapping file at %s, remove this file to re-run Dinosaur on this file" % (params['specPrecMapFile']))
if __name__ == '__main__':
main(sys.argv[1:])
| 47.5 | 176 | 0.622775 | [
"Apache-2.0"
] | MatthewThe/simsalabim | simsalabim/dinosaur_adapter.py | 5,225 | Python |
# _*_ coding: utf-8 _*_
"""
util_urlfilter.py by xianhu
"""
import re
import pybloom_live
from .util_config import CONFIG_URLPATTERN_ALL
class UrlFilter(object):
"""
class of UrlFilter, to filter url by regexs and (bloomfilter or set)
"""
def __init__(self, black_patterns=(CONFIG_URLPATTERN_ALL,), white_patterns=(r"^http",), capacity=None):
"""
constructor, use variable of BloomFilter if capacity else variable of set
"""
self._re_black_list = [re.compile(pattern, flags=re.IGNORECASE) for pattern in black_patterns] if black_patterns else []
self._re_white_list = [re.compile(pattern, flags=re.IGNORECASE) for pattern in white_patterns] if white_patterns else []
self._url_set = set() if not capacity else None
self._bloom_filter = pybloom_live.ScalableBloomFilter(capacity, error_rate=0.001) if capacity else None
return
def update(self, url_list):
"""
update this urlfilter using url_list
"""
if self._url_set is not None:
self._url_set.update(url_list)
else:
for url in url_list:
self._bloom_filter.add(url)
return
def check(self, url):
"""
check the url based on self._re_black_list and self._re_white_list
"""
# if url in black_list, return False
for re_black in self._re_black_list:
if re_black.search(url):
return False
# if url in white_list, return True
for re_white in self._re_white_list:
if re_white.search(url):
return True
return False if self._re_white_list else True
def check_and_add(self, url):
"""
check the url to make sure that the url hasn't been fetched, and add url to urlfilter
"""
result = False
if self.check(url):
if self._url_set is not None:
result = url not in self._url_set
self._url_set.add(url)
else:
result = not self._bloom_filter.add(url)
return result
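# A minimal usage sketch (illustrative only; the pattern and URLs are hypothetical):
#     url_filter = UrlFilter(white_patterns=(r"^https?://example\.com",), capacity=100000)
#     url_filter.check_and_add("https://example.com/page")  # True: first time this url is seen
#     url_filter.check_and_add("https://example.com/page")  # False: already fetched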
| 31.716418 | 128 | 0.618824 | [
"BSD-2-Clause"
] | charlesXu86/PSpider | spider/utilities/util_urlfilter.py | 2,125 | Python |
#!/usr/bin/env python
'''command long'''
import threading
import time, os
import math
from pymavlink import mavutil
from MAVProxy.modules.lib import mp_module
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from threading import Thread
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as animation
class CmdlongModule(mp_module.MPModule):
def __init__(self, mpstate):
super(CmdlongModule, self).__init__(mpstate, "cmdlong")
self.add_command('setspeed', self.cmd_do_change_speed, "do_change_speed")
self.add_command('setyaw', self.cmd_condition_yaw, "condition_yaw")
self.add_command('offboard', self.offboard_mode, "offboard")
self.add_command('p_mode', self.position_mode, "p_mode")
self.add_command('m_mode', self.manual_mode, "m_mode")
self.add_command('a_mode', self.altitude_mode, "a_mode")
self.add_command('takeoff2', self.cmd_takeoff_2, "takeoff2")
self.add_command('takeoff3', self.takeoff_3, "takeoff3")
self.add_command('music',self.music,"music")
self.add_command('land2', self.land_2, "land2")
self.add_command('fly', self.fly, "fly")
self.add_command('x', self.x, "x")
self.add_command('y', self.y, "y")
self.add_command('z', self.z, "z")
self.add_command('h', self.h, "h")
self.add_command('yaw', self.yaw, "yaw")
self.add_command('takeoff', self.cmd_takeoff, "takeoff")
self.add_command('velocity', self.cmd_velocity, "velocity")
self.add_command('position', self.cmd_position, "position")
self.add_command('st', self.start_position_thread, "start_position_thread")
self.add_command('attitude', self.cmd_attitude, "attitude")
self.add_command('cammsg', self.cmd_cammsg, "cammsg")
self.add_command('camctrlmsg', self.cmd_camctrlmsg, "camctrlmsg")
self.add_command('posvel', self.cmd_posvel, "posvel")
self.add_command('parachute', self.cmd_parachute, "parachute",
['<enable|disable|release>'])
self.add_command('long', self.cmd_long, "execute mavlink long command",
self.cmd_long_commands())
self.dis_max = 0
self.dis_min = 100
self.dis_diff = self.dis_max - self.dis_min
self.svo_x_max = 0
self.svo_x_min = 0
self.svo_y_max = 0
self.svo_y_min = 0
self.x_diff = self.svo_x_max - self.svo_x_min
self.y_diff = self.svo_y_max - self.svo_y_min
self.list_x = []
self.list_y = []
self.list_z = []
self.svo_x = 0
self.svo_y = 0
self.svo_z = 0
#thread_obj = Thread(target = self.show_svo_2d)
#thread_obj = Thread(target = self.show_svo)
#thread_obj.setDaemon(True)
#thread_obj.start()
def cmd_long_commands(self):
atts = dir(mavutil.mavlink)
atts = filter( lambda x : x.lower().startswith("mav_cmd"), atts)
ret = []
for att in atts:
ret.append(att)
ret.append(str(att[8:]))
return ret
def cmd_takeoff(self, args):
'''take off'''
if ( len(args) != 1):
print("Usage: takeoff ALTITUDE_IN_METERS")
return
if (len(args) == 1):
altitude = float(args[0])
print("Take Off started")
self.master.mav.command_long_send(
self.settings.target_system, # target_system
mavutil.mavlink.MAV_COMP_ID_SYSTEM_CONTROL, # target_component
mavutil.mavlink.MAV_CMD_NAV_TAKEOFF, # command
0, # confirmation
0, # param1
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
altitude) # param7
def cmd_parachute(self, args):
'''parachute control'''
usage = "Usage: parachute <enable|disable|release>"
if len(args) != 1:
print(usage)
return
cmds = {
'enable' : mavutil.mavlink.PARACHUTE_ENABLE,
'disable' : mavutil.mavlink.PARACHUTE_DISABLE,
'release' : mavutil.mavlink.PARACHUTE_RELEASE
}
if not args[0] in cmds:
print(usage)
return
cmd = cmds[args[0]]
self.master.mav.command_long_send(
self.settings.target_system, # target_system
0, # target_component
mavutil.mavlink.MAV_CMD_DO_PARACHUTE,
0,
cmd,
0, 0, 0, 0, 0, 0)
def cmd_camctrlmsg(self, args):
'''camctrlmsg'''
print("Sent DIGICAM_CONFIGURE CMD_LONG")
self.master.mav.command_long_send(
self.settings.target_system, # target_system
0, # target_component
mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONFIGURE, # command
0, # confirmation
10, # param1
20, # param2
30, # param3
40, # param4
50, # param5
60, # param6
70) # param7
def cmd_cammsg(self, args):
'''cammsg'''
print("Sent DIGICAM_CONTROL CMD_LONG")
self.master.mav.command_long_send(
self.settings.target_system, # target_system
0, # target_component
mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONTROL, # command
0, # confirmation
10, # param1
20, # param2
30, # param3
40, # param4
50, # param5
60, # param6
70) # param7
def cmd_do_change_speed(self, args):
'''speed value'''
if ( len(args) != 1):
print("Usage: speed SPEED_VALUE")
return
if (len(args) == 1):
speed = float(args[0])
print("SPEED %s" % (str(speed)))
self.master.mav.command_long_send(
self.settings.target_system, # target_system
mavutil.mavlink.MAV_COMP_ID_SYSTEM_CONTROL, # target_component
mavutil.mavlink.MAV_CMD_DO_CHANGE_SPEED, # command
0, # confirmation
0, # param1
speed, # param2 (Speed value)
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
def cmd_condition_yaw(self, args):
'''yaw angle angular_speed angle_mode'''
if ( len(args) != 3):
print("Usage: yaw ANGLE ANGULAR_SPEED MODE:[0 absolute / 1 relative]")
return
if (len(args) == 3):
angle = float(args[0])
angular_speed = float(args[1])
angle_mode = float(args[2])
print("ANGLE %s" % (str(angle)))
self.master.mav.command_long_send(
self.settings.target_system, # target_system
mavutil.mavlink.MAV_COMP_ID_SYSTEM_CONTROL, # target_component
mavutil.mavlink.MAV_CMD_CONDITION_YAW, # command
0, # confirmation
angle, # param1 (angle value)
angular_speed, # param2 (angular speed value)
0, # param3
angle_mode, # param4 (mode: 0->absolute / 1->relative)
0, # param5
0, # param6
0) # param7
def cmd_velocity(self, args):
'''velocity x-ms y-ms z-ms'''
if (len(args) != 3):
print("Usage: velocity x y z (m/s)")
return
if (len(args) == 3):
x_mps = float(args[0])
y_mps = float(args[1])
z_mps = float(args[2])
print("x:%f, y:%f, z:%f" % (x_mps, y_mps, z_mps))
self.master.mav.set_position_target_local_ned_send(
0, # system time in milliseconds
1, # target system
0, # target component
8, # coordinate frame MAV_FRAME_BODY_NED
455, # type mask (vel only)
0, 0, 0, # position x,y,z
x_mps, y_mps, z_mps, # velocity x,y,z
0, 0, 0, # accel x,y,z
0, 0) # yaw, yaw rate
def mavlink_packet(self, msg):
type = msg.get_type()
if type == 'DISTANCE_SENSOR':
#print "distance find\n"
#print isinstance(msg,subclass)
#print msg.current_distance
#print msg.__class__
#self.console.set_status('distance','distance %s' % msg.current_distance)
#print msg.current_distance
if self.dis_max < msg.current_distance:
self.dis_max = msg.current_distance
if self.dis_min > msg.current_distance:
self.dis_min = msg.current_distance
self.dis_diff = self.dis_max - self.dis_min
#self.msg.current_distance =
if type == 'SVO_POSITION_RAW':
#self.svo_x = msg.position_x
#self.svo_y = msg.position_y
#self.svo_z = msg.position_z
if self.svo_x_max < msg.position_x:
self.svo_x_max = msg.position_x
if self.svo_x_min > msg.position_x:
self.svo_x_min = msg.position_x
if self.svo_y_max < msg.position_y:
self.svo_y_max = msg.position_y
if self.svo_y_min > msg.position_y:
self.svo_y_min = msg.position_y
self.x_diff = self.svo_x_max - self.svo_x_min
self.y_diff = self.svo_y_max - self.svo_y_min
#print self.dis_max
#print self.dis_min
elif type == 'LOCAL_POSITION_NED':
self.console.set_status('position_ned_x','position_x %s' % msg.x)
self.svo_x = msg.x
#print type(self.svo_x)
#self.console.set_status('position_ned_y','position_y %s' % msg.y)
self.svo_y = msg.y
#print (svo_y)
#self.console.set_status('position_ned_z','position_ned %s' % msg.z)
self.svo_z = msg.z
def show_svo_2d(self):
fig = plt.figure()
#self.ax = p3.Axes3D(fig)
self.ax = fig.add_subplot(1, 1, 1)
num = 0
self.ax.set_xlabel('X')
self.ax.set_ylabel('Y')
self.ax.set_title('2D Test')
self.ax.set_xlim([-1, 1])
self.ax.set_ylim([-1, 1])
self.num = 0
#self.lineData = self.ax.scatter(1, 1, c = 'b', marker = '.')
self.lineData, = self.ax.plot([],[])
line_ani = animation.FuncAnimation(fig, self.update_lines_2d,self.Gen_RandLine_2d,
interval=100, blit=False)
plt.show()
def show_svo(self):
fig = plt.figure()
#self.ax = p3.Axes3D(fig)
self.ax = fig.add_subplot(1, 1, 1, projection="3d")
num = 0
self.ax.set_xlabel('X')
self.ax.set_xlim3d([-1.0, 1.0])
self.ax.set_ylabel('Y')
self.ax.set_ylim3d([-1.0, 1.0])
self.ax.set_zlabel('Z')
self.ax.set_zlim3d([-1.0, 1.0])
self.ax.set_title('3D Test')
self.num = 0
#line_ani = animation.FuncAnimation(fig, self.update_lines,self.Gen_RandLine,
# interval=10, blit=False)
self.lineData = self.ax.scatter([1], [1], [1], c = 'b', marker = '.')
line_ani = animation.FuncAnimation(fig, self.update_lines,self.Gen_RandLine,
interval=10, blit=False)
plt.show()
def data_stream(self):
pass
def Gen_RandLine_2d(self):
if len(self.list_x)<200:
self.list_x.append(self.svo_x)
self.list_y.append(self.svo_y)
self.list_z.append(self.svo_z)
else:
self.list_x.append(self.svo_x)
self.list_x = self.list_x[1:]
self.list_y.append(self.svo_y)
self.list_y = self.list_y[1:]
self.list_z.append(self.svo_z)
self.list_z = self.list_z[1:]
#for i in range(2):
#list_x = self.svo_x
#list_y = self.svo_y
self.list_x.append(float(self.svo_x))
self.list_y.append(float(self.svo_y))
#self.list_z.append(float(self.svo_z))
lineData = [self.list_x,self.list_y]
#lineData = [list_x,list_y]
#print type(list_x)
#print lineData
#time.sleep(0.02)
#self.ax.set_zlim(min(data[2]), max(data[2]))
#lineData = [self.list_x,self.list_y,self.list_z]
yield lineData
def update_lines_2d(self,data):
#print "data",data
#lineData = self.ax.scatter(data[0], data[1], data[2], c = 'b', marker = '.')
#self.lineData.set_data([(data[0], data[1])])
self.lineData.set_xdata(data[0])
self.lineData.set_ydata(data[1])
self.num = self.num + 1
#self.ax.set_xlim(min(data[0]), max(data[0]))
#self.ax.set_ylim(min(data[1]), max(data[1]))
if self.num == 100:
#self.ax.cla()
#print self.num
self.num = 0
self.ax.set_xlim(min(data[0])-1, max(data[0])+1)
self.ax.set_ylim(min(data[1])-1, max(data[1])+1)
return self.lineData,
def Gen_RandLine(self):
'''
if len(self.list_x)<70:
self.list_x.append(self.svo_x)
self.list_y.append(self.svo_y)
self.list_z.append(self.svo_z)
else:
self.list_x.append(self.svo_x)
self.list_x = self.list_x[1:]
self.list_y.append(self.svo_y)
self.list_y = self.list_y[1:]
self.list_z.append(self.svo_z)
self.list_z = self.list_z[1:]
'''
#for i in range(2):
list_x = self.svo_x
list_y = self.svo_y
list_z = self.svo_z
#self.list_x.append(float(self.svo_x))
#self.list_y.append(float(self.svo_y))
#self.list_z.append(float(self.svo_z))
#lineData = [self.list_x,self.list_y,self.list_z]
lineData = [[list_x],[list_y],[list_z]]
#print type(list_x)
#print lineData
#self.ax.set_xlim(min(data[0]), max(data[0]))
#self.ax.set_ylim(min(data[1]), max(data[1]))
#self.ax.set_zlim(min(data[2]), max(data[2]))
#lineData = [self.list_x,self.list_y,self.list_z]
yield lineData
def update_lines(self,data):
#print "data",data
#lineData = self.ax.scatter(data[0], data[1], data[2], c = 'b', marker = '.')
self.lineData.set_offsets([(data[0], data[1])])
#self.lineData.set_data([data[0], data[1]])
self.lineData.set_3d_properties([data[2]], "z")
self.num = self.num + 1
if self.num == 200:
#self.ax.cla()
#print self.num
self.num = 0
self.ax.set_xlabel('X')
#self.ax.set_xlim3d([-1.0, 1.0])
self.ax.set_ylabel('Y')
#self.ax.set_ylim3d([-1.0, 1.0])
self.ax.set_zlabel('Z')
#self.ax.set_zlim3d([-1.0, 1.0])
self.ax.set_title('3D Test')
print "xdiff",self.x_diff
print "ydiff",self.y_diff
#lineData = ax.scatter(data[0], data[1], data[2], c = 'b', marker = '.')
#plt.pause(0.01)
#ax = p3.Axes3D(fig)
return self.lineData
def position_mode(self,args):
print "position mode!!!!!!!!!!!!!!!!!"
self.list_x = []
self.list_y = []
self.list_z = []
#self.start_position_thread(1)
time.sleep(0.5)
#self.master.set_mode(221,6,0)
self.master.set_mode(129,3,0)
self.dis_max = 0
self.dis_min = 100
self.svo_x_max = 0
self.svo_x_min = 0
self.svo_y_max = 0
self.svo_y_min = 0
self.x_diff = self.svo_x_max - self.svo_x_min
self.y_diff = self.svo_y_max - self.svo_y_min
def manual_mode(self,args):
print "manual mode!!!!!!!!!!!!!!!!!"
print self.master.__class__
#self.start_position_thread(1)
#time.sleep(0.5)
#self.master.set_mode(221,6,0)
self.master.set_mode(129,1,0)
self.v_z = float(args[0])
def altitude_mode(self,args):
print "altitude mode!!!!!!!!!!!!!!!!!"
#self.start_position_thread(1)
#time.sleep(0.5)
#self.master.set_mode(221,6,0)
self.master.set_mode(129,2,0)
#self.v_z = float(370)
#self.dis_max = 0
#self.dis_min = 100
def offboard_mode(self,args):
print "offboard!!!!!!!!!!!!!!!!!"
#self.cmd_position_2(1)
self.start_offboard_thread(1)
time.sleep(0.5)
self.master.set_mode(221,6,0)
#self.master.set_mode(1,3,0)
def cmd_takeoff_2(self, args):
'''position z-m'''
if (len(args) != 1):
print("Usage: position z (meters)")
return
if (len(args) == 1):
# x_m = float(0)
# y_m = float(0)
z_m = float(args[0])
# print("x:%f, y:%f, z:%f" % (x_m, y_m, z_m))
self.master.mav.set_position_target_local_ned_send(
0, # system time in milliseconds
1, # target system
0, # target component
8, # coordinate frame MAV_FRAME_BODY_NED
5571, # type mask (pos only)
0, 0, z_m, # position x,y,z
0, 0, 0, # velocity x,y,z
0, 0, 0, # accel x,y,z
0, 0) # yaw, yaw rate
def takeoff_3(self,args):
self.type_mask = 5571
#self.type_mask = 3576
self.x_m = float(0)
self.y_m = float(0)
self.z_m = float(1.5)
self.v_x = float(0)
self.v_y = float(0)
self.v_z = float(0)
#self.cmd_position([1,1,1])
def music(self,args):
self.master.mav.command_long_send(
self.settings.target_system, # target_system
1, # target_component
0, # command
1, # confirmation
0, # param1
0, # param2 (Speed value)
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
print self.settings.target_system
print mavutil.mavlink.MAV_COMP_ID_SYSTEM_CONTROL
def land_2(self,args):
self.type_mask = 9671
self.v_x = float(0)
self.v_y = float(0)
self.v_z = float(0)
#def h(self,args):
# self.type_mask = 1479
# self.v_x = float(0)
# self.v_y = float(0)
# self.v_z = float(0)
def x(self,args):
#print self.master.flightmode
self.type_mask = 1479
if self.master.flightmode == "POSCTL":
#print self.master
self.v_x = float(args[0])*0.5
elif self.master.flightmode == "ALTCTL":
#print self.master
self.v_x = float(args[0])*1
elif self.master.flightmode == "MANUAL":
#print self.master
self.v_x = float(args[0])*1
#self.v_z = -4
self.button = 1
def y(self,args):
self.type_mask = 1479
if self.master.flightmode == "POSCTL":
self.v_y = float(args[0])*0.5
elif self.master.flightmode == "ALTCTL":
self.v_y = float(args[0])*1
elif self.master.flightmode == "MANUAL":
self.v_y = float(args[0])*1
#self.v_z = -4
self.button = 1
def z(self,args):
self.type_mask = 1479
#self.v_z = float(args[0])
if self.master.flightmode == "POSCTL":
self.v_z = self.v_z + int(args[0])
elif self.master.flightmode == "ALTCTL":
self.v_z = self.v_z + int(args[0])
elif self.master.flightmode == "MANUAL":
self.v_z = self.v_z + int(args[0])*0.1
self.button = 1
def yaw(self,args):
self.type_mask = 1479
#self.yaw_rate = float(float(args[0])*(math.pi/6.0))
self.yaw_rate = float(args[0])*1.5
self.button = 1
#time.sleep(0.5)
#self.yaw_rate = float(0)
def h(self,args):
self.type_mask = 1479
self.v_x = float(0)
self.v_y = float(0)
if self.master.flightmode == "POSCTL":
self.v_z = float(args[0])
elif self.master.flightmode == "ALTCTL":
self.v_z = float(args[0])
elif self.master.flightmode == "MANUAL":
pass
self.yaw_rate = float(0)
self.button = 0
def fly(self,args):
self.type_mask = 1479
self.v_x = float(1)
time.sleep(2)
self.v_x = float(0)
self.v_y = float(1)
time.sleep(2)
self.v_y = float(0)
self.v_x = float(-1)
time.sleep(2)
self.v_x = float(0)
self.v_y = float(-1)
time.sleep(2)
self.v_y = float(0)
def start_position_thread(self,args):
thread_obj = threading.Thread(target=self._cmd_position_2)
thread_obj.setDaemon(True)
thread_obj.start()
#pass
def start_offboard_thread(self,args):
thread_obj = threading.Thread(target=self._cmd_position_2_offboard)
thread_obj.start()
def _cmd_position_2_offboard(self):
'''position x-m y-m z-m'''
#if (len(args) != 3):
# print("Usage: position x y z (meters)")
# return
#if (len(args) == 3):
self.type_mask = 17863
self.x_m = float(0)
self.y_m = float(0)
self.z_m = float(0)
self.v_x = float(0)
self.v_y = float(0)
self.v_z = float(0)
self.yaw_rate = float(0)
#print("x:%f, y:%f, z:%f" % (x_m, y_m, z_m))
while 1:
time.sleep(0.05)
#print "type_mask:%s\n" % self.type_mask
#print "v_x:%s\n" % self.v_x
#print "v_y:%s\n" % self.v_y
#print "v_z:%s\n" % self.v_z
#print "z_m:%s\n" % self.z_m
#print "send idle"
self.master.mav.set_position_target_local_ned_send(
0, # system time in milliseconds
1, # target system
0, # target component
8, # coordinate frame MAV_FRAME_BODY_NED
self.type_mask, # type mask (pos only) 42707
self.x_m, self.y_m, self.z_m, # position x,y,z
self.v_x, self.v_y, self.v_z, # velocity x,y,z
0, 0, 0, # accel x,y,z
0, self.yaw_rate) # yaw, yaw rate
def _cmd_position_2(self):
print "position2"
'''position x-m y-m z-m'''
#if (len(args) != 3):
# print("Usage: position x y z (meters)")
# return
#if (len(args) == 3):
#self.type_mask = 17863
#self.x_m = float(0)
#self.y_m = float(0)
#self.z_m = float(0)
self.v_x = 0
self.v_y = 0
self.v_z = 0
self.yaw_rate = 0
self.button = 0
#print("x:%f, y:%f, z:%f" % (x_m, y_m, z_m))
i = 0
while 1:
time.sleep(0.05)
#print "type_mask:%s\n" % self.type_mask
#print "v_x:%s\n" % self.v_x
#print "v_y:%s\n" % self.v_y
#print "v_z:%s\n" % self.v_z
#print "z_m:%s\n" % self.z_m
#print "send idle"
self.master.mav.manual_control_send(self.master.target_system,
self.v_x, self.v_y,
self.v_z, self.yaw_rate,
self.button)
i = i + 1
if 0:
#if i == 100:
print "x",(int(self.v_x))
print "y",(int(self.v_y))
print "z",(int(self.v_z))
print "yaw",(int(self.yaw_rate))
print "dis_diff",(self.dis_diff)
print "x_diff",(self.x_diff)
print "y_diff",(self.y_diff)
print "button",self.button
print "target",(self.master.target_system)
i = 0
def cmd_position3(self, args):
'''position x-m y-m z-m'''
if (len(args) != 3):
print("Usage: position x y z (meters)")
return
if (len(args) == 3):
x_m = float(args[0])
y_m = float(args[1])
z_m = float(args[2])
print("x:%f, y:%f, z:%f" % (x_m, y_m, z_m))
self.master.mav.set_position_target_local_ned_send(
0, # system time in milliseconds
1, # target system
0, # target component
8, # coordinate frame MAV_FRAME_BODY_NED
1479, # type mask (pos only)
0, 0, 0,# position x,y,z
x_m, y_m, z_m, # velocity x,y,z
0, 0, 0, # accel x,y,z
0, 0) # yaw, yaw rate
def cmd_position(self, args):
'''position x-m y-m z-m'''
if (len(args) != 3):
print("Usage: position x y z (meters)")
return
if (len(args) == 3):
x_m = float(args[0])
y_m = float(args[1])
z_m = float(args[2])
print("x:%f, y:%f, z:%f" % (x_m, y_m, z_m))
self.master.mav.set_position_target_local_ned_send(
0, # system time in milliseconds
1, # target system
0, # target component
8, # coordinate frame MAV_FRAME_BODY_NED
3576, # type mask (pos only)
x_m, y_m, z_m, # position x,y,z
0, 0, 0, # velocity x,y,z
0, 0, 0, # accel x,y,z
0, 0) # yaw, yaw rate
def cmd_attitude(self, args):
'''attitude q0 q1 q2 q3 thrust'''
if len(args) != 5:
print("Usage: attitude q0 q1 q2 q3 thrust (0~1)")
return
if len(args) == 5:
q0 = float(args[0])
q1 = float(args[1])
q2 = float(args[2])
q3 = float(args[3])
thrust = float(args[4])
att_target = [q0, q1, q2, q3]
print("q0:%.3f, q1:%.3f, q2:%.3f q3:%.3f thrust:%.2f" % (q0, q1, q2, q3, thrust))
self.master.mav.set_attitude_target_send(
0, # system time in milliseconds
1, # target system
0, # target component
63, # type mask (ignore all except attitude + thrust)
att_target, # quaternion attitude
0, # body roll rate
0, # body pich rate
0, # body yaw rate
thrust) # thrust
def cmd_posvel(self, args):
'''posvel mapclick vN vE vD'''
ignoremask = 511
latlon = None
try:
latlon = self.module('map').click_position
except Exception:
pass
if latlon is None:
print "set latlon to zeros"
latlon = [0, 0]
else:
ignoremask = ignoremask & 504
print "found latlon", ignoremask
vN = 0
vE = 0
vD = 0
if (len(args) == 3):
vN = float(args[0])
vE = float(args[1])
vD = float(args[2])
ignoremask = ignoremask & 455
print "ignoremask",ignoremask
print latlon
self.master.mav.set_position_target_global_int_send(
0, # system time in ms
1, # target system
0, # target component
mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT,
ignoremask, # ignore
int(latlon[0] * 1e7),
int(latlon[1] * 1e7),
10,
vN, vE, vD, # velocity
0, 0, 0, # accel x,y,z
0, 0) # yaw, yaw rate
def cmd_long(self, args):
'''execute supplied command long'''
if len(args) < 1:
print("Usage: long <command> [arg1] [arg2]...")
return
command = None
if args[0].isdigit():
command = int(args[0])
else:
try:
command = eval("mavutil.mavlink." + args[0])
except AttributeError as e:
try:
command = eval("mavutil.mavlink.MAV_CMD_" + args[0])
except AttributeError as e:
pass
if command is None:
print("Unknown command long ({0})".format(args[0]))
return
floating_args = [ float(x) for x in args[1:] ]
while len(floating_args) < 7:
floating_args.append(float(0))
self.master.mav.command_long_send(self.settings.target_system,
self.settings.target_component,
command,
0,
*floating_args)
def init(mpstate):
'''initialise module'''
return CmdlongModule(mpstate)
| 36.367371 | 93 | 0.487333 | [
"CC0-1.0"
] | joakimzhang/python-electron | pycalc/MAVProxy/modules/mavproxy_cmdlong.py | 30,985 | Python |
""" This module is intended to extend functionality of the code provided by original authors.
The process is as follows:
1. User has to provide source root path containing (possibly nested) folders with dicom files
2. The program will recreate the structure in the destination root path and anonymize all
dicom files.
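
Example invocation (a sketch; the actual script/entry-point name may differ):
    python batch_anonymizer.py --type batch /data/dicom_src /data/dicom_anonymized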
"""
import argparse
import json
import logging
import logging.config
import random
from pathlib import Path
from typing import Optional
import pydicom
from dicomanonymizer.anonym_state import AnonState
from dicomanonymizer.dicom_utils import fix_exposure
from dicomanonymizer.simpledicomanonymizer import (
anonymize_dicom_file,
initialize_actions,
)
from dicomanonymizer.utils import (
LOGS_PATH,
PROJ_ROOT,
ActionsDict,
Path_Str,
get_dirs,
to_Path,
try_valid_dir,
)
# setup logging (create dirs, if it is first time)
LOGS_PATH.mkdir(parents=True, exist_ok=True)
logging.config.fileConfig(
PROJ_ROOT / "dicomanonymizer/config/logging.ini",
defaults={"logfilename": (LOGS_PATH / "file.log").as_posix()},
disable_existing_loggers=False,
)
logger = logging.getLogger(__name__)
_STATE_PATH = Path.home() / ".dicomanonymizer/cache"
_STATE_PATH.mkdir(parents=True, exist_ok=True)
def get_extra_rules(
use_extra: bool,
extra_json_path: Path_Str,
) -> Optional[ActionsDict]:
"""Helper to provide custom (project level/user level) anonymization
rules as a mapping of tags -> action function.
Args:
        use_extra (bool): Whether to apply the extra rules.
        extra_json_path (Path_Str): Path to the extra-rules JSON file.
            It should be a flat JSON object with an action name as key and a list of tags as value.
Returns:
Optional[ActionsDict]: extra rules mapping (tags -> action function)
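
    Example:
        A hypothetical extra-rules file could look like this (tags written as
        decimal ``[group, element]`` pairs; the action names must be ones
        understood by ``initialize_actions``)::

            {"delete": [[16, 48], [16, 4144]], "keep": [[8, 32]]}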
"""
# Define the actions dict for additional tags (customization)
extra_rules = None
if use_extra:
# default or user provided path to extra rules json file
with open(extra_json_path, "r") as fout:
extra_rules = json.load(fout)
for key in extra_rules:
tag_list = extra_rules[key]
tag_list = [tuple(elem) for elem in tag_list]
extra_rules[key] = tag_list
extra_rules = initialize_actions(extra_rules)
return extra_rules
def anonymize_dicom_folder(
in_path: Path_Str, out_path: Path_Str, debug: bool = False, **kwargs
):
"""Anonymize dicom files in `in_path`, if `in_path` doesn't
contain dicom files, will do nothing. Debug == True will do
sort of dry run to check if all good for the large data storages
Args:
        in_path (Path_Str): path to the folder containing dicom files
        out_path (Path_Str): path to the folder where anonymized copies
            will be saved
        debug (bool): if true, will do a "dry" run (one random file per folder)
"""
# check and prepare
in_path = to_Path(in_path)
try_valid_dir(in_path)
out_path = to_Path(out_path)
out_path.mkdir(parents=True, exist_ok=True)
logger.info(f"Processing: {in_path}")
# work itself
in_files = [p for p in in_path.iterdir() if p.is_file()]
if not in_files:
logger.info(f"Folder {in_path} doesn't have dicom files, skip.")
return
if debug:
# anonymize just one file
f_in = random.choice(in_files)
f_out = out_path / f_in.name
try:
anonymize_dicom_file(f_in, f_out)
except Exception as e:
logger.info(f_in)
logger.exception(e)
raise e
else:
for f_in in in_files:
f_out = out_path / f_in.name
try:
anonymize_dicom_file(f_in, f_out, **kwargs)
except Exception as e:
logger.info(f_in)
logger.exception(e)
raise e
def anonymize_root_folder(
in_root: Path_Str,
out_root: Path_Str,
**kwargs,
):
"""The fuction will get all nested folders from `in_root`
and perform anonymization of all folders containg dicom-files
Will recreate the `in_root` folders structure in the `out_root`
Args:
in_root (Path_Str): source root folder (presumably has
some dicom-files inide, maybe nested)
out_root (Path_Str): destination root folder, will create
if not exists
"""
in_root = to_Path(in_root)
try_valid_dir(in_root)
out_root = to_Path(out_root)
out_root.mkdir(parents=True, exist_ok=True)
in_dirs = get_dirs(in_root)
state = AnonState(_STATE_PATH)
state.init_state()
state.load_state()
def get_tags_callback(dataset: pydicom.Dataset):
state.tag_counter.update(dataset.dir())
    logger.info(
        "Processed paths will be added to the cache; if the cache already "
        "contains some paths, those will be skipped"
    )
    logger.info(
        f"If you need to process the data again, please delete the files at {_STATE_PATH}"
    )
# will try to process all folders, if exception will dump state before raising
try:
for in_d in in_dirs:
rel_path = in_d.relative_to(in_root)
if str(rel_path) in state.visited_folders:
logger.info(f"{in_d} path is in cache, skipping")
continue
else:
out_d = out_root / rel_path
anonymize_dicom_folder(
in_d, out_d, ds_callback=get_tags_callback, **kwargs
)
# update state
state.visited_folders[str(rel_path)] = True
except Exception as e:
raise e
finally:
# before saving updated state let's flag tags not seen previously
prev_state = AnonState(_STATE_PATH)
prev_state.init_state()
prev_state.load_state()
new_tags = set(state.tag_counter.keys()).difference(
prev_state.tag_counter.keys()
)
if new_tags:
logger.warning(
f"During the anonymization new tags: {new_tags} were present"
)
else:
logger.info("No new tags werer present")
# now we can save the current state
state.save_state()
# Add CLI args
parser = argparse.ArgumentParser(description="Batch dicom-anonymization CLI")
parser.add_argument(
"--type",
type=str,
choices=["batch", "folder"],
default="batch",
help="Process only one folder - folder or all nested folders - batch, default = batch",
)
parser.add_argument(
"--extra-rules",
default="",
help="Path to json file defining extra rules for additional tags. Defalult in project.",
)
parser.add_argument(
"--no-extra",
action="store_true",
help="Only use a rules from DICOM-standard basic de-id profile",
)
parser.add_argument(
"--debug", action="store_true", help="Will do a dry run (one file per folder)"
)
parser.add_argument(
"src",
type=str,
help="Absolute path to the folder containing dicom-files or nested folders with dicom-files",
)
parser.add_argument(
"dst",
type=str,
help="Absolute path to the folder where to save anonymized copy of src",
)
def main():
# parse args
args = parser.parse_args()
in_path = Path(args.src)
out_path = Path(args.dst)
debug = args.debug
path = args.extra_rules
if not path:
path = PROJ_ROOT / "dicomanonymizer/resources/extra_rules.json"
extra_rules = get_extra_rules(use_extra=not args.no_extra, extra_json_path=path)
# fix known issue with dicom
fix_exposure()
msg = f"""
Start a job: {args.type}, debug set to {args.debug}
Will anonymize data at: {in_path} and save to {out_path}
"""
logger.info(msg)
# anonymize
if args.type == "batch":
anonymize_root_folder(
in_path, out_path, debug=debug, extra_anonymization_rules=extra_rules
)
elif args.type == "folder":
anonymize_dicom_folder(
in_path, out_path, debug=debug, extra_anonymization_rules=extra_rules
)
logger.info("Well done!")
if __name__ == "__main__":
main()
| 31.026923 | 118 | 0.655882 | [
"BSD-3-Clause"
] | ademyanchuk/dicom-anonymizer | dicomanonymizer/batch_anonymizer.py | 8,067 | Python |
import pickle
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('Train.csv')
# check for categorical attributes
cat_col = []
for x in df.dtypes.index:
if df.dtypes[x] == 'object':
cat_col.append(x)
cat_col.remove('Item_Identifier')
cat_col.remove('Outlet_Identifier')
item_weight_mean = df.pivot_table(values = "Item_Weight", index = 'Item_Identifier')
miss_bool = df['Item_Weight'].isnull()
# fill missing Item_Weight with the per-item mean (fall back to the overall mean)
for i, item in enumerate(df['Item_Identifier']):
    if miss_bool[i]:
        if item in item_weight_mean.index:
            df.loc[i, 'Item_Weight'] = item_weight_mean.loc[item]['Item_Weight']
        else:
            df.loc[i, 'Item_Weight'] = np.mean(df['Item_Weight'])
outlet_size_mode = df.pivot_table(values='Outlet_Size', columns='Outlet_Type', aggfunc=(lambda x: x.mode()[0]))
miss_bool = df['Outlet_Size'].isnull()
df.loc[miss_bool, 'Outlet_Size'] = df.loc[miss_bool, 'Outlet_Type'].apply(lambda x: outlet_size_mode[x])
# replace zeros with mean
df.loc[:, 'Item_Visibility'].replace([0], [df['Item_Visibility'].mean()], inplace=True)
# combine item fat content
df['Item_Fat_Content'] = df['Item_Fat_Content'].replace({'LF':'Low Fat', 'reg':'Regular', 'low fat':'Low Fat'})
df['Item_Fat_Content'].value_counts()
#Creation of New Attributes
df['New_Item_Type'] = df['Item_Identifier'].apply(lambda x: x[:2])
df['New_Item_Type'] = df['New_Item_Type'].map({'FD':'Food', 'NC':'Non-Consumable', 'DR':'Drinks'})
df.loc[df['New_Item_Type']=='Non-Consumable', 'Item_Fat_Content'] = 'Non-Edible'
# create small values for establishment year
df['Outlet_Years'] = 2013 - df['Outlet_Establishment_Year']
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
df['Outlet'] = le.fit_transform(df['Outlet_Identifier'])
cat_col = ['Item_Fat_Content', 'Item_Type', 'Outlet_Size', 'Outlet_Location_Type', 'Outlet_Type', 'New_Item_Type']
for col in cat_col:
df[col] = le.fit_transform(df[col])
#Input Split
X = df.drop(columns=['Outlet_Establishment_Year', 'Item_Identifier', 'Outlet_Identifier', 'Item_Outlet_Sales'])
Y = df['Item_Outlet_Sales']
#Model Training
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error
def train(model, X, Y):
    # train the model
    model.fit(X, Y)
    # predict the training set
    pred = model.predict(X)
    # perform cross-validation
    cv_score = cross_val_score(model, X, Y, scoring='neg_mean_squared_error', cv=5)
    cv_score = np.abs(np.mean(cv_score))
    # report training RMSE and the cross-validated score
    print("Train RMSE:", np.sqrt(mean_squared_error(Y, pred)))
    print("CV Score:", cv_score)
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor()
train(model, X, Y)
coef = pd.Series(model.feature_importances_, X.columns).sort_values(ascending=False)
# save the trained model to disk
with open('model.pkl', 'wb') as file:
    # dump the model to that file
    pickle.dump(model, file)
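# Illustrative follow-up (not part of the original script): reload the pickled
# model and predict. Any new data must go through the same preprocessing and
# label-encoding steps performed above.
#
#   with open('model.pkl', 'rb') as f:
#       loaded_model = pickle.load(f)
#   print(loaded_model.predict(X.head()))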
"MIT"
] | MayurJ-mike/Bigmart-analysis-ml | bigmart.py | 2,911 | Python |
"""State-Split Transformation
-----------------------------
(C) Frank-Rene Schaefer
The 'State-Split' is a procedure transforms a state machine that triggers on
some 'pure' values (e.g. Unicode Characters) into a state machine that triggers
on the code unit sequences (e.g. UTF8 Code Units) that correspond to the
original values. For example, a state transition on a Unicode Character
'0x1329D' as shown below,
[ A ]--->( 0x1329D )---->[ B ]
is translated into a sequence of UTF16 transitions with a new intermediate
state 'i' as follows.
[ A ]--( 0xD80C )-->[ i ]-->( 0xDE9E )-->[ B ]
This is so, since the character 0x1329D in Unicode is represented as the
sequence 0xD80C, 0xDE9E. The present algorithm exploits the fact that
translations of adjacent character result in sequences of adjacent intervals.
.----------------------------------------------------------------------------.
| This procedure is to be used for encodings of dynamic size, i.e. where the |
| number of code units to represent a 'pure' value changes depending on the |
| value itself (e.g. UTF8, UTF16). |
'----------------------------------------------------------------------------'
PRINCIPLE:
A state transition is described by a 'trigger set' and a target state. If an
input occurs that belongs to the 'trigger set' the state machine transits into
the specific target state. Trigger sets are composed of one ore more intervals
of adjacent values. If the encoding has some type of continuity, it can be
assumed that an interval in the pure values can be represented by a sequence of
intervals in the transformed state machine. This is, indeed true for the
encodings UTF8 and UTF16.
The algorithm below considers intervals of pure values and translates them
into interval sequences. All interval sequences of a trigger set that
triggers to a target state are then combined into a set of state transitions.
A unicode transition from state A to state B:
[ A ]-->(x0, x1)-->[ B ]
is translated into a chain of utf8-byte sequence transitions that might look
like this
[ A ]-->(b0)-->[ 1 ]-->(c0,c1)-->[ B ]
\ /
`->(d1)-->[ 2 ]---(e0,e1)---'
That means that intermediate states may be introduced to reflect the different
byte sequences that represent the original interval.
IDEAS:
In a simple approach one would translate each element of a interval into an
utf8-byte sequence and generate state transitions between A and B. Such an
approach, however, produces a huge computational overhead and charges the later
Hopcroft Minimization with a huge state machine.
To avoid such a huge computational effort, the Hopcroft Minimization can be
prepared on the basis of transition intervals.
(A) Backwards: In somewhat greater intervals, the following might occur:
.-->(d1)-->[ 1 ]---(A3,BF)---.
/ \
/ ,->(d1)-->[ 2 ]---(80,BF)--. \
/ / \ \
[ A ]-->(b0)-->[ 3 ]-->(80,BF)-->[ B ]
\ /
`->(d1)-->[ 4 ]---(80,81)---'
That means, that for states 2 and 3 the last transition is on [80, BF]
to state B. Thus, the intermediate states 2 and 3 are equivalent. Both
can be replaced by a single state.
(B) Forwards: The first couple of bytes in the correspondent utf8 sequences
might be the same. Then, no branch is required until the first differing
byte.
PROCESS:
(1) The original interval translated into a list of interval sequence
that represent the values in the target encoding.
(2) The interval sequences are plugged in between the state A and B
of the state machine.
"""
from quex.engine.state_machine.state.core import DFA_State
import quex.engine.state_machine.transformation.base as base
import quex.engine.state_machine.index as state_machine_index
from quex.engine.misc.interval_handling import NumberSet
from quex.engine.misc.tools import flatten_list_of_lists
from collections import defaultdict
class EncodingTrafoBySplit(base.EncodingTrafo):
"""Transformation that takes a lexatom and produces a lexatom sequence.
"""
def __init__(self, Name, ErrorRangeByCodeUnitDb):
base.EncodingTrafo.__init__(self, Name,
NumberSet.from_range(0, 0x110000),
ErrorRangeByCodeUnitDb)
def do_transition(self, from_target_map, FromSi, ToSi, BadLexatomSi):
"""Translates to transition 'FromSi' --> 'ToSi' inside the state
machine according to the specific coding (see derived class, i.e.
UTF8 or UTF16).
'BadLexatomSi' is None => no bad lexatom detection.
else, transitions to 'bad lexatom state' are added
on invalid code units.
RETURNS: [0] True if complete, False else.
[1] StateDb of newly generated states.
"""
number_set = from_target_map[ToSi]
# Check whether a modification is necessary
if number_set.least_greater_bound() <= self.UnchangedRange:
# 'UnchangedRange' => No change to numerical values.
return True, None
if not self.cut_forbidden_range(number_set):
# 'number_set' solely contains forbidden elements.
del from_target_map[ToSi]
return False, None
transformed_interval_sequence_list = flatten_list_of_lists(
self.get_interval_sequences(interval)
for interval in number_set.get_intervals(PromiseToTreatWellF=True)
)
# Second, enter the new transitions.
new_target_map, \
new_state_db = self.plug_interval_sequences(FromSi, ToSi,
transformed_interval_sequence_list,
BadLexatomSi)
# Absorb new transitions into the target map of the 'from state'.
del from_target_map[ToSi]
from_target_map.update(new_target_map)
return True, new_state_db
def _do_single(self, Code):
number_set = NumberSet.from_range(Code, Code+1)
if number_set.is_empty():
return -1
interval_list = number_set.get_intervals(PromiseToTreatWellF=True)
assert len(interval_list) == 1
interval_sequence_list = self.get_interval_sequences(interval_list[0])
# A single code element can only produce a single interval sequence!
assert len(interval_sequence_list) == 1
assert all(x.size() == 1 for x in interval_sequence_list[0])
return [x.begin for x in interval_sequence_list[0]]
def variable_character_sizes_f(self):
return True
def lexatom_n_per_character_in_state_machine(self, SM):
lexatom_n = None
for state in SM.states.itervalues():
for number_set in state.target_map.get_map().itervalues():
candidate_lexatom_n = self.lexatom_n_per_character(number_set)
if candidate_lexatom_n is None: return None
elif lexatom_n is None: lexatom_n = candidate_lexatom_n
elif lexatom_n != candidate_lexatom_n: return None
return lexatom_n
def hopcroft_minimization_always_makes_sense(self):
return True
def plug_interval_sequences(self, FromSi, ToSi, IntervalSequenceList,
BadLexatomSi):
"""Transform the list of interval sequences into intermediate state
transitions.
'BadLexatomSi' is None => no bad lexatom detection.
else, transitions to 'bad lexatom state' are added
on invalid code units.
RETURN: [0] Target map update for the first state.
[1] State Db update for intermediate states.
"""
def simplify(tm_db, tm_end_inv, ToSi):
"""Those states which trigger on the same intervals to 'ToSi' are
equivalent, i.e. can replaced by one state.
"""
# Find the states that trigger on the same interval list to the
# terminal 'ToSi'.
equivalence_db = {}
replacement_db = {}
for from_si, interval_list in tm_end_inv.iteritems():
key = tuple(sorted(interval_list))
equivalent_si = equivalence_db.get(key)
if equivalent_si is None: equivalence_db[key] = from_si
else: replacement_db[from_si] = equivalent_si
# Replace target states which are equivalent
result = {}
for from_si, tm in tm_db.iteritems():
new_tm = defaultdict(NumberSet)
for target_si, interval in tm.iteritems():
replacement_si = replacement_db.get(target_si)
if replacement_si is not None: target_si = replacement_si
new_tm[target_si].quick_append_interval(interval)
if any(number_set.is_empty() for si, number_set in new_tm.items()):
for si, number_set in new_tm.iteritems():
print "#sim", si, number_set
if from_si in tm_end_inv:
for interval in tm_end_inv[from_si]:
new_tm[ToSi].quick_append_interval(interval)
result[from_si] = new_tm
return result
tm_db, \
tm_end_inv, \
position_db = _get_intermediate_transition_maps(FromSi, ToSi,
IntervalSequenceList)
result_tm_db = simplify(tm_db, tm_end_inv, ToSi)
if BadLexatomSi is not None:
for si, position in position_db.iteritems():
                # The 'position 0' is done by 'do_state_machine'. It is concerned
# with the first state's transition.
assert position != 0
self._add_transition_to_bad_lexatom_detector(result_tm_db[si],
BadLexatomSi,
position)
for tm in result_tm_db.itervalues():
assert not any(number_set.is_empty() for number_set in tm.itervalues())
# Generate the target map to be inserted into state 'FromSi'.
# Generate list of intermediate states that implement the sequence
# of intervals.
first_tm = result_tm_db.pop(FromSi)
new_state_db = dict(
(si, DFA_State.from_TargetMap(tm)) for si, tm in result_tm_db.iteritems()
)
return first_tm, new_state_db
def __bunch_iterable(IntervalSequenceList, Index):
"""Iterate over sub-bunches of sequence in 'IntervalSequenceList' which are
    the same at the given 'Index'. The 'IntervalSequenceList' must be sorted!
That is, same intervals must be adjacent.
EXAMPLE:
Index = 1
IntervalSequenceList = [
[ interval01, interval12, interval21, ],
[ interval01, interval12, interval21, ],
[ interval02, interval12, interval22, interval30 ],
[ interval02, interval13, interval22, interval30 ],
[ interval02, interval13, interval23, ] ]
That is, the interval sequences are grouped according to groups where the
second interval (Index=1) is equal, the yields are as follows:
(1) [ [ interval01, interval12, interval21, ],
[ interval01, interval12, interval21, ] ]
(2) [ [ interval02, interval12, interval22, interval30 ] ]
(3) [ [ interval02, interval13, interval22, interval30 ],
[ interval02, interval13, interval23, ] ]
NOTE: Two sequences of different lengths are *never* grouped together
          -- this is deliberate.
The index is provided in order to avoid the creation of shorted sub-
sequences. Instead, the caller focusses on sub-sequences behind 'Index'.
Obviously, this function only makes sense if the intervals before 'Index'
are all the same.
YIELDS: [0] Interval which is the same for group of sequenes at 'Index'.
[1] Group of sequences.
[2] 'LastF' -- telling whether the interval is the last in the
sequence.
"""
prev_interval = None
prev_i = -1
prev_last_f = False
for i, sequence in enumerate(IntervalSequenceList):
interval = sequence[Index]
if interval.is_empty(): print "#bu:", interval; assert False
L = len(sequence)
last_f = L == Index + 1
if interval != prev_interval or last_f != prev_last_f:
if prev_i != -1:
yield prev_interval, IntervalSequenceList[prev_i:i], prev_last_f
prev_i = i
prev_interval = interval
prev_last_f = last_f
yield prev_interval, IntervalSequenceList[prev_i:], prev_last_f
def _get_intermediate_transition_maps(FromSi, ToSi, interval_sequence_list):
"""Several transitions are to be inserted in between state 'FromSi' and
'ToSi'. The transitions result from the list of sequences in
'interval_sequence_list'. This function develops the transition maps
of the states involved. Also, it notifies about the 'position' of each
state in the code unit sequence. Thus, the caller may insert error-detectors
on invalid code units.
FORBIDDEN: There cannot be a sequence that starts with the exact intervals
as a shorter sequences. Example:
[ (0, 1), (0, 2), (0, 3) ] #
[ (0, 1), (0, 2) ] # Bad, very bad!
This would mean that after (0, 1), (0, 2) the 'ToSi' is reached, but then
after (0, 3) again. The result is an *iteration* on 'ToSi'
--(0, 1)-->( A )--(0, 2)-->( ToSi )---->
| |
'-<-(0, 3)--'
Consequently, such a list of interval sequences cannot represent a linear
transition.
RETURNS: [0] Transition Map DB: state_index --> 'TransitionMap'
with TransitionMap: target_state_index --> Interval
That is 'TransitionMap[target_state_index]' tells through which
intervals the 'state_index' triggers to 'target_states'
The 'Transition Map DB' does not contain transitions to the
'ToSi'--the end state.
[1] Inverse End Transition Map:
Transitions to the end state are stored inversely:
from_state_index --> list of Interval-s
The end state can be reached by more than one interval, so a
list of Interval-s is associated with the transition
'from_state_index' to 'ToSi'.
             [2] PositionDB: state_index --> position in code unit sequence.
"""
# Sort the list of sequences, so that adjacent intervals are listed one
# after the other. This is necessary for '__bunch_iterable()' to function.
interval_sequence_list.sort()
worklist = [
# The state at 'BeginStateIndex' is concerned with the intervals
# at position '0' in the 'interval_sequence_list'. The list needs to
# be grouped according to the first interval, and for each distinct
# interval a transition to another state must be generated.
(FromSi, interval_sequence_list, 0)
]
tm_db = defaultdict(dict)
tm_end_inv = defaultdict(list)
position_db = {}
while worklist:
si, sequence_group, index = worklist.pop()
# -- State 'si' triggers on intervals at 'index' in 'sequence_group'.
tm = tm_db[si]
# -- State 'si' comes at position 'index' in a sequence of code units.
# (position of 'FromSi' shall not appear in the 'position_db' since
# the error detection of the first state is done in the caller.)
if si != FromSi: position_db[si] = index
# Group the sequences according to the interval at position 'index'.
for interval, sub_group, last_f in __bunch_iterable(sequence_group, index):
# Transit to new state for the given sub-group of sequences.
if not last_f:
# For each 'interval' a deliberate target state is generated.
# => each target state is only reached by a single Interval.
new_si = state_machine_index.get()
tm[new_si] = interval
worklist.append((new_si, sub_group, index+1))
else:
# If the 'interval' is the last in the sequence, the 'ToSi' is
# reached. Obviously this may/should happen more than once.
tm_end_inv[si].append(interval)
return tm_db, tm_end_inv, position_db
| 43.550505 | 91 | 0.606923 | [
"MIT"
] | smmckay/quex-mirror | quex/engine/state_machine/transformation/state_split.py | 17,246 | Python |
import os
import platform
import ctypes
import subprocess
import distutils.command.build_py
from distutils.core import setup
class build_cnmpc(distutils.command.build_py.build_py):
description = """Build the CNMPC shared library"""
def run(self):
subprocess.call("cmake -DCMAKE_BUILD_TYPE=Release . && make cnmpc " +
"&& cp -r c ./python/nmpc/",
shell=True,
cwd=os.path.dirname(os.path.abspath(__file__)))
subprocess.call("cmake -DCMAKE_BUILD_TYPE=Release . " +
" && make c66nmpc && cp -r ccs-c66x ./python/nmpc/",
shell=True,
cwd=os.path.dirname(os.path.abspath(__file__)))
self.data_files = self.get_data_files()
distutils.command.build_py.build_py.run(self)
setup(
name="nmpc",
url="https://github.com/sfwa/nmpc",
author="Daniel Dyer",
author_email="",
version="1.0.0",
description="NMPC library for UAV model predictive control",
long_description=open("README.md").read(),
package_dir={"": "python"},
packages=["nmpc"],
package_data={"nmpc": ["c/cnmpc.dll", "c/libcnmpc.so", "c/libcnmpc.dylib",
"ccs-c66x/c66nmpc.dll", "ccs-c66x/libc66nmpc.so",
"ccs-c66x/libc66nmpc.dylib"]},
license="MIT License",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries"
],
cmdclass={"build_py": build_cnmpc}
)
| 35.1875 | 78 | 0.58733 | [
"MPL-2.0"
] | GCTMODS/nmpc | setup.py | 1,689 | Python |
# small demo for listmode TOF MLEM without subsets
import os
import matplotlib.pyplot as py
import pyparallelproj as ppp
from pyparallelproj.wrapper import joseph3d_fwd, joseph3d_fwd_tof, joseph3d_back, joseph3d_back_tof
import numpy as np
import argparse
import ctypes
from time import time
#---------------------------------------------------------------------------------
# parse the command line
parser = argparse.ArgumentParser()
parser.add_argument('--ngpus', help = 'number of GPUs to use', default = 0, type = int)
parser.add_argument('--nsubsets', help = 'number of subsets', default = 28, type = int)
parser.add_argument('--tpb', help = 'threads per block', default = 64, type = int)
parser.add_argument('--nontof', help = 'non-TOF instead of TOF', action = 'store_true')
parser.add_argument('--img_mem_order', help = 'memory layout for image', default = 'C',
choices = ['C','F'])
args = parser.parse_args()
#---------------------------------------------------------------------------------
ngpus = args.ngpus
nsubsets = args.nsubsets
tpb = args.tpb
tof = not args.nontof
img_mem_order = args.img_mem_order
subset = 0
if tof:
ntofbins = 27
else:
ntofbins = 1
np.random.seed(1)
#---------------------------------------------------------------------------------
# setup a scanner with one ring
scanner = ppp.RegularPolygonPETScanner(ncrystals_per_module = np.array([16,9]),
nmodules = np.array([28,5]))
# setup a test image
voxsize = np.array([2.,2.,2.])
n0 = 250
n1 = 250
n2 = max(1,int((scanner.xc2.max() - scanner.xc2.min()) / voxsize[2]))
# setup a random image
img = np.zeros((n0,n1,n2), dtype = np.float32, order = img_mem_order)
img[(n0//6):(5*n0//6),(n1//6):(5*n1//6),:] = 1
img_origin = (-(np.array(img.shape) / 2) + 0.5) * voxsize
# generate sinogram parameters and the projector
sd = np.array([[0,1,2],
[0,2,1],
[1,2,0],
[1,0,2],
[2,0,1],
[2,1,0]])
for sdo in sd:
sino_params = ppp.PETSinogramParameters(scanner, ntofbins = ntofbins, tofbin_width = 23.,
spatial_dim_order = sdo)
proj = ppp.SinogramProjector(scanner, sino_params, img.shape, nsubsets = nsubsets,
voxsize = voxsize, img_origin = img_origin, ngpus = ngpus,
tof = tof, sigma_tof = 60./2.35, n_sigmas = 3.,
threadsperblock = tpb)
# do a forward / back projection of subset 0 - same as img_fwd = proj.fwd_project(img, 0)
# we just write out the single steps to time the python overhead separately
#img_fwd = proj.fwd_project(img, 0)
#ones_sino = np.ones(img_fwd.shape, dtype = np.float32)
#back = proj.back_project(ones_sino, 0)
subset_slice = proj.subset_slices[subset]
sigma_tof = np.full(proj.nLORs[subset], proj.sigma_tof, dtype = ctypes.c_float).ravel()
tofcenter_offset = np.zeros(proj.nLORs[subset], dtype = ctypes.c_float).ravel()
xstart = proj.xstart[subset_slice].ravel()
xend = proj.xend[subset_slice].ravel()
img_ravel = img.ravel(order = img_mem_order)
subset_nLORs = proj.nLORs[subset]
img_fwd = np.zeros(subset_nLORs*proj.ntofbins, dtype = ctypes.c_float)
back_img = np.zeros(proj.nvox, dtype = ctypes.c_float)
sino = np.ones(subset_nLORs*proj.ntofbins, dtype = ctypes.c_float)
#--- time fwd projection
t0 = time()
if tof:
ok = joseph3d_fwd_tof(xstart, xend, img_ravel, proj.img_origin, proj.voxsize,
img_fwd, subset_nLORs, proj.img_dim,
proj.tofbin_width, sigma_tof, tofcenter_offset,
proj.nsigmas, proj.ntofbins,
threadsperblock = proj.threadsperblock, ngpus = proj.ngpus, lm = False)
else:
ok = joseph3d_fwd(xstart, xend, img_ravel, proj.img_origin, proj.voxsize,
img_fwd, subset_nLORs, proj.img_dim,
threadsperblock = proj.threadsperblock, ngpus = proj.ngpus, lm = False)
t1 = time()
#--- time back projection
t2 = time()
if tof:
ok = joseph3d_back_tof(xstart, xend, back_img, proj.img_origin, proj.voxsize,
sino, subset_nLORs, proj.img_dim,
proj.tofbin_width, sigma_tof, tofcenter_offset,
proj.nsigmas, proj.ntofbins,
threadsperblock = proj.threadsperblock, ngpus = proj.ngpus, lm = False)
else:
ok = joseph3d_back(xstart, xend, back_img, proj.img_origin, proj.voxsize,
sino, subset_nLORs, proj.img_dim,
threadsperblock = proj.threadsperblock, ngpus = proj.ngpus, lm = False)
t3 = time()
print(f'{sdo} {t1-t0} {t3-t2}')
| 38.379845 | 99 | 0.576247 | [
"MIT"
] | KrisThielemans/parallelproj | examples/projector_order_test.py | 4,951 | Python |
import asyncio
import random
import re
import textwrap
import discord
from .. import utils, errors, cmd
from ..servermodule import ServerModule, registered
from ..enums import PrivilegeLevel
@registered
class TruthGame(ServerModule):
MODULE_NAME = "Truth Game"
MODULE_SHORT_DESCRIPTION = "Tools to play *Truth*."
RECOMMENDED_CMD_NAMES = ["truth", "troof", "trufe"]
_SECRET_TOKEN = utils.SecretToken()
_cmdd = {}
_HELP_SUMMARY = """
`{modhelp}` - Truth game.
"""
DEFAULT_SETTINGS = {
"enabled channels": []
}
_PARTICIPANT_DELIMITER = " --> "
_RULES_STRING = textwrap.dedent("""
**Rules for a game of _Truth_**:
idk, ask the people playing it.
""").strip()
async def _initialize(self, resources):
self._client = resources.client
self._res = resources
self._enabled_channels = None
self._load_settings()
self._res.suppress_autokill(True)
return
def _load_settings(self):
settings = self._res.get_settings(default=self.DEFAULT_SETTINGS)
self._enabled_channels = []
try:
self._enabled_channels = settings["enabled channels"]
if self._enabled_channels is None:
print("DEBUGGING: truthgame.py TruthGame._load_settings() enabled channels is None!")
self._enabled_channels = []
except KeyError:
self._enabled_channels = settings["enabled channels"] = []
self._res.save_settings(settings)
return
def _save_settings(self):
settings = self._res.get_settings()
settings["enabled channels"] = self._enabled_channels
self._res.save_settings(settings)
return
@cmd.add(_cmdd, "rules")
async def _cmdf_enable(self, substr, msg, privilege_level):
"""`{cmd}` - View game rules."""
await self._client.send_msg(msg, self._RULES_STRING)
return
@cmd.add(_cmdd, "newgame", top=True)
@cmd.minimum_privilege(PrivilegeLevel.TRUSTED)
async def _cmdf_newgame(self, substr, msg, privilege_level):
"""`{cmd}` - New game."""
channel = msg.channel
await self._abort_if_not_truth_channel(channel)
await self._new_game(channel)
await self._client.send_msg(channel, "Truth game cleared.")
return
@cmd.add(_cmdd, "in", top=True)
async def _cmdf_in(self, substr, msg, privilege_level):
"""
`{cmd}` - Adds you to the game.
This command also allows moderators to add other users and arbitrary strings as participants.
**Example:** `{cmd} an elephant` - Adds "an elephant" as a participant.
"""
channel = msg.channel
await self._abort_if_not_truth_channel(channel)
new_participant = None
if (privilege_level < PrivilegeLevel.MODERATOR) or (len(substr) == 0):
new_participant = "<@" + msg.author.id + ">"
else:
new_participant = substr
if self._PARTICIPANT_DELIMITER in new_participant:
await self._client.send_msg(channel, "Error: Not allowed to use the delimiter characters.")
raise errors.OperationAborted
if new_participant in self._get_participants(channel):
await self._client.send_msg(channel, "Error: {} is already a participant.".format(new_participant))
else:
await self._add_participant(channel, new_participant)
await self._client.send_msg(channel, "Added {} to the game.".format(new_participant))
return
@cmd.add(_cmdd, "out", top=True)
async def _cmdf_out(self, substr, msg, privilege_level):
"""
`{cmd}` - Removes you from the game.
This command also allows moderators to remove other users and arbitrary strings.
**Example:** `{cmd} an elephant` - Removes "an elephant" as a participant.
"""
channel = msg.channel
await self._abort_if_not_truth_channel(channel)
participant = None
if (privilege_level < PrivilegeLevel.MODERATOR) or (len(substr) == 0):
participant = "<@" + msg.author.id + ">"
else:
participant = substr
if participant in self._get_participants(channel):
await self._remove_participant(channel, participant)
await self._client.send_msg(channel, "Removed {} from the game.".format(participant))
else:
await self._client.send_msg(channel, "Error: {} is not already a participant.".format(participant))
return
@cmd.add(_cmdd, "enablechannel")
@cmd.minimum_privilege(PrivilegeLevel.ADMIN)
async def _cmdf_enable(self, substr, msg, privilege_level):
"""`{cmd}` - Enable Truth in this channel."""
channel = msg.channel
if channel.id in self._enabled_channels:
await self._client.send_msg(channel, "This channel is already a Truth game channel.")
else:
self._enabled_channels.append(channel.id)
self._save_settings()
await self._client.send_msg(channel, "This channel is now a Truth game channel.")
return
@cmd.add(_cmdd, "disablechannel")
@cmd.minimum_privilege(PrivilegeLevel.ADMIN)
async def _cmdf_disable(self, substr, msg, privilege_level):
"""`{cmd}` - Disable Truth in this channel."""
channel = msg.channel
if channel.id in self._enabled_channels:
self._enabled_channels.remove(channel.id)
self._save_settings()
await self._client.send_msg(channel, "This channel is no longer a Truth game channel.")
else:
await self._client.send_msg(channel, "This channel is not a Truth game channel.")
return
@cmd.add(_cmdd, "viewenabled")
async def _cmdf_viewenabled(self, substr, msg, privilege_level):
"""`{cmd}` - View all channels that are enabled as Truth channels."""
buf = None
if len(self._enabled_channels) == 0:
buf = "No channels have Truth game enabled."
else:
buf = "**Truth game enabled channels:**"
for channel_id in self._enabled_channels:
buf += "\n<#{0}> (ID: {0})".format(channel_id)
await self._client.send_msg(msg, buf)
return
# TODO: Edit this to use the topic string abstraction methods.
   # Currently, it only considers user mentions to be participants!
@cmd.add(_cmdd, "choose", "random", "rand")
async def _cmdf_choosetruth(self, substr, msg, privilege_level):
"""`{cmd}` - Pick a random participant other than yourself."""
topic = msg.channel.topic
if topic is None:
await self._client.send_msg(msg, "There doesn't appear to be a truth game in here.")
raise errors.OperationAborted
mentions = utils.get_all_mentions(topic)
if len(mentions) == 0:
await self._client.send_msg(msg, "There doesn't appear to be a truth game in here.")
raise errors.OperationAborted
try:
mentions.remove(msg.author.id)
if len(mentions) == 0:
await self._client.send_msg(msg, "<@{}>".format(msg.author.id))
raise errors.OperationAborted
except ValueError:
pass
choice = random.choice(mentions)
buf = "<@{}>\n".format(choice)
buf += "My choices were: "
for mention in mentions:
user = self._client.search_for_user(mention, enablenamesearch=False, serverrestriction=self._res.server)
if user is None:
buf += "<@{}>, ".format(mention)
else:
buf += "{}, ".format(user.name)
buf = buf[:-2]
await self._client.send_msg(msg, buf)
return
################################
### TOPIC STRING ABSTRACTION ###
################################
def _get_participants(self, channel):
topic = channel.topic
if topic is None:
return []
return topic.split(self._PARTICIPANT_DELIMITER)
# PRECONDITION: participant_str contains printable characters.
# PRECONDITION: participant_str does not contain the delimiter.
async def _add_participant(self, channel, participant_str):
topic = channel.topic
new_topic = None
if topic == "":
new_topic = participant_str
else:
new_topic = topic + self._PARTICIPANT_DELIMITER + participant_str
await self._client.edit_channel(channel, topic=new_topic)
return
# PRECONDITION: participant_str in self._get_participants(channel)
async def _remove_participant(self, channel, participant_str):
participants_list = self._get_participants(channel)
participants_list.remove(participant_str)
new_topic = self._PARTICIPANT_DELIMITER.join(participants_list)
await self._client.edit_channel(channel, topic=new_topic)
return
async def _new_game(self, channel):
await self._client.edit_channel(channel, topic="")
return
########################
### GENERAL SERVICES ###
########################
async def _abort_if_not_truth_channel(self, channel):
if not channel.id in self._enabled_channels:
await self._client.send_msg(channel, "Error: Truth isn't enabled on this channel.")
raise errors.OperationAborted
return
| 36.166667 | 113 | 0.650099 | [
"MIT"
] | simshadows/Discord-mentionbot | mentionbot/servermodules/truthgame.py | 9,114 | Python |
"""
Module for all Form Tests.
"""
import pytest
from django.utils.translation import gettext_lazy as _
from my_blog.users.forms import UserCreationForm
from my_blog.users.models import User
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
"""
Test class for all tests related to the UserCreationForm
"""
def test_username_validation_error_msg(self, user: User):
"""
        Tests that the UserCreationForm's unique-username validator functions correctly by checking:
1) A new user with an existing username cannot be added.
2) Only 1 error is raised by the UserCreation Form
3) The desired error message is raised
"""
# The user already exists,
# hence cannot be created.
form = UserCreationForm(
{
"username": user.username,
"password1": user.password,
"password2": user.password,
}
)
assert not form.is_valid()
assert len(form.errors) == 1
assert "username" in form.errors
assert form.errors["username"][0] == _("This username has already been taken.")
| 29.075 | 87 | 0.628547 | [
"MIT"
] | Tanishk-Sharma/my_blog | my_blog/users/tests/test_forms.py | 1,163 | Python |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Base operator for SQL to GCS operators.
"""
import abc
import json
import warnings
from tempfile import NamedTemporaryFile
import unicodecsv as csv
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.utils.decorators import apply_defaults
class BaseSQLToGCSOperator(BaseOperator, metaclass=abc.ABCMeta):
"""
:param sql: The SQL to execute.
:type sql: str
:param bucket: The bucket to upload to.
:type bucket: str
:param filename: The filename to use as the object name when uploading
to Google Cloud Storage. A {} should be specified in the filename
to allow the operator to inject file numbers in cases where the
file is split due to size.
:type filename: str
:param schema_filename: If set, the filename to use as the object name
when uploading a .json file containing the BigQuery schema fields
for the table that was dumped from the database.
:type schema_filename: str
:param approx_max_file_size_bytes: This operator supports the ability
to split large table dumps into multiple files (see notes in the
filename param docs above). This param allows developers to specify the
file size of the splits. Check https://cloud.google.com/storage/quotas
to see the maximum allowed file size for a single object.
:type approx_max_file_size_bytes: long
:param export_format: Desired format of files to be exported.
:type export_format: str
:param field_delimiter: The delimiter to be used for CSV files.
:type field_delimiter: str
:param gzip: Option to compress file for upload (does not apply to schemas).
:type gzip: bool
:param schema: The schema to use, if any. Should be a list of dict or
a str. Pass a string if using Jinja template, otherwise, pass a list of
dict. Examples could be seen: https://cloud.google.com/bigquery/docs
/schemas#specifying_a_json_schema_file
:type schema: str or list
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:param google_cloud_storage_conn_id: (Deprecated) The connection ID used to connect to Google Cloud
Platform. This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
:type google_cloud_storage_conn_id: str
:param delegate_to: The account to impersonate, if any. For this to
work, the service account making the request must have domain-wide
delegation enabled.
:param parameters: a parameters dict that is substituted at query runtime.
:type parameters: dict
"""
template_fields = ('sql', 'bucket', 'filename', 'schema_filename', 'schema', 'parameters')
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(self, # pylint: disable=too-many-arguments
sql,
bucket,
filename,
schema_filename=None,
approx_max_file_size_bytes=1900000000,
export_format='json',
field_delimiter=',',
gzip=False,
schema=None,
parameters=None,
gcp_conn_id='google_cloud_default',
google_cloud_storage_conn_id=None,
delegate_to=None,
*args,
**kwargs):
super().__init__(*args, **kwargs)
if google_cloud_storage_conn_id:
warnings.warn(
"The google_cloud_storage_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.", DeprecationWarning, stacklevel=3)
gcp_conn_id = google_cloud_storage_conn_id
self.sql = sql
self.bucket = bucket
self.filename = filename
self.schema_filename = schema_filename
self.approx_max_file_size_bytes = approx_max_file_size_bytes
self.export_format = export_format.lower()
self.field_delimiter = field_delimiter
self.gzip = gzip
self.schema = schema
self.parameters = parameters
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.parameters = parameters
def execute(self, context):
cursor = self.query()
files_to_upload = self._write_local_data_files(cursor)
# If a schema is set, create a BQ schema JSON file.
if self.schema_filename:
files_to_upload.append(self._write_local_schema_file(cursor))
# Flush all files before uploading
for tmp_file in files_to_upload:
tmp_file['file_handle'].flush()
self._upload_to_gcs(files_to_upload)
# Close all temp file handles.
for tmp_file in files_to_upload:
tmp_file['file_handle'].close()
def convert_types(self, schema, col_type_dict, row):
"""Convert values from DBAPI to output-friendly formats."""
return [
self.convert_type(value, col_type_dict.get(name))
for name, value in zip(schema, row)
]
def _write_local_data_files(self, cursor):
"""
Takes a cursor, and writes results to a local file.
:return: A dictionary where keys are filenames to be used as object
names in GCS, and values are file handles to local files that
contain the data for the GCS objects.
"""
schema = list(map(lambda schema_tuple: schema_tuple[0], cursor.description))
col_type_dict = self._get_col_type_dict()
file_no = 0
tmp_file_handle = NamedTemporaryFile(delete=True)
if self.export_format == 'csv':
file_mime_type = 'text/csv'
else:
file_mime_type = 'application/json'
files_to_upload = [{
'file_name': self.filename.format(file_no),
'file_handle': tmp_file_handle,
'file_mime_type': file_mime_type
}]
if self.export_format == 'csv':
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
for row in cursor:
# Convert datetime objects to utc seconds, and decimals to floats.
# Convert binary type object to string encoded with base64.
row = self.convert_types(schema, col_type_dict, row)
if self.export_format == 'csv':
csv_writer.writerow(row)
else:
row_dict = dict(zip(schema, row))
# TODO validate that row isn't > 2MB. BQ enforces a hard row size of 2MB.
tmp_file_handle.write(json.dumps(row_dict, sort_keys=True).encode('utf-8'))
# Append newline to make dumps BigQuery compatible.
tmp_file_handle.write(b'\n')
# Stop if the file exceeds the file size limit.
if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
file_no += 1
tmp_file_handle = NamedTemporaryFile(delete=True)
files_to_upload.append({
'file_name': self.filename.format(file_no),
'file_handle': tmp_file_handle,
'file_mime_type': file_mime_type
})
if self.export_format == 'csv':
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
return files_to_upload
def _configure_csv_file(self, file_handle, schema):
"""Configure a csv writer with the file_handle and write schema
as headers for the new file.
"""
csv_writer = csv.writer(file_handle, encoding='utf-8',
delimiter=self.field_delimiter)
csv_writer.writerow(schema)
return csv_writer
@abc.abstractmethod
def query(self):
"""Execute DBAPI query."""
@abc.abstractmethod
def field_to_bigquery(self, field):
"""Convert a DBAPI field to BigQuery schema format."""
@abc.abstractmethod
def convert_type(self, value, schema_type):
"""Convert a value from DBAPI to output-friendly formats."""
def _get_col_type_dict(self):
"""
Return a dict of column name and column type based on self.schema if not None.
"""
schema = []
if isinstance(self.schema, str):
schema = json.loads(self.schema)
elif isinstance(self.schema, list):
schema = self.schema
elif self.schema is not None:
self.log.warning('Using default schema due to unexpected type.'
'Should be a string or list.')
col_type_dict = {}
try:
col_type_dict = {col['name']: col['type'] for col in schema}
except KeyError:
self.log.warning('Using default schema due to missing name or type. Please '
'refer to: https://cloud.google.com/bigquery/docs/schemas'
'#specifying_a_json_schema_file')
return col_type_dict
def _write_local_schema_file(self, cursor):
"""
Takes a cursor, and writes the BigQuery schema for the results to a
local file system.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format.
"""
schema = [self.field_to_bigquery(field) for field in cursor.description]
self.log.info('Using schema for %s: %s', self.schema_filename, schema)
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
tmp_schema_file_handle.write(json.dumps(schema, sort_keys=True).encode('utf-8'))
schema_file_to_upload = {
'file_name': self.schema_filename,
'file_handle': tmp_schema_file_handle,
'file_mime_type': 'application/json',
}
return schema_file_to_upload
def _upload_to_gcs(self, files_to_upload):
"""
Upload all of the file splits (and optionally the schema .json file) to
Google Cloud Storage.
"""
hook = GCSHook(
google_cloud_storage_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
for tmp_file in files_to_upload:
hook.upload(self.bucket, tmp_file.get('file_name'),
tmp_file.get('file_handle').name,
mime_type=tmp_file.get('file_mime_type'),
gzip=self.gzip if tmp_file.get('file_name') == self.schema_filename else False)
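# Illustrative sketch (not part of this module) of what a concrete subclass must
# provide: implementations of query(), field_to_bigquery() and convert_type().
# The hook, connection id and type mapping below are hypothetical placeholders,
# not the API of any real provider operator.
#
#   class MySQLToGCSExampleOperator(BaseSQLToGCSOperator):
#
#       def query(self):
#           # open a DBAPI connection and return a cursor with self.sql executed
#           cursor = MyDatabaseHook(my_conn_id='my_db').get_conn().cursor()  # hypothetical
#           cursor.execute(self.sql, self.parameters)
#           return cursor
#
#       def field_to_bigquery(self, field):
#           # field is one entry of cursor.description: (name, type_code, ...)
#           return {'name': field[0], 'type': 'STRING', 'mode': 'NULLABLE'}
#
#       def convert_type(self, value, schema_type):
#           # convert DBAPI values (datetime, Decimal, bytes, ...) into
#           # JSON/CSV-serializable values
#           return value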
| 41.273381 | 104 | 0.643891 | [
"Apache-2.0"
] | FRI-DAY/airflow | airflow/providers/google/cloud/operators/sql_to_gcs.py | 11,474 | Python |
from shallowflow.api.source import AbstractListOutputSource
from shallowflow.api.config import Option
class ForLoop(AbstractListOutputSource):
"""
Outputs an integer from the specified range.
"""
def description(self):
"""
Returns a description for the actor.
:return: the actor description
:rtype: str
"""
return "Outputs an integer from the specified range."
def _define_options(self):
"""
For configuring the options.
"""
super()._define_options()
self._option_manager.add(Option(name="start", value_type=int, def_value=1,
help="The starting value"))
self._option_manager.add(Option(name="end", value_type=int, def_value=10,
help="The last value (incl)"))
self._option_manager.add(Option(name="step", value_type=int, def_value=1,
help="The increment between values"))
def _get_item_type(self):
"""
Returns the type of the individual items that get generated, when not outputting a list.
:return: the type that gets generated
"""
return int
def setup(self):
"""
Prepares the actor for use.
:return: None if successful, otherwise error message
:rtype: str
"""
result = super().setup()
if result is None:
if self.get("end") < self.get("start"):
result = "End value (%s) must be smaller than start (%d)!" % (self.get("end"), self.get("start"))
return result
def _do_execute(self):
"""
Performs the actual execution.
:return: None if successful, otherwise error message
:rtype: str
"""
i = self.get("start")
step = self.get("step")
end = self.get("end")
while i <= end:
self._output.append(i)
i += step
return None
| 30.545455 | 113 | 0.556052 | [
"MIT"
] | waikato-datamining/shallow-flow | base/src/shallowflow/base/sources/_ForLoop.py | 2,016 | Python |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.vision_v1p3beta1.types import image_annotator
from google.longrunning import operations_pb2 # type: ignore
from .base import ImageAnnotatorTransport, DEFAULT_CLIENT_INFO
class ImageAnnotatorGrpcTransport(ImageAnnotatorTransport):
"""gRPC backend transport for ImageAnnotator.
Service that performs Google Cloud Vision API detection tasks
over client images, such as face, landmark, logo, label, and
text detection. The ImageAnnotator service returns detected
entities from the images.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'vision.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'vision.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def batch_annotate_images(self) -> Callable[
[image_annotator.BatchAnnotateImagesRequest],
image_annotator.BatchAnnotateImagesResponse]:
r"""Return a callable for the batch annotate images method over gRPC.
Run image detection and annotation for a batch of
images.
Returns:
Callable[[~.BatchAnnotateImagesRequest],
~.BatchAnnotateImagesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'batch_annotate_images' not in self._stubs:
self._stubs['batch_annotate_images'] = self.grpc_channel.unary_unary(
'/google.cloud.vision.v1p3beta1.ImageAnnotator/BatchAnnotateImages',
request_serializer=image_annotator.BatchAnnotateImagesRequest.serialize,
response_deserializer=image_annotator.BatchAnnotateImagesResponse.deserialize,
)
return self._stubs['batch_annotate_images']
@property
def async_batch_annotate_files(self) -> Callable[
[image_annotator.AsyncBatchAnnotateFilesRequest],
operations_pb2.Operation]:
r"""Return a callable for the async batch annotate files method over gRPC.
Run asynchronous image detection and annotation for a list of
generic files, such as PDF files, which may contain multiple
pages and multiple images per page. Progress and results can be
retrieved through the ``google.longrunning.Operations``
interface. ``Operation.metadata`` contains ``OperationMetadata``
(metadata). ``Operation.response`` contains
``AsyncBatchAnnotateFilesResponse`` (results).
Returns:
Callable[[~.AsyncBatchAnnotateFilesRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'async_batch_annotate_files' not in self._stubs:
self._stubs['async_batch_annotate_files'] = self.grpc_channel.unary_unary(
'/google.cloud.vision.v1p3beta1.ImageAnnotator/AsyncBatchAnnotateFiles',
request_serializer=image_annotator.AsyncBatchAnnotateFilesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['async_batch_annotate_files']
def close(self):
self.grpc_channel.close()
__all__ = (
'ImageAnnotatorGrpcTransport',
)
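# Illustrative sketch: one way the create_channel() classmethod documented
# above might be exercised.  It assumes Application Default Credentials are
# available in the environment and that create_channel is a classmethod, as
# its use of cls.AUTH_SCOPES suggests; the host is the public Vision endpoint.
if __name__ == '__main__':
    channel = ImageAnnotatorGrpcTransport.create_channel(
        'vision.googleapis.com',
        scopes=None,             # fall back to the transport's AUTH_SCOPES
        quota_project_id=None,
    )
    print(type(channel))         # a grpc.Channel instance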
| 45.822581 | 94 | 0.638226 | [
"Apache-2.0"
] | googleapis/googleapis-gen | google/cloud/vision/v1p3beta1/vision-v1p3beta1-py/google/cloud/vision_v1p3beta1/services/image_annotator/transports/grpc.py | 14,205 | Python |
import io
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
def resize_axis(tensor, axis, new_size, fill_value=0, random_sampling=False):
"""Truncates or pads a tensor to new_size on on a given axis.
Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
size increases, the padding will be performed at the end, using fill_value.
Args:
tensor: The tensor to be resized.
axis: An integer representing the dimension to be sliced.
new_size: An integer or 0d tensor representing the new value for
tensor.shape[axis].
fill_value: Value to use to fill any new entries in the tensor. Will be
        cast to the type of tensor.
      random_sampling: If True and the tensor is being truncated, indices along
        the axis may be sampled at random (with replacement) instead of taking
        a contiguous slice.
Returns:
The resized tensor.
"""
tensor = torch.Tensor(tensor)
shape = list(tensor.shape)
pad_shape = shape[:]
pad_shape[axis] = max(0, new_size - shape[axis])
start = 0 if shape[axis] <= new_size else np.random.randint(
shape[axis] - new_size) # random clip
old_length = shape[axis]
shape[axis] = min(shape[axis], new_size)
resized = torch.cat([
torch.index_select(tensor, dim=axis, index=torch.randint(old_length, (new_size,))
) if start > 0 and random_sampling else torch.narrow(tensor, dim=axis, start=start, length=shape[axis]),
torch.Tensor(*pad_shape).fill_(fill_value)
], dim=axis)
return resized
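# Illustrative check of resize_axis: pad a (2, 3) tensor to length 5 on axis 1
# (new entries take fill_value), then truncate it back down to length 2.
if __name__ == '__main__':
    x = torch.arange(6, dtype=torch.float32).reshape(2, 3)
    padded = resize_axis(x, axis=1, new_size=5, fill_value=-1)
    truncated = resize_axis(x, axis=1, new_size=2)
    print(padded.shape, truncated.shape)  # torch.Size([2, 5]) torch.Size([2, 2])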
class CircleLoss(torch.nn.Module):
def __init__(self, m=0.25, gamma=256):
super(CircleLoss, self).__init__()
self.m = m
self.gamma = gamma
self.loss = torch.nn.CrossEntropyLoss()
def forward(self, logits, labels):
alpha = torch.clamp_min(logits + self.m, min=0).detach() # an
alpha[labels] = torch.clamp_min(-logits[labels] + 1 + self.m, min=0).detach() # ap
delta = torch.ones_like(logits, device=logits.device, dtype=logits.dtype) * self.m # delta_n
delta[labels] = 1 - self.m # delta_p
        return self.loss(alpha * (logits - delta) * self.gamma, labels)
| 37.033333 | 131 | 0.633663 | [
"Apache-2.0"
] | glee1228/segment_temporal_context_aggregation | utils.py | 2,222 | Python |
from .infinity import INFINITY
import json
from typing import List, Tuple, Any, Type, Union, TypeVar, Generic, Optional, Dict, cast, Callable
T = TypeVar('T')
class PageProperty(Generic[T]):
"""
A class to represent a property that varies depending on the pages of a spectral sequence.
This is the main helper class that encapsulates any property of a class, edge, or chart
that varies depending on the page.
Examples:
>>> p = PageProperty(1)
>>> p[4] = 7
>>> p[2]
1
>>> p[4]
7
"""
def __init__(self,
value : T,
parent : Optional[Any] = None,
callback : Optional[Callable[[], None]] = None,
):
""" Initialize the PageProperty to always have value v."""
self._values : List[Tuple[int, T]] = [(0, value)]
self.set_parent(parent)
self._callback = callback
def set_parent(self, parent : Optional[Any]):
self._parent = parent
def set_callback(self, callback : Callable[[], None]):
self._callback = callback
def _needs_update(self):
if self._parent:
self._parent._needs_update()
if self._callback:
self._callback()
def _find_index(self, target_page : int) -> Tuple[int, bool]:
result_idx = None
for (idx, (page, _)) in enumerate(self._values):
if page > target_page:
break
result_idx = idx
# We need to help out the type checker here
if result_idx is None:
raise ValueError(f"Page Property indexed with negative index: {target_page}")
return (result_idx, self._values[result_idx][0] == target_page)
def __getitem__(self, x : Union[int, slice]) -> T:
stop = None
if type(x) == slice:
stop = x.stop or INFINITY
x = x.start or 0
if type(x) != int:
raise TypeError(f"Expected integer, got {type(x).__name__}.")
assert type(x) is int # Make type analysis thing happy
(idx, _) = self._find_index(x)
if stop:
(idx2, _) = self._find_index(stop - 1)
if idx != idx2:
raise ValueError("Indexed with slice but value is inconsistent across slice.")
return self._values[idx][1]
def __setitem__(self, p : Union[int, slice], v : T) -> None:
if hasattr(v, "set_parent"):
v.set_parent(self)
if type(p) is int:
self._setitem_single(p, v)
self._merge_redundant()
self._needs_update()
return
if type(p) is not slice:
raise TypeError("Excepted int or slice!")
start = p.start or 0
stop = p.stop or INFINITY
orig_value = self[stop]
(start_idx, _) = self._setitem_single(start, v)
(end_idx, hit_end) = self._find_index(stop)
if not hit_end and stop < INFINITY:
(end_idx, _) = self._setitem_single(stop, orig_value)
if stop == INFINITY:
end_idx += 1
del self._values[start_idx + 1 : end_idx]
self._merge_redundant()
self._needs_update()
def _setitem_single(self, p : int, v : T):
(idx, hit) = self._find_index(p)
if hit:
self._values[idx] = (p, v)
else:
idx += 1
self._values.insert(idx, (p, v))
return (idx, hit)
def _merge_redundant(self):
for i in range(len(self._values) - 1, 0, -1):
if self._values[i][1] == self._values[i-1][1]:
del self._values[i]
def __repr__(self) -> str:
values = ", ".join([f"{page}: {value}" for (page, value) in self._values])
return f"PageProperty{{{values}}}"
def __eq__(self, other):
if type(other) != PageProperty:
return False
return self._values == other._values
def map_values_in_place(self, f):
for i in range(len(self._values)):
(p, v) = self._values[i]
self._values[i] = (p, f(v))
def to_json(self) -> Dict[str, Any]:
if len(self._values) == 1:
return self._values[0][1]
else:
return {"type" : "PageProperty", "values" : self._values }
@staticmethod
def from_json(json_obj : Dict[str, Any]) -> "PageProperty[Any]":
result : PageProperty[Any] = PageProperty(None)
result._values = [cast(Tuple[int, Any], tuple(x)) for x in json_obj["values"]]
return result
S = TypeVar('S')
PagePropertyOrValue = Union[S, PageProperty[S]]
def ensure_page_property(v : PagePropertyOrValue[S], parent : Optional[Any] = None) -> PageProperty[S]:
if(type(v) is PageProperty):
result = v
else:
result = PageProperty(v)
if parent:
result.set_parent(parent)
    return result
| 34.520548 | 104 | 0.547619 | [
"Apache-2.0",
"MIT"
] | JoeyBF/sseq | chart/chart/python/spectralsequence_chart/page_property.py | 5,040 | Python |
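# Illustrative use of the PageProperty class from the record above.  The import
# path is an assumption based on the file's location in the package; adjust it
# to the actual installed layout.
from spectralsequence_chart.page_property import PageProperty
p = PageProperty("a")
p[3:7] = "b"               # pages 3..6 take "b"; page 7 onward reverts to "a"
print(p[2], p[5], p[9])    # a b a
print(p.to_json())         # {'type': 'PageProperty', 'values': [(0, 'a'), (3, 'b'), (7, 'a')]}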
# Generated by Django 2.1.3 on 2018-12-08 05:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('staf', '0008_auto_20181207_1525'),
]
operations = [
migrations.AddField(
model_name='dataset',
name='process',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='staf.Process'),
),
]
| 24.45 | 123 | 0.640082 | [
"MIT"
] | metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3 | src/staf/migrations/0009_dataset_process.py | 489 | Python |
"""about command for osxphotos CLI"""
from textwrap import dedent
import click
from osxphotos._constants import OSXPHOTOS_URL
from osxphotos._version import __version__
MIT_LICENSE = """
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
APACHE_2_0_LICENSE = """
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
BSD_3_CLAUSE_LICENSE = """
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be
used to endorse or promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
ISC_LICENSE = """
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
"""
LICENSE = dedent(
f"""
osxphotos is copyright (c) 2019-2022 by Rhet Turnbull and is licensed under the MIT license:
{MIT_LICENSE}
osxphotos uses the following 3rd party software licensed under the BSD-3-Clause License:
Click (Copyright 2014 Pallets), ptpython (Copyright (c) 2015, Jonathan Slenders)
{BSD_3_CLAUSE_LICENSE}
osxphotos uses the following 3rd party software licensed under the Apache 2.0 License:
tenacity (Copyright Julien Danjou)
{APACHE_2_0_LICENSE}
osxphotos uses the following 3rd party software licensed under the ISC License:
xdg (Copyright 2016-2021 Scott Stevenson <[email protected]>)
{ISC_LICENSE}
"""
)
@click.command(name="about")
@click.pass_obj
@click.pass_context
def about(ctx, cli_obj):
"""Print information about osxphotos including license."""
click.echo_via_pager(
f"osxphotos, version {__version__}\n\n"
f"Source code available at: {OSXPHOTOS_URL}\n"
f"{LICENSE}"
)
| 50.848387 | 104 | 0.748588 | [
"MIT"
] | oPromessa/osxphotos | osxphotos/cli/about.py | 15,763 | Python |
from abc import ABCMeta, abstractmethod
class Book(object, metaclass=ABCMeta):
    def __init__(self, title, author):
        self.title = title
        self.author = author
    @abstractmethod
    def display(self): pass
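# Illustrative concrete subclass: display() must be overridden before the
# abstract Book class above can be instantiated.
class PaperbackBook(Book):
    def display(self):
        print(f"{self.title} by {self.author}")
if __name__ == '__main__':
    PaperbackBook("Dune", "Frank Herbert").display()  # Dune by Frank Herbert
    # Book("Dune", "Frank Herbert") would raise TypeError (abstract class)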
| 27.375 | 40 | 0.680365 | [
"MIT"
] | SriCharan220800/RomanReigns | 13)Abstract classes.py | 219 | Python |
# -*- coding: utf-8 -*-
"""
Logging for the hubble daemon
"""
import logging
import logging.handlers
import time
import hubblestack.splunklogging
# These patterns will not be logged by "conf_publisher" and "emit_to_splunk"
PATTERNS_TO_FILTER = ["password", "token", "passphrase", "privkey",
"keyid", "s3.key", "splunk_token"]
# While hubble doesn't use these, salt modules can, so let's define them anyway
SPLUNK = logging.SPLUNK = 25
PROFILE = logging.PROFILE = 15
TRACE = logging.TRACE = 5
GARBAGE = logging.GARBAGE = 1
QUIET = logging.QUIET = 1000
LOG_LEVELS = {
'all': logging.NOTSET,
'debug': logging.DEBUG,
'error': logging.ERROR,
'critical': logging.CRITICAL,
'garbage': GARBAGE,
'info': logging.INFO,
'profile': PROFILE,
'quiet': QUIET,
'trace': TRACE,
'warning': logging.WARNING,
}
logging.addLevelName(SPLUNK, 'SPLUNK')
logging.addLevelName(QUIET, 'QUIET')
logging.addLevelName(PROFILE, 'PROFILE')
logging.addLevelName(TRACE, 'TRACE')
logging.addLevelName(GARBAGE, 'GARBAGE')
def _splunk(self, message, *args, **kwargs):
if self.isEnabledFor(logging.SPLUNK):
self._log(logging.SPLUNK, message, args, **kwargs)
def _quiet(self, message, *args, **kwargs):
if self.isEnabledFor(logging.QUIET):
self._log(logging.QUIET, message, args, **kwargs)
def _profile(self, message, *args, **kwargs):
if self.isEnabledFor(logging.PROFILE):
self._log(logging.PROFILE, message, args, **kwargs)
def _trace(self, message, *args, **kwargs):
if self.isEnabledFor(logging.TRACE):
self._log(logging.TRACE, message, args, **kwargs)
def _garbage(self, message, *args, **kwargs):
if self.isEnabledFor(logging.GARBAGE):
self._log(logging.GARBAGE, message, args, **kwargs)
logging.Logger.splunk = _splunk
logging.Logger.quiet = _quiet
logging.Logger.profile = _profile
logging.Logger.trace = _trace
logging.Logger.garbage = _garbage
SPLUNK_HANDLER = None
class MockRecord(object):
""" Fake record that mimicks a logging record """
def __init__(self, message, levelname, asctime, name):
self.message = message
self.levelname = levelname
self.asctime = asctime
self.name = name
# Set up an early log handler for use while we're generating config.
# Will be removed when we set up the console or file logger.
TEMP_HANDLER = logging.StreamHandler()
TEMP_HANDLER.setLevel(logging.INFO)
TEMP_HANDLER.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s'))
logging.root.handlers.insert(0, TEMP_HANDLER)
def _remove_temp_handler():
"""
Remove temporary handler if it exists
"""
if TEMP_HANDLER and TEMP_HANDLER in logging.root.handlers:
logging.root.handlers.remove(TEMP_HANDLER)
def setup_console_logger(log_level='error',
log_format='%(asctime)s [%(levelname)-5s] %(message)s',
date_format='%H:%M:%S'):
"""
Sets up logging to STDERR, allowing for configurable level, format, and
date format.
"""
_remove_temp_handler()
rootlogger = logging.getLogger()
handler = logging.StreamHandler()
handler.setLevel(LOG_LEVELS.get(log_level, logging.ERROR))
formatter = logging.Formatter(log_format, date_format)
handler.setFormatter(formatter)
rootlogger.addHandler(handler)
def setup_file_logger(log_file,
log_level='error',
log_format='%(asctime)s,%(msecs)03d [%(levelname)-5s] [%(name)s:%(lineno)d] '
' %(message)s',
date_format='%Y-%m-%d %H:%M:%S',
max_bytes=100000000,
backup_count=1):
"""
Sets up logging to a file. By default will auto-rotate those logs every
100MB and keep one backup.
"""
_remove_temp_handler()
rootlogger = logging.getLogger()
handler = logging.handlers.RotatingFileHandler(log_file, maxBytes=max_bytes,
backupCount=backup_count)
handler.setLevel(LOG_LEVELS.get(log_level, logging.ERROR))
formatter = logging.Formatter(log_format, date_format)
handler.setFormatter(formatter)
rootlogger.addHandler(handler)
def setup_splunk_logger():
"""
Sets up logging to splunk.
"""
_remove_temp_handler()
rootlogger = logging.getLogger()
handler = hubblestack.splunklogging.SplunkHandler()
handler.setLevel(logging.SPLUNK)
rootlogger.addHandler(handler)
global SPLUNK_HANDLER
SPLUNK_HANDLER = handler
def emit_to_splunk(message, level, name):
"""
Emit a single message to splunk
"""
if isinstance(message, (list, dict)):
message = filter_logs(message, remove_dots=False)
if SPLUNK_HANDLER is None:
return False
handler = SPLUNK_HANDLER
handler.emit(MockRecord(message, level, time.asctime(), name))
return True
def workaround_salt_log_handler_queues():
"""
Build a fake log handler and add it to LOGGING_STORE_HANDLER and LOGGING_NULL_HANDLER
"""
class _FakeLogHandler(object):
level = 10
count = 0
def handle(self, _record):
""" Receive a record and increase the count """
self.count += 1
flh = _FakeLogHandler()
import salt.log.setup as sls
sls.LOGGING_STORE_HANDLER.sync_with_handlers([flh])
sls.LOGGING_NULL_HANDLER.sync_with_handlers([flh])
# if flh.count > 0:
# log.info("pretended to handle %d logging record(s)
# for salt.log.setup.LOGGING_*_HANDLER", flh.count)
def filter_logs(opts_to_log, remove_dots=True):
"""
Filters out keys containing certain patterns to avoid sensitive information being sent to logs
Works on dictionaries and lists
This function was located at extmods/modules/conf_publisher.py previously
"""
filtered_conf = _remove_sensitive_info(opts_to_log, PATTERNS_TO_FILTER)
if remove_dots:
        for key in list(filtered_conf.keys()):
if '.' in key:
filtered_conf[key.replace('.', '_')] = filtered_conf.pop(key)
return filtered_conf
def _remove_sensitive_info(obj, patterns_to_filter):
"""
Filter known sensitive info
"""
if isinstance(obj, dict):
obj = {
key: _remove_sensitive_info(value, patterns_to_filter)
for key, value in obj.items()
if not any(patt in key for patt in patterns_to_filter)}
elif isinstance(obj, list):
obj = [_remove_sensitive_info(item, patterns_to_filter)
for item in obj]
return obj
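# Illustrative run of filter_logs() on a hand-made options dict (the values are
# made up): keys matching PATTERNS_TO_FILTER are dropped, and dotted keys are
# rewritten with underscores when remove_dots is True.
if __name__ == '__main__':
    demo_opts = {
        'splunk_token': 'secret-value',   # filtered out
        'hubble.version': '4.0.0',        # '.' becomes '_'
        'log_level': 'info',
    }
    print(filter_logs(demo_opts))
    # {'log_level': 'info', 'hubble_version': '4.0.0'}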
| 28.943231 | 99 | 0.664152 | [
"Apache-2.0"
] | instructure/hubble | hubblestack/log.py | 6,628 | Python |
import os
class ConfigParams:
def __init__(self,configPath):
self.env_dist = os.environ
        # Authorization / permission check
self.api_key = ""
# userID = ""
# ip = "0.0.0.0"
        # Root directory for model-related files
self.modelPath = os.path.join(os.getcwd(),"model")
cpuCores = 0
threads = 2
port = 33388
batchSize = 10
        # Number of GPUs used by each algorithm
self.GPUDevices = 1
topK = 80
featureSize = 512
zmqthreads = 2
self.CPU = 0
self.zmqAddr = "tcp://{}:5560".format(self.env_dist["ZMQ_ADDR"]) if "ZMQ_ADDR" in self.env_dist else "tcp://127.0.0.1:5570"
print(str(self.zmqAddr))
self.helmet_ids = self.parseAI("HELMET") if "HELMET" in self.env_dist else []
self.pose_ids = self.parseAI("POSE") if "POSE" in self.env_dist else []
self.track_coal_ids = self.parseAI("TRACK_COAL") if "TRACK_COAL" in self.env_dist else []
self.smoke_phone_ids = self.parseAI("SMOKEPHONE") if "SMOKEPHONE" in self.env_dist else []
# self.helmet_ids = [1,1,1]
# self.pose_ids = []
# self.track_coal_ids = []
# self.smoke_phone_ids = []
def loadConfig(self,configPath):
pass
def generateDefaultConfig(self,configPath):
pass
def initEasylogging(self,logConfig):
pass
def printParams(self):
print("run configParams function printParams")
pass
def parseAI(self,key):
ai_ids = []
for i in self.env_dist[key].split(','):
ai_ids.append(int(i))
return ai_ids
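# Illustrative construction: with HELMET and ZMQ_ADDR set in the environment,
# the comma-separated ids are parsed into ints and the ZMQ endpoint is built
# from ZMQ_ADDR (the values below are made up).
if __name__ == '__main__':
    os.environ['HELMET'] = '101,102,103'
    os.environ['ZMQ_ADDR'] = '127.0.0.1'
    cp = ConfigParams(configPath=None)
    print(cp.helmet_ids)  # [101, 102, 103]
    print(cp.zmqAddr)     # tcp://127.0.0.1:5560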
| 26.3 | 131 | 0.576046 | [
"MIT"
] | MistSun-Chen/py_verifier | common/configParams.py | 1,622 | Python |
import sys, gzip, logging
from .in_util import TimeReport, detectFileChrom, extendFileList, dumpReader
#========================================
# Schema for AStorage
#========================================
_TRASCRIPT_PROPERTIES = [
{"name": "Ensembl_geneid", "tp": "str", "opt": "repeat"},
{"name": "Ensembl_transcriptid", "tp": "str", "opt": "repeat"},
{"name": "Ensembl_proteinid", "tp": "str", "opt": "repeat"},
{"name": "refcodon", "tp": "str", "opt": "repeat"},
{"name": "codonpos", "tp": "str", "opt": "repeat"},
{"name": "FATHMM_score", "tp": "num"},
{"name": "FATHMM_pred", "tp": "str", "opt": "dict"},
{"name": "GENCODE_basic", "tp": "str"},
{"name": "HGVSc_ANNOVAR", "tp": "str"},
{"name": "HGVSp_ANNOVAR", "tp": "str"},
{"name": "HGVSc_snpEff", "tp": "str"},
{"name": "HGVSp_snpEff", "tp": "str"},
{"name": "MPC_score", "tp": "num"},
{"name": "MutationTaster_score", "tp": "num"},
{"name": "MutationAssessor_pred", "tp": "str", "opt": "dict"},
{"name": "Polyphen2_HDIV_score", "tp": "num"},
{"name": "Polyphen2_HDIV_pred", "tp": "str", "opt": "dict"},
{"name": "Polyphen2_HVAR_score", "tp": "num"},
{"name": "Polyphen2_HVAR_pred", "tp": "str", "opt": "dict"},
{"name": "SIFT_score", "tp": "num"},
{"name": "SIFT_pred", "tp": "str", "opt": "dict"},
{"name": "SIFT4G_score", "tp": "num"},
{"name": "SIFT4G_pred", "tp": "str", "opt": "dict"},
{"name": "Uniprot_acc", "tp": "str"}
]
#===============================================
_FACETS_PROPERTIES = [
{"name": "MetaLR_score", "tp": "num"},
{"name": "MetaLR_rankscore", "tp": "num"},
{"name": "MetaLR_pred", "opt": "dict", "tp": "str"},
{"name": "MutPred_score", "tp": "str"},
{"name": "MutPred_rankscore", "tp": "num"},
{"name": "MutPred_protID", "tp": "str"},
{"name": "MutPred_AAchange", "tp": "str"},
{"name": "MutPred_Top5features", "tp": "str"},
{"name": "MPC_rankscore", "tp": "num"},
{"name": "PrimateAI_score", "tp": "num"},
{"name": "PrimateAI_rankscore", "tp": "num"},
{"name": "REVEL_score", "tp": "num"},
{"name": "SIFT4G_converted_rankscore", "tp": "num"},
{
"name": "transcripts", "tp": "list",
"item": {
"tp": "dict", "items": _TRASCRIPT_PROPERTIES
}
}
]
#===============================================
_VARIANT_PROPERTIES = [
{"name": "ALT", "tp": "str", "opt": "gene"},
{"name": "REF", "tp": "str", "opt": "gene"},
{"name": "CADD_raw", "tp": "num"},
{"name": "CADD_phred", "tp": "num"},
{"name": "DANN_score", "tp": "num"},
{"name": "DANN_rankscore", "tp": "num"},
{"name": "Eigen_raw_coding", "tp": "num"},
{"name": "Eigen_raw_coding_rankscore", "tp": "num"},
{"name": "Eigen_phred_coding", "tp": "num"},
{"name": "Eigen_PC_raw_coding", "tp": "num"},
{"name": "Eigen_PC_raw_coding_rankscore", "tp": "num"},
{"name": "Eigen_PC_phred_coding", "tp": "num"},
{"name": "GTEx_V7_gene", "tp": "str", "opt": "repeat"},
{"name": "GTEx_V7_tissue", "tp": "str"},
{"name": "MutationTaster_score", "tp": "str"},
{"name": "MutationTaster_pred", "tp": "str"},
{"name": "PrimateAI_pred", "tp": "str", "opt": "dict"},
{"name": "Geuvadis_eQTL_target_gene", "tp": "str"},
{
"name": "facets",
"tp": "list",
"item": {
"tp": "dict",
"items": _FACETS_PROPERTIES
}
}
]
#===============================================
SCHEMA_DBNSFP_4 = {
"name": "DBNSFP",
"key": "hg38",
"io": {
"block-type": "page-cluster",
"max-var-count": 50
},
"filter-list": {"ref": "REF", "alt": "ALT"},
"top": {
"tp": "list",
"item": {
"tp": "dict",
"items": _VARIANT_PROPERTIES
}
}
}
#========================================
# Ingest logic
#========================================
VARIANT_TAB = [
["REF", str],
["ALT", str],
["MutationTaster_score", str],
["MutationTaster_pred", str],
["PrimateAI_pred", str],
["CADD_raw", float],
["CADD_phred", float],
["DANN_score", float],
["DANN_rankscore", float],
["Eigen_raw_coding", float],
["Eigen_raw_coding_rankscore", float],
["Eigen_phred_coding", float],
["Eigen_PC_raw_coding", float],
["Eigen_PC_raw_coding_rankscore", float],
["Eigen_PC_phred_coding", float],
["GTEx_V7_gene", str],
["GTEx_V7_tissue", str],
["Geuvadis_eQTL_target_gene", str]
]
#========================================
FACET_TAB = [
["refcodon", str],
["codonpos", str],
["SIFT4G_converted_rankscore", float],
["MetaLR_score", float],
["MetaLR_rankscore", float],
["MetaLR_pred", str],
["REVEL_score", float],
["MutPred_score", str],
["MutPred_rankscore", float],
["MutPred_protID", str],
["MutPred_AAchange", str],
["MutPred_Top5features", str],
["MPC_rankscore", float],
["PrimateAI_score", float],
["PrimateAI_rankscore", float]
]
#========================================
TRANSCRIPT_TAB = [
["Ensembl_geneid", str],
["Ensembl_transcriptid", str],
["Ensembl_proteinid", str],
["Uniprot_acc", str],
["HGVSc_ANNOVAR", str],
["HGVSp_ANNOVAR", str],
["HGVSc_snpEff", str],
["HGVSp_snpEff", str],
["GENCODE_basic", str],
["SIFT_score", float],
["SIFT_pred", str],
["SIFT4G_score", float],
["SIFT4G_pred", str],
["Polyphen2_HDIV_score", float],
["Polyphen2_HDIV_pred", str],
["Polyphen2_HVAR_score", float],
["Polyphen2_HVAR_pred", str],
["MutationAssessor_score", float],
["MutationAssessor_pred", str],
["FATHMM_score", float],
["FATHMM_pred", str],
["MPC_score", float]
]
ALL_TABS = [VARIANT_TAB, FACET_TAB, TRANSCRIPT_TAB]
#========================================
FLD_NAME_MAP = {
"ref": "REF",
"alt": "ALT",
"Eigen_pred_coding": "Eigen_phred_coding"
}
def _normFieldName(name):
global FLD_NAME_MAP
name = name.replace('-', '_')
return FLD_NAME_MAP.get(name, name)
#========================================
def setupFields(field_line):
global ALL_TABS, FLD_NAME_MAP
assert field_line.startswith('#')
field_names = field_line[1:].split()
assert field_names[0].startswith("chr")
assert field_names[1].startswith("pos")
fields_idxs = {_normFieldName(name): idx
for idx, name in enumerate(field_names)}
errors = 0
for tab in ALL_TABS:
for field_info in tab:
idx = fields_idxs.get(field_info[0])
if idx is None:
errors += 1
logging.error("No field registered: %s" % field_info[0])
else:
if len(field_info) == 2:
field_info.append(idx)
else:
field_info[2] = idx
if errors > 0:
logging.info("Available fields:\n=====\n"
+ "\n".join(sorted(fields_idxs.keys())))
assert errors == 0
#========================================
def iterFields(fields, properties_tab):
for name, tp, idx in properties_tab:
val = fields[idx]
if val == '.':
yield name, None
else:
yield name, tp(val)
def iterDeepFields(fields, properties_tab):
for name, tp, idx in properties_tab:
val_seq = []
for val in fields[idx].split(';'):
if val == '.':
val_seq.append(None)
else:
val_seq.append(tp(val))
yield name, val_seq
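# Illustrative parse with made-up values (not real dbNSFP data): scalar columns
# go through iterFields one typed value at a time, while ';'-separated
# transcript columns go through iterDeepFields and yield one value per
# transcript, with '.' mapped to None.
if __name__ == '__main__':
    demo_tab = [["SIFT_score", float, 0], ["SIFT_pred", str, 1]]
    demo_fields = ["0.031;.", "D;."]
    print(dict(iterDeepFields(demo_fields, demo_tab)))
    # {'SIFT_score': [0.031, None], 'SIFT_pred': ['D', None]}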
#========================================
class DataCollector:
def __init__(self):
self.mCounts = [0, 0, 0]
self.mCurRecord = None
def getCounts(self):
return self.mCounts
def ingestLine(self, line):
global VARIANT_TAB, FACET_TAB, TRANSCRIPT_TAB
if line.endswith('\n'):
line = line[:-1]
fields = line.split('\t')
chrom = "chr" + str(fields[0])
pos = int(fields[1])
new_record = False
if self.mCurRecord is None or (chrom, pos) != self.mCurRecord[0]:
new_record = True
new_variant = new_record
var_data = dict()
for name, val in iterFields(fields, VARIANT_TAB):
var_data[name] = val
if not new_variant and val != self.mCurRecord[1][-1][name]:
new_variant = True
facet_data = {name: val
for name, val in iterFields(fields, FACET_TAB)}
tr_data_seq = None
for name, val_seq in iterDeepFields(fields, TRANSCRIPT_TAB):
if tr_data_seq is None:
tr_data_seq = [{name: val} for val in val_seq]
else:
for idx, val in enumerate(val_seq):
tr_data_seq[idx][name] = val
if tr_data_seq is None:
tr_data_seq = []
facet_data["transcripts"] = tr_data_seq
self.mCounts[2] += len(tr_data_seq)
self.mCounts[1] += 1
ret = None
if new_record:
self.mCounts[0] += 1
var_data["facets"] = [facet_data]
ret, self.mCurRecord = self.mCurRecord, [(chrom, pos), [var_data]]
elif new_variant:
self.mCounts[0] += 1
var_data["facets"] = [facet_data]
self.mCurRecord[1].append(var_data)
else:
self.mCurRecord[1][-1]["facets"].append(facet_data)
return ret
def finishUp(self):
return self.mCurRecord
#========================================
#========================================
class ReaderDBNSFP4:
def __init__(self, file_list, chrom_loc = "chr"):
self.mFiles = extendFileList(file_list)
self.mChromLoc = chrom_loc
def read(self):
exceptions = 0
for chrom_file in self.mFiles:
chrom = detectFileChrom(chrom_file, self.mChromLoc)
logging.info("Evaluation of %s in %s" % (chrom, chrom_file))
with gzip.open(chrom_file, 'rt') as text_inp:
time_rep = TimeReport("chr" + chrom)
collector = DataCollector()
for line_no, line in enumerate(text_inp):
if line_no == 0:
setupFields(line)
continue
try:
info = collector.ingestLine(line)
if info is not None:
yield info
if (line_no % 10000) == 0:
total_var, _, _ = collector.getCounts()
time_rep.portion(total_var)
except IndexError:
exceptions += 1
info = collector.finishUp()
if info:
yield info
total_var, total_facets, total_tr = collector.getCounts()
time_rep.done(total_var)
logging.info("transcripts: %d, facets: %d, exceptions: %d"
% (total_tr, total_facets, exceptions))
#========================================
def reader_dbNSFP4(properties, schema_h = None):
return ReaderDBNSFP4(
properties["file_list"],
properties.get("chrom_loc", "chr"))
#========================================
if __name__ == '__main__':
logging.root.setLevel(logging.INFO)
reader = reader_dbNSFP4({"file_list": sys.argv[1]})
dumpReader(reader)
| 37.005848 | 78 | 0.459387 | [
"Apache-2.0"
] | ForomePlatform/Anfisa-Annotations | a_storage/ingest/in_dbnsfp4.py | 12,656 | Python |
# Source: https://gist.github.com/redknightlois/c4023d393eb8f92bb44b2ab582d7ec20
from torch.optim.optimizer import Optimizer
import torch
import math
class Ralamb(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-4):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for ind in range(10)]
super(Ralamb, self).__init__(params, defaults)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Ralamb does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
# Decay the first and second moment running average coefficient
# m_t
exp_avg.mul_(beta1).add_(1 - beta1, grad)
# v_t
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
state['step'] += 1
buffered = self.buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, radam_step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
radam_step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
radam_step_size = group['lr'] / (1 - beta1 ** state['step'])
buffered[2] = radam_step_size
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
# more conservative since it's an approximated value
radam_step = p_data_fp32.clone()
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
radam_step.addcdiv_(-radam_step_size, exp_avg, denom)
else:
radam_step.add_(-radam_step_size, exp_avg)
radam_norm = radam_step.pow(2).sum().sqrt()
weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
if 0 in (weight_norm, radam_norm):
trust_ratio = 1
else:
trust_ratio = weight_norm / radam_norm
state['weight_norm'] = weight_norm
state['adam_norm'] = radam_norm
state['trust_ratio'] = trust_ratio
if N_sma >= 5:
p_data_fp32.addcdiv_(-radam_step_size * trust_ratio, exp_avg, denom)
else:
p_data_fp32.add_(-radam_step_size * trust_ratio, exp_avg)
p.data.copy_(p_data_fp32)
        return loss
| 40.191919 | 195 | 0.508922 | [
"MIT"
] | achaiah/pywick | pywick/optimizers/ralamb.py | 3,979 | Python |
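# Minimal smoke test for the Ralamb optimizer in the record above (illustrative
# only).  The import path is inferred from the record's file path, and a
# PyTorch version contemporary with the gist is assumed: the in-place
# add_(scalar, tensor) overload used there is deprecated in newer releases.
import torch
from pywick.optimizers.ralamb import Ralamb
model = torch.nn.Linear(4, 2)
opt = Ralamb(model.parameters(), lr=1e-3)
loss = model(torch.randn(8, 4)).pow(2).mean()
loss.backward()
opt.step()
print(loss.item())  # scalar loss; parameters were updated in place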
import os
import sys
import json
import hashlib
import gc
from operator import *
import shlex
from pyspark import StorageLevel
from pyspark.sql import SQLContext
from pyspark.sql.functions import *
from pyspark.sql.types import *
import numpy as np
from subjectivity_clues import clues
def expect(name, var, expected, op=eq):
if op(var, expected):
log('[checkpoint] {} = {}'.format(name, expected))
else:
log('[error] {} = {}'.format(name, expected))
raise Exception(name)
def log(message):
log_file = 'sample_subjectivity_tweets.log'
with open(log_file, 'a') as f:
f.write(message)
f.write('\n')
f.flush()
f.close()
print message
def to_json(name, jsons):
filename = '{}.json'.format(name)
with open(filename, 'w') as f:
for j in jsons:
f.write(j)
f.write('\n')
def to_csv(name, jsons):
filename = '{}.csv'.format(name)
with open(filename, 'w') as f:
for tweet in jsons:
t = json.loads(tweet)
body = t['body'].replace('\n', ' ').replace('\r', '').replace('"', '""')
f.write('"{}",{},{},"{}"\n'.format(t['id'], t['verb'], t['postedTime'], body))
def sample(rdd, size, seed):
items = rdd.collect()
rand = np.random.RandomState(seed)
sampled = rand.choice(items, size=size, replace=False)
expect('sampled', len(set(sampled)), size)
return sampled.tolist()
def sha(name, ext='json'):
BUF_SIZE = 65536
filename = '{}.{}'.format(name, ext)
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
data = f.read(BUF_SIZE)
if not data:
break
sha1.update(data)
return sha1.hexdigest()
def read_and_parse_clues():
DEFAULT_FILENAME = os.getcwd() + os.sep + 'subjectivity_clues' + os.sep + 'subjclueslen1-HLTEMNLP05.tff'
lines = None
with open(DEFAULT_FILENAME, 'r') as f:
lines = f.readlines()
clues = dict()
for l in lines:
clue = dict(token.split('=') for token in shlex.split(l))
word = clue['word1']
clues[word] = clue
return clues
def calculate_relevant(lexicons, sentence):
PRIORPOLARITY = {
'positive': 1,
'negative': -1,
'both': 0,
'neutral': 0
}
TYPE = {
'strongsubj': 2,
'weaksubj': 1
}
total_score = 0
for w in sentence.split(' '):
if w not in lexicons.keys():
continue
total_score += PRIORPOLARITY[lexicons[w]['priorpolarity']] * TYPE[lexicons[w]['type']]
return total_score
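# Illustrative scoring with a hand-made two-word lexicon (real entries come
# from the subjectivity clues file loaded by read_and_parse_clues):
# "good" contributes +1 (weak positive), "horrible" contributes -2 (strong negative).
demo_lexicon = {
    'good': {'priorpolarity': 'positive', 'type': 'weaksubj'},
    'horrible': {'priorpolarity': 'negative', 'type': 'strongsubj'},
}
print calculate_relevant(demo_lexicon, 'the burrito was good not horrible')  # -1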
# Make sure Python uses UTF-8 as tweets contains emoticon and unicode
reload(sys)
sys.setdefaultencoding('utf-8')
# Use SQLContext for better support
sqlContext = SQLContext(sc)
# Define storage level
DISK_ONLY_2 = StorageLevel(True, False, False, False, 2)
MEMORY_AND_DISK = StorageLevel(True, True, False, False, 1)
# Read GNIP's JSON file
directory = "tweets"
datasets = sqlContext.read.json(directory)
log('# Completed reading JSON files')
# Check checksum count
file_count = datasets.where(datasets['verb'].isNull()).count()
expect('file_count', file_count, 21888)
# Check post count
all_posts = datasets.where(datasets['verb'] == 'post')
all_posts_count = all_posts.count()
expect('all_posts_count', all_posts_count, 1570398)
# Check share count
all_shares = datasets.where(datasets['verb'] == 'share')
all_shares_count = all_shares.count()
expect('all_shares_count', all_shares_count, 1112590)
# Check dataset count
info_dataset = datasets.select('info')
info_dataset.registerTempTable('info')
all_tweets_count = info_dataset.select('info.activity_count').groupBy().sum('activity_count').collect()[0][0]
expect('all_tweets_count', all_tweets_count, 2682988)
expect('all_tweets_count', all_tweets_count, all_posts_count + all_shares_count)
log('# Completed validating tweets count')
# Remove post authored by @ChipotleTweet and news agencies
chipotle_tweet = 'id:twitter.com:141341662'
users_to_remove = [chipotle_tweet, 'id:twitter.com:759251', 'id:twitter.com:91478624', 'id:twitter.com:28785486',
'id:twitter.com:1652541', 'id:twitter.com:51241574', 'id:twitter.com:807095',
'id:twitter.com:34713362', 'id:twitter.com:3090733766', 'id:twitter.com:1367531',
'id:twitter.com:14293310', 'id:twitter.com:3108351', 'id:twitter.com:14173315',
'id:twitter.com:292777349', 'id:twitter.com:428333', 'id:twitter.com:624413',
'id:twitter.com:20562637', 'id:twitter.com:13918492', 'id:twitter.com:16184358',
'id:twitter.com:625697849', 'id:twitter.com:2467791', 'id:twitter.com:9763482',
'id:twitter.com:14511951', 'id:twitter.com:6017542', 'id:twitter.com:26574283',
'id:twitter.com:115754870']
all_posts_wo_specific_users = all_posts.where(~ col('actor.id').isin(users_to_remove))
all_posts_w_specific_users = all_posts.where(col('actor.id').isin(users_to_remove)).count()
expect('all_posts_wo_specific_users', all_posts_wo_specific_users.count(), all_posts_count - all_posts_w_specific_users)
# Remove share retweet of tweet by @ChipotleTweet and news agencies
all_shares_wo_specific_users = all_shares.where(~ col('object.actor.id').isin(users_to_remove))
all_shares_w_specific_users = all_shares.where(col('object.actor.id').isin(users_to_remove)).count()
expect('all_shares_wo_specific_users', all_shares_wo_specific_users.count(), all_shares_count - all_shares_w_specific_users)
# Generate tweets pool with only English tweet
tweets_pool = all_posts_wo_specific_users.unionAll(all_shares_wo_specific_users).filter("twitter_lang = 'en'")
tweets_pool.persist(MEMORY_AND_DISK)
tweets_pool_count = tweets_pool.count()
# Adding all post to all share will be greater than tweet pool because of non-English tweet
expected_tweets_pool_count = all_posts_count - all_posts_w_specific_users + \
all_shares_count - all_shares_w_specific_users
expect('tweets_pool_count', tweets_pool_count, expected_tweets_pool_count, op=lt)
log('# Completed constructing tweets pool')
# Check language of tweets
languages = tweets_pool.select('twitter_lang').distinct()
languages_count = languages.count()
language_check = languages.first()['twitter_lang']
expect('languages_count', languages_count, 1)
expect('language_check', language_check, 'en')
log('# Completed validating language variety')
# Take top 80% of tweets by length
tweets_pool_str_lengths = tweets_pool.select(length('body').alias('length')).rdd.map(lambda x: x.length).collect()
lengths_np = np.array(tweets_pool_str_lengths)
p = np.percentile(lengths_np, 20)
final_tweets_pool = tweets_pool.filter(length('body') >= p)
final_tweets_pool.persist(MEMORY_AND_DISK)
tweets_pool.unpersist(blocking=True)
final_tweets_pool_count = final_tweets_pool.count()
percentage_kept = float(final_tweets_pool_count) / tweets_pool_count
expect('percentage_kept', percentage_kept, 0.8, op=gt)
log('# Completed sampling top 80% of tweets by body length')
# Sampling
final_tweets_ids = final_tweets_pool.select(final_tweets_pool['id']).rdd.sortBy(lambda x: x.id).map(lambda x: x.id)
# Development tweets
dev_seed = 10102016
number_of_dev_samples = 3000
dev_posts = sample(final_tweets_ids, number_of_dev_samples, dev_seed)
dev_posts_count = len(dev_posts)
expect('dev_posts_count', dev_posts_count, number_of_dev_samples)
log('# Completed sampling dev tweets')
dev_posts_file = "dev_posts"
dev_posts_jsons = final_tweets_pool[final_tweets_pool['id'].isin(dev_posts)].toJSON().collect()
to_json(dev_posts_file, dev_posts_jsons)
to_csv(dev_posts_file, dev_posts_jsons)
expect('dev_posts_file', sha(dev_posts_file), '74447296831c8e3061fc0ee739f549c5b08b85a3')
expect('dev_posts_file', sha(dev_posts_file, ext='csv'), '6acfd1f8d238bc5d25d97d2c9e6f6b177699389a')
log('Exporting dev post to {}'.format(dev_posts_file))
log('# Completed exporting dev tweets')
del dev_posts_jsons
gc.collect()
# Find distinct set of tweets (unique body text)
post_pool = final_tweets_pool.where(final_tweets_pool['verb'] == 'post')
post_pool.persist(MEMORY_AND_DISK)
post_pool_ids = post_pool.select(post_pool['id']).rdd.sortBy(lambda x: x.id).map(lambda x: x.id).collect()
expect('post_pool', post_pool.count(), 1124935)
share_pool = final_tweets_pool.where(final_tweets_pool['verb'] == 'share')
share_pool.persist(MEMORY_AND_DISK)
expect('share_pool', share_pool.count(), 846141)
broadcast_post_ids = sc.broadcast(set(post_pool_ids))
unique_share_ids = share_pool.select(share_pool['id'], share_pool['object.id'].alias('object_id')).rdd.filter(lambda row: row['object_id'] not in broadcast_post_ids.value).map(lambda row: row.id).collect()
expect('unique_share_pool', len(unique_share_ids), 193006)
log('# Completed finding unique share tweet')
# Constructing distinct tweet pool
broadcast_unique_share_ids = sc.broadcast(unique_share_ids)
distinct_tweets_pool = final_tweets_pool.\
select(final_tweets_pool['id'], final_tweets_pool['body']).\
rdd.\
filter(lambda row: row['id'] in broadcast_post_ids.value or row['id'] in broadcast_unique_share_ids.value)
distinct_tweets_pool.persist(MEMORY_AND_DISK)
distinct_tweets_count = distinct_tweets_pool.count()
expect('distinct_tweets_pool', distinct_tweets_count, 1124935 + 193006)
# Exclude development tweets
tweets_unsampled = distinct_tweets_pool.toDF().where(~ col('id').isin(dev_posts))
tweets_unsampled.persist(MEMORY_AND_DISK)
tweets_unsampled_count = tweets_unsampled.count()
# no. of dev intersect post pool: 1718, no. of share dev intersect unique share pool: 293
expect('tweets_unsampled', tweets_unsampled_count, 1124935 + 193006 - 1718 - 293)
log('# Completed constructing unsampled tweets')
# Calculate subjectivity
lexicons = read_and_parse_clues()
udfBodyToRelevant = udf(lambda body: calculate_relevant(lexicons, body), IntegerType())
tweets_lexicon = tweets_unsampled.withColumn('score', udfBodyToRelevant('body'))
tweets_lexicon.persist(MEMORY_AND_DISK)
log('# Completed constructing tweet lexicon')
# Take top and bottom
number_of_tweets_each = 1500
positive_tweets = tweets_lexicon.orderBy(desc('score')).take(number_of_tweets_each)
negative_tweets = tweets_lexicon.orderBy(asc('score')).take(number_of_tweets_each)
# Cut top and bottom via score for more deterministic sampling
min_positive_score = positive_tweets[-1]['score']
min_negative_score = negative_tweets[-1]['score']
expect('min_positive_score', min_positive_score, 7)
expect('min_negative_score', min_negative_score, -5)
positive_tweets = tweets_lexicon.filter('score > {}'.format(min_positive_score - 1)).orderBy(desc('score')).collect()
expect('positive_tweets', len(positive_tweets), 2012)
negative_tweets = tweets_lexicon.filter('score < {}'.format(min_negative_score + 1)).orderBy(asc('score')).collect()
expect('positive_tweets', len(negative_tweets), 1715)
positive_tweet_file = "positive_tweets"
positive_tweets_ids = map(lambda t: t['id'], positive_tweets)
positive_tweet_jsons = final_tweets_pool[final_tweets_pool['id'].isin(positive_tweets_ids)].toJSON().collect()
to_json(positive_tweet_file, positive_tweet_jsons)
to_csv(positive_tweet_file, positive_tweet_jsons)
log('Exporting positive tweets to {}'.format(positive_tweet_file))
log('# Completed exporting positive tweets')
expect('positive_tweet_file', sha(positive_tweet_file), 'cb2f8b691ccf3eae9846c67735f413a49befea28')
expect('positive_tweet_file', sha(positive_tweet_file, ext='csv'), 'd3d43ab4e03fdf106b9191f4e0161cfcde3f040e')
negative_tweet_file = "negative_tweets"
negative_tweet_ids = map(lambda t: t['id'], negative_tweets)
negative_tweet_jsons = final_tweets_pool[final_tweets_pool['id'].isin(negative_tweet_ids)].toJSON().collect()
to_json(negative_tweet_file, negative_tweet_jsons)
to_csv(negative_tweet_file, negative_tweet_jsons)
log('Exporting negative tweets to {}'.format(negative_tweet_file))
log('# Completed exporting negative tweets')
expect('negative_tweet_file', sha(negative_tweet_file), '086c43427078092e538a779b8b06a71341b8da48')
expect('negative_tweet_file', sha(negative_tweet_file, ext='csv'), 'd10a1a95156c28d844e9c4e668d766963c0636a4')
| 39.711039 | 205 | 0.739024 | [
"Apache-2.0"
] | chuajiesheng/twitter-sentiment-analysis | step_2/scripts/sample_subjectivity_tweets.py | 12,231 | Python |
import os
import sys
from setuptools import find_packages, setup
IS_RTD = os.environ.get("READTHEDOCS", None)
version = "0.4.0b14.dev0"
long_description = open(os.path.join(os.path.dirname(__file__), "README.rst")).read()
install_requires = [
"morepath==0.19",
"alembic",
"rulez>=0.1.4,<0.2.0",
"inverter>=0.1.0<0.2.0",
"more.cors",
"celery",
"redis",
"jsl",
"pyyaml>=4.2b1",
"more.jsonschema",
"sqlalchemy",
"sqlalchemy_utils",
"more.signals",
"DateTime",
"transitions",
"jsonpath_ng",
"python-dateutil",
"more.jwtauth",
"more.itsdangerous",
"sqlsoup",
"celery",
"gunicorn",
"itsdangerous",
"pyyaml",
"passlib",
"jsonschema",
"more.transaction",
"zope.sqlalchemy",
"python-dateutil",
"more.cors",
"sqlalchemy_jsonfield",
"sqlsoup",
"celery",
"gunicorn",
"itsdangerous",
"pyyaml",
"passlib",
"jsonschema",
"more.transaction",
"zope.sqlalchemy",
"more.basicauth",
"cryptography",
"elasticsearch>7.0.0,<8.0.0",
"pamela",
"click",
"cookiecutter",
"eventlet",
"wsgigzip",
"psycopg2",
"colander",
"deform",
"more.chameleon",
"more.static",
"RestrictedPython",
"beaker",
"zstandard",
"oauthlib[signedtoken]",
"requests-oauthlib",
]
if IS_RTD is None:
install_requires.append("python-ldap")
setup(
name="morpfw",
version=version,
description="Web framework based on morepath",
long_description=long_description,
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords="",
author="Izhar Firdaus",
author_email="[email protected]",
url="http://github.com/morpframework/morpfw",
license="Apache-2.0",
packages=find_packages(exclude=["ez_setup", "examples", "tests"]),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
extras_require={
"test": [
"nose",
"webtest",
"pytest",
"pytest-html",
"pytest_postgresql",
"pytest_rabbitmq",
"pytest-annotate",
"pytest-cov",
"pika",
"mirakuru",
],
"docs": ["sphinxcontrib-httpdomain", "sphinx-click"],
},
entry_points={
"morepath": ["scan=morpfw"],
"console_scripts": [
"morpfw=morpfw.cli.main:main",
"mfw-runmodule=morpfw.cli:run_module",
"mfw-profilemodule=morpfw.cli:run_module_profile",
],
},
)
| 22.782609 | 94 | 0.579771 | [
"Apache-2.0"
] | morpframework/morpfw | setup.py | 2,620 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class UpdateHistoryProperty(Model):
"""An update history of the ImmutabilityPolicy of a blob container.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar update: The ImmutabilityPolicy update type of a blob container,
possible values include: put, lock and extend. Possible values include:
'put', 'lock', 'extend'
:vartype update: str or
~azure.mgmt.storage.v2018_11_01.models.ImmutabilityPolicyUpdateType
:ivar immutability_period_since_creation_in_days: The immutability period
for the blobs in the container since the policy creation, in days.
:vartype immutability_period_since_creation_in_days: int
:ivar timestamp: Returns the date and time the ImmutabilityPolicy was
updated.
:vartype timestamp: datetime
:ivar object_identifier: Returns the Object ID of the user who updated the
ImmutabilityPolicy.
:vartype object_identifier: str
:ivar tenant_id: Returns the Tenant ID that issued the token for the user
who updated the ImmutabilityPolicy.
:vartype tenant_id: str
:ivar upn: Returns the User Principal Name of the user who updated the
ImmutabilityPolicy.
:vartype upn: str
"""
_validation = {
'update': {'readonly': True},
'immutability_period_since_creation_in_days': {'readonly': True},
'timestamp': {'readonly': True},
'object_identifier': {'readonly': True},
'tenant_id': {'readonly': True},
'upn': {'readonly': True},
}
_attribute_map = {
'update': {'key': 'update', 'type': 'str'},
'immutability_period_since_creation_in_days': {'key': 'immutabilityPeriodSinceCreationInDays', 'type': 'int'},
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'object_identifier': {'key': 'objectIdentifier', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'upn': {'key': 'upn', 'type': 'str'},
}
def __init__(self, **kwargs):
super(UpdateHistoryProperty, self).__init__(**kwargs)
self.update = None
self.immutability_period_since_creation_in_days = None
self.timestamp = None
self.object_identifier = None
self.tenant_id = None
self.upn = None
| 40.652174 | 118 | 0.647772 | [
"MIT"
] | pjquirk/azure-sdk-for-python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2018_11_01/models/update_history_property.py | 2,805 | Python |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v6/resources/paid_organic_search_term_view.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v6/resources/paid_organic_search_term_view.proto',
package='google.ads.googleads.v6.resources',
syntax='proto3',
serialized_options=b'\n%com.google.ads.googleads.v6.resourcesB\036PaidOrganicSearchTermViewProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V6.Resources\312\002!Google\\Ads\\GoogleAds\\V6\\Resources\352\002%Google::Ads::GoogleAds::V6::Resources',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\nEgoogle/ads/googleads/v6/resources/paid_organic_search_term_view.proto\x12!google.ads.googleads.v6.resources\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1cgoogle/api/annotations.proto\"\xbd\x02\n\x19PaidOrganicSearchTermView\x12Q\n\rresource_name\x18\x01 \x01(\tB:\xe0\x41\x03\xfa\x41\x34\n2googleads.googleapis.com/PaidOrganicSearchTermView\x12\x1d\n\x0bsearch_term\x18\x03 \x01(\tB\x03\xe0\x41\x03H\x00\x88\x01\x01:\x9d\x01\xea\x41\x99\x01\n2googleads.googleapis.com/PaidOrganicSearchTermView\x12\x63\x63ustomers/{customer_id}/paidOrganicSearchTermViews/{campaign_id}~{ad_group_id}~{base64_search_term}B\x0e\n\x0c_search_termB\x8b\x02\n%com.google.ads.googleads.v6.resourcesB\x1ePaidOrganicSearchTermViewProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V6.Resources\xca\x02!Google\\Ads\\GoogleAds\\V6\\Resources\xea\x02%Google::Ads::GoogleAds::V6::Resourcesb\x06proto3'
,
dependencies=[google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_PAIDORGANICSEARCHTERMVIEW = _descriptor.Descriptor(
name='PaidOrganicSearchTermView',
full_name='google.ads.googleads.v6.resources.PaidOrganicSearchTermView',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v6.resources.PaidOrganicSearchTermView.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003\372A4\n2googleads.googleapis.com/PaidOrganicSearchTermView', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='search_term', full_name='google.ads.googleads.v6.resources.PaidOrganicSearchTermView.search_term', index=1,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\352A\231\001\n2googleads.googleapis.com/PaidOrganicSearchTermView\022ccustomers/{customer_id}/paidOrganicSearchTermViews/{campaign_id}~{ad_group_id}~{base64_search_term}',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_search_term', full_name='google.ads.googleads.v6.resources.PaidOrganicSearchTermView._search_term',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=199,
serialized_end=516,
)
_PAIDORGANICSEARCHTERMVIEW.oneofs_by_name['_search_term'].fields.append(
_PAIDORGANICSEARCHTERMVIEW.fields_by_name['search_term'])
_PAIDORGANICSEARCHTERMVIEW.fields_by_name['search_term'].containing_oneof = _PAIDORGANICSEARCHTERMVIEW.oneofs_by_name['_search_term']
DESCRIPTOR.message_types_by_name['PaidOrganicSearchTermView'] = _PAIDORGANICSEARCHTERMVIEW
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PaidOrganicSearchTermView = _reflection.GeneratedProtocolMessageType('PaidOrganicSearchTermView', (_message.Message,), {
'DESCRIPTOR' : _PAIDORGANICSEARCHTERMVIEW,
'__module__' : 'google.ads.googleads.v6.resources.paid_organic_search_term_view_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.resources.PaidOrganicSearchTermView)
})
_sym_db.RegisterMessage(PaidOrganicSearchTermView)
DESCRIPTOR._options = None
_PAIDORGANICSEARCHTERMVIEW.fields_by_name['resource_name']._options = None
_PAIDORGANICSEARCHTERMVIEW.fields_by_name['search_term']._options = None
_PAIDORGANICSEARCHTERMVIEW._options = None
# @@protoc_insertion_point(module_scope)
| 58.404255 | 1,006 | 0.816393 | [
"Apache-2.0"
] | arammaliachi/google-ads-python | google/ads/google_ads/v6/proto/resources/paid_organic_search_term_view_pb2.py | 5,490 | Python |
from itertools import product
from json import dumps
import logging
import nox # noqa
from pathlib import Path # noqa
import sys
# add parent folder to python path so that we can import nox_utils.py
# note that you need to "pip install -r noxfile-requirements.txt" for this file to work.
sys.path.append(str(Path(__file__).parent / "ci_tools"))
from nox_utils import PY27, PY37, PY36, PY35, PY38, PY39, power_session, rm_folder, rm_file, PowerSession # noqa
pkg_name = "genbadge"
gh_org = "smarie"
gh_repo = "python-genbadge"
ENVS = {
PY39: {"coverage": False, "pkg_specs": {"pip": ">19"}},
PY27: {"coverage": False, "pkg_specs": {"pip": ">10"}},
PY35: {"coverage": False, "pkg_specs": {"pip": ">10"}},
PY36: {"coverage": False, "pkg_specs": {"pip": ">19"}},
PY38: {"coverage": False, "pkg_specs": {"pip": ">19"}},
# IMPORTANT: this should be last so that the folder docs/reports is not deleted afterwards
PY37: {"coverage": True, "pkg_specs": {"pip": ">19"}}, # , "pytest-html": "1.9.0"
}
# set the default activated sessions, minimal for CI
nox.options.sessions = ["tests", "flake8"] # , "docs", "gh_pages"
nox.options.reuse_existing_virtualenvs = True # this can be done using -r
# if platform.system() == "Windows": >> always use this for better control
nox.options.default_venv_backend = "conda"
# os.environ["NO_COLOR"] = "True" # nox.options.nocolor = True does not work
# nox.options.verbose = True
nox_logger = logging.getLogger("nox")
# nox_logger.setLevel(logging.INFO) NO !!!! this prevents the "verbose" nox flag to work !
class Folders:
root = Path(__file__).parent
ci_tools = root / "ci_tools"
runlogs = root / Path(nox.options.envdir or ".nox") / "_runlogs"
runlogs.mkdir(parents=True, exist_ok=True)
dist = root / "dist"
site = root / "site"
site_reports = site / "reports"
reports_root = root / "docs" / "reports"
test_reports = reports_root / "junit"
test_xml = test_reports / "junit.xml"
test_html = test_reports / "report.html"
test_badge = test_reports / "junit-badge.svg"
coverage_reports = reports_root / "coverage"
coverage_xml = coverage_reports / "coverage.xml"
coverage_intermediate_file = root / ".coverage"
coverage_badge = coverage_reports / "coverage-badge.svg"
flake8_reports = reports_root / "flake8"
flake8_intermediate_file = root / "flake8stats.txt"
flake8_badge = flake8_reports / "flake8-badge.svg"
@power_session(envs=ENVS, logsdir=Folders.runlogs)
def tests(session: PowerSession, coverage, pkg_specs):
"""Run the test suite, including test reports generation and coverage reports. """
# As soon as this runs, we delete the target site and coverage files to avoid reporting wrong coverage/etc.
rm_folder(Folders.site)
rm_folder(Folders.reports_root)
# delete the .coverage files if any (they are not supposed to be any, but just in case)
rm_file(Folders.coverage_intermediate_file)
rm_file(Folders.root / "coverage.xml")
# CI-only dependencies
# Did we receive a flag through positional arguments ? (nox -s tests -- <flag>)
# install_ci_deps = False
# if len(session.posargs) == 1:
# assert session.posargs[0] == "keyrings.alt"
# install_ci_deps = True
# elif len(session.posargs) > 1:
# raise ValueError("Only a single positional argument is accepted, received: %r" % session.posargs)
# uncomment and edit if you wish to uninstall something without deleting the whole env
# session.run2("pip uninstall pytest-asyncio --yes")
# install all requirements
# session.install_reqs(phase="pip", phase_reqs=("pip",), versions_dct=pkg_specs)
session.install_reqs(setup=True, install=True, tests=True, extras=("all",), versions_dct=pkg_specs)
# install CI-only dependencies
# if install_ci_deps:
# session.install2("keyrings.alt")
# list all (conda list alone does not work correctly on github actions)
# session.run2("conda list")
conda_prefix = Path(session.bin)
if conda_prefix.name == "bin":
conda_prefix = conda_prefix.parent
session.run2("conda list", env={"CONDA_PREFIX": str(conda_prefix), "CONDA_DEFAULT_ENV": session.get_session_id()})
# Fail if the assumed python version is not the actual one
session.run2("python ci_tools/check_python_version.py %s" % session.python)
# install self so that it is recognized by pytest
session.run2("pip install -e . --no-deps")
# check that it can be imported even from a different folder
session.run2(['python', '-c', '"import os; os.chdir(\'./docs/\'); import %s"' % pkg_name])
# finally run all tests
if not coverage:
# simple: pytest only
session.run2("python -m pytest --cache-clear -v %s/tests/" % pkg_name)
else:
# coverage + junit html reports + badge generation
session.install_reqs(phase="coverage", phase_reqs=["coverage", "pytest-html", "requests"],
versions_dct=pkg_specs)
# --coverage + junit html reports
session.run2("coverage run --source {pkg_name} "
"-m pytest --cache-clear --junitxml={test_xml} --html={test_html} -v {pkg_name}/tests/"
"".format(pkg_name=pkg_name, test_xml=Folders.test_xml, test_html=Folders.test_html))
session.run2("coverage report")
session.run2("coverage xml -o {covxml}".format(covxml=Folders.coverage_xml))
session.run2("coverage html -d {dst}".format(dst=Folders.coverage_reports))
# delete this intermediate file, it is not needed anymore
rm_file(Folders.coverage_intermediate_file)
# --generates the badge for the test results and fail build if less than x% tests pass
nox_logger.info("Generating badge for tests coverage")
# Use our own package to generate the badge
session.run2("genbadge tests -i %s -o %s -t 100" % (Folders.test_xml, Folders.test_badge))
session.run2("genbadge coverage -i %s -o %s" % (Folders.coverage_xml, Folders.coverage_badge))
@power_session(python=PY38, logsdir=Folders.runlogs)
def flake8(session: PowerSession):
"""Launch flake8 qualimetry."""
session.install("-r", str(Folders.ci_tools / "flake8-requirements.txt"))
session.run2("pip install -e .[flake8]")
rm_folder(Folders.flake8_reports)
rm_file(Folders.flake8_intermediate_file)
# Options are set in `setup.cfg` file
session.run("flake8", pkg_name, "--exit-zero", "--format=html", "--htmldir", str(Folders.flake8_reports),
"--statistics", "--tee", "--output-file", str(Folders.flake8_intermediate_file))
# generate our badge
session.run2("genbadge flake8 -i %s -o %s" % (Folders.flake8_intermediate_file, Folders.flake8_badge))
rm_file(Folders.flake8_intermediate_file)
@power_session(python=[PY37])
def docs(session: PowerSession):
"""Generates the doc and serves it on a local http server. Pass '-- build' to build statically instead."""
session.install_reqs(phase="docs", phase_reqs=["mkdocs-material", "mkdocs", "pymdown-extensions", "pygments"])
if session.posargs:
# use posargs instead of "serve"
session.run2("mkdocs -f ./docs/mkdocs.yml %s" % " ".join(session.posargs))
else:
session.run2("mkdocs serve -f ./docs/mkdocs.yml")
@power_session(python=[PY37])
def publish(session: PowerSession):
"""Deploy the docs+reports on github pages. Note: this rebuilds the docs"""
session.install_reqs(phase="mkdocs", phase_reqs=["mkdocs-material", "mkdocs", "pymdown-extensions", "pygments"])
# possibly rebuild the docs in a static way (mkdocs serve does not build locally)
session.run2("mkdocs build -f ./docs/mkdocs.yml")
# check that the doc has been generated with coverage
if not Folders.site_reports.exists():
raise ValueError("Test reports have not been built yet. Please run 'nox -s tests-3.7' first")
# publish the docs
session.run2("mkdocs gh-deploy -f ./docs/mkdocs.yml")
# publish the coverage - now in github actions only
# session.install_reqs(phase="codecov", phase_reqs=["codecov", "keyring"])
# # keyring set https://app.codecov.io/gh/<org>/<repo> token
# import keyring # (note that this import is not from the session env but the main nox env)
# codecov_token = keyring.get_password("https://app.codecov.io/gh/<org>/<repo>>", "token")
# # note: do not use --root nor -f ! otherwise "There was an error processing coverage reports"
# session.run2('codecov -t %s -f %s' % (codecov_token, Folders.coverage_xml))
@power_session(python=[PY37])
def release(session: PowerSession):
"""Create a release on github corresponding to the latest tag"""
# Get current tag using setuptools_scm and make sure this is not a dirty/dev one
from setuptools_scm import get_version # (note that this import is not from the session env but the main nox env)
from setuptools_scm.version import guess_next_dev_version
version = []
def my_scheme(version_):
version.append(version_)
return guess_next_dev_version(version_)
current_tag = get_version(".", version_scheme=my_scheme)
# create the package
session.install_reqs(phase="setup.py#dist", phase_reqs=["setuptools_scm"])
rm_folder(Folders.dist)
session.run2("python setup.py sdist bdist_wheel")
if version[0].dirty or not version[0].exact:
raise ValueError("You need to execute this action on a clean tag version with no local changes.")
# Did we receive a token through positional arguments ? (nox -s release -- <token>)
if len(session.posargs) == 1:
# Run from within github actions - no need to publish on pypi
gh_token = session.posargs[0]
publish_on_pypi = False
elif len(session.posargs) == 0:
# Run from local commandline - assume we want to manually publish on PyPi
publish_on_pypi = True
# keyring set https://docs.github.com/en/rest token
import keyring # (note that this import is not from the session env but the main nox env)
gh_token = keyring.get_password("https://docs.github.com/en/rest", "token")
assert len(gh_token) > 0
else:
raise ValueError("Only a single positional arg is allowed for now")
# publish the package on PyPi
if publish_on_pypi:
# keyring set https://upload.pypi.org/legacy/ your-username
# keyring set https://test.pypi.org/legacy/ your-username
session.install_reqs(phase="PyPi", phase_reqs=["twine"])
session.run2("twine upload dist/* -u smarie") # -r testpypi
# create the github release
session.install_reqs(phase="release", phase_reqs=["click", "PyGithub"])
session.run2("python ci_tools/github_release.py -s {gh_token} "
"--repo-slug {gh_org}/{gh_repo} -cf ./docs/changelog.md "
"-d https://{gh_org}.github.io/{gh_repo}/changelog.html {tag}"
"".format(gh_token=gh_token, gh_org=gh_org, gh_repo=gh_repo, tag=current_tag))
@nox.session(python=False)
def gha_list(session):
"""(mandatory arg: <base_session_name>) Prints all sessions available for <base_session_name>, for GithubActions."""
# see https://stackoverflow.com/q/66747359/7262247
# get the desired base session to generate the list for
if len(session.posargs) != 1:
raise ValueError("This session has a mandatory argument: <base_session_name>")
session_func = globals()[session.posargs[0]]
# list all sessions for this base session
try:
session_func.parametrize
except AttributeError:
sessions_list = ["%s-%s" % (session_func.__name__, py) for py in session_func.python]
else:
sessions_list = ["%s-%s(%s)" % (session_func.__name__, py, param)
for py, param in product(session_func.python, session_func.parametrize)]
# print the list so that it can be caught by GHA.
# Note that json.dumps is optional since this is a list of string.
# However it is to remind us that GHA expects a well-formatted json list of strings.
print(dumps(sessions_list))
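# Illustrative invocation of the session above (the exact session ids are an assumption;
# they depend on how power_session parametrizes the decorated functions):
#   nox -s gha_list -- tests
#   # prints a JSON list of session ids such as ["tests-2.7", "tests-3.5", ..., "tests-3.7"]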
# if __name__ == '__main__':
# # allow this file to be executable for easy debugging in any IDE
# nox.run(globals())
| 44.498195 | 120 | 0.682865 | [
"BSD-3-Clause"
] | texnofobix/python-genbadge | noxfile.py | 12,326 | Python |
from hotel_app.views import *
from rest_framework import routers
router = routers.DefaultRouter()
router.register(r'rooms', RoomAPIView)
router.register(r'employee', EmployeeAPIView)
router.register(r'resident', ResidentAPIView)
router.register(r'booking', BookingRecordAPIView)
router.register(r'cleaning', CleaningScheduleAPIView)
| 33.4 | 53 | 0.826347 | [
"MIT"
] | ErmShadow/ITMO_ICT_WebDevelopment_2020-2021 | students/K33401/Kunal_Shubham/lab3/hotel_project/hotel_app/router.py | 334 | Python |
import os
class FileCredentials:
def __init__(self, credentials_file):
        if credentials_file is None:
credentials_file = os.path.expanduser("~") + "/.pingboard"
self.credentials_file = credentials_file
self.client_id = None
self.client_secret = None
def load(self):
try:
credentials = dict(line.strip().split('=') for line in open(self.credentials_file))
self.client_id = credentials['client_id']
self.client_secret = credentials['client_secret']
return True
except Exception as e:
return False
class ArgsCredentials:
def __init__(self, id_key, secret_key, **kwargs):
self.client_id = None
self.client_secret = None
try:
self.client_id = kwargs[id_key]
self.client_secret = kwargs[secret_key]
except KeyError:
pass
def load(self):
        return self.client_id is not None and self.client_secret is not None
class Credentials:
def __init__(self, **kwargs):
self.chain = [
ArgsCredentials('client_id', 'client_secret',
**kwargs),
ArgsCredentials('PINGBOARD_CLIENT_ID', 'PINGBOARD_CLIENT_SECRET',
**os.environ),
FileCredentials(kwargs.get('credentials_file'))
]
def load(self):
loaded_credentials = None
for credentials in self.chain:
if credentials.load():
loaded_credentials = credentials
break
if not loaded_credentials:
return False
self.client_id = loaded_credentials.client_id
self.client_secret = loaded_credentials.client_secret
return True
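# A small usage sketch for the chain above (the id/secret values are placeholders):
#   creds = Credentials(client_id="my-id", client_secret="my-secret")
#   if creds.load():
#       print(creds.client_id, creds.client_secret)
# Resolution order: explicit keyword arguments, then the PINGBOARD_CLIENT_ID /
# PINGBOARD_CLIENT_SECRET environment variables, then a credentials file
# (default ~/.pingboard) containing "client_id=..." and "client_secret=..." lines.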
| 27.40625 | 95 | 0.600342 | [
"MIT"
] | tsouza/pyngboard | pyngboard/credentials.py | 1,754 | Python |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
__author__ = 'yanqiong'
import random
import secrets
from bisect import bisect_right
from sgqlc.operation import Operation
from pandas.core.internals import BlockManager
from tqsdk.ins_schema import ins_schema, _add_all_frags
RD = random.Random(secrets.randbits(128))  # initialize the RNG with a random seed, so that users launching several strategies at the same time do not end up with identical seeds
def _generate_uuid(prefix=''):
return f"{prefix + '_' if prefix else ''}{RD.getrandbits(128):032x}"
def _query_for_quote(symbol):
"""
    Return the query_pack that requests instrument information for the given symbol(s).
    This function should only be called by SDK code that actively requests instrument information.
    User-initiated instrument queries always start with PYSDK_api; during backtesting they carry a
    timestamp parameter and therefore must not go through this function.
"""
symbol_list = symbol if isinstance(symbol, list) else [symbol]
op = Operation(ins_schema.rootQuery)
query = op.multi_symbol_info(instrument_id=symbol_list)
_add_all_frags(query)
return {
"aid": "ins_query",
"query_id": _generate_uuid(prefix='PYSDK_quote_'),
"query": op.__to_graphql__()
}
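# A minimal usage sketch (the instrument id below is only an example):
#   pack = _query_for_quote("SHFE.cu2201")
#   # pack is a dict of the form
#   # {"aid": "ins_query", "query_id": "PYSDK_quote_<32 hex digits>", "query": "<GraphQL text>"}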
def _query_for_init():
"""
    Return the query for certain classes of instruments.
    todo: to stay compatible with the legacy api._data["quote"].items() style of access exposed to
    users, the exchanges should be restricted to ["SHFE", "DCE", "CZCE", "INE", "CFFEX", "KQ"]
"""
op = Operation(ins_schema.rootQuery)
query = op.multi_symbol_info(class_=["FUTURE", "INDEX", "OPTION", "COMBINE", "CONT"],
exchange_id=["SHFE", "DCE", "CZCE", "INE", "CFFEX", "KQ"])
_add_all_frags(query)
return op.__to_graphql__()
night_trading_table = {
"DCE.a": ["21:00:00", "23:00:00"],
"DCE.b": ["21:00:00", "23:00:00"],
"DCE.c": ["21:00:00", "23:00:00"],
"DCE.cs": ["21:00:00", "23:00:00"],
"DCE.m": ["21:00:00", "23:00:00"],
"DCE.y": ["21:00:00", "23:00:00"],
"DCE.p": ["21:00:00", "23:00:00"],
"DCE.l": ["21:00:00", "23:00:00"],
"DCE.v": ["21:00:00", "23:00:00"],
"DCE.pp": ["21:00:00", "23:00:00"],
"DCE.j": ["21:00:00", "23:00:00"],
"DCE.jm": ["21:00:00", "23:00:00"],
"DCE.i": ["21:00:00", "23:00:00"],
"DCE.eg": ["21:00:00", "23:00:00"],
"DCE.eb": ["21:00:00", "23:00:00"],
"DCE.rr": ["21:00:00", "23:00:00"],
"DCE.pg": ["21:00:00", "23:00:00"],
"CZCE.CF": ["21:00:00", "23:00:00"],
"CZCE.CY": ["21:00:00", "23:00:00"],
"CZCE.SA": ["21:00:00", "23:00:00"],
"CZCE.SR": ["21:00:00", "23:00:00"],
"CZCE.TA": ["21:00:00", "23:00:00"],
"CZCE.OI": ["21:00:00", "23:00:00"],
"CZCE.MA": ["21:00:00", "23:00:00"],
"CZCE.FG": ["21:00:00", "23:00:00"],
"CZCE.RM": ["21:00:00", "23:00:00"],
"CZCE.ZC": ["21:00:00", "23:00:00"],
"CZCE.TC": ["21:00:00", "23:00:00"],
"SHFE.rb": ["21:00:00", "23:00:00"],
"SHFE.hc": ["21:00:00", "23:00:00"],
"SHFE.fu": ["21:00:00", "23:00:00"],
"SHFE.bu": ["21:00:00", "23:00:00"],
"SHFE.ru": ["21:00:00", "23:00:00"],
"SHFE.sp": ["21:00:00", "23:00:00"],
"INE.nr": ["21:00:00", "23:00:00"],
"SHFE.cu": ["21:00:00", "25:00:00"],
"SHFE.al": ["21:00:00", "25:00:00"],
"SHFE.zn": ["21:00:00", "25:00:00"],
"SHFE.pb": ["21:00:00", "25:00:00"],
"SHFE.ni": ["21:00:00", "25:00:00"],
"SHFE.sn": ["21:00:00", "25:00:00"],
"SHFE.ss": ["21:00:00", "25:00:00"],
"SHFE.au": ["21:00:00", "26:30:00"],
"SHFE.ag": ["21:00:00", "26:30:00"],
"INE.sc": ["21:00:00", "26:30:00"],
}
def _quotes_add_night(quotes):
"""为 quotes 中应该有夜盘但是市价合约文件中没有夜盘的品种,添加夜盘时间"""
for symbol in quotes:
product_id = quotes[symbol].get("product_id")
if quotes[symbol].get("trading_time") and product_id:
key = f"{quotes[symbol].get('exchange_id')}.{product_id}"
if key in night_trading_table and (not quotes[symbol]["trading_time"].get("night")):
quotes[symbol]["trading_time"]["night"] = [night_trading_table[key]]
def _bisect_value(a, x, priority="right"):
"""
    Return the value at the index obtained from bisect_right(); when the insertion point is
    equidistant from its neighbours, priority decides whether the right-hand or the left-hand
    value is returned.
    a: must be a list that is already sorted in ascending order
bisect_right : Return the index where to insert item x in list a, assuming a is sorted.
"""
assert priority in ['left', 'right']
insert_index = bisect_right(a, x)
if 0 < insert_index < len(a):
left_dis = x - a[insert_index - 1]
right_dis = a[insert_index] - x
if left_dis == right_dis:
mid_index = insert_index - 1 if priority == "left" else insert_index
elif left_dis < right_dis:
mid_index = insert_index - 1
else:
mid_index = insert_index
else:
assert insert_index == 0 or insert_index == len(a)
mid_index = 0 if insert_index == 0 else (len(a) - 1)
return a[mid_index]
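# A brief usage sketch for _bisect_value (the values are illustrative only):
#   _bisect_value([0, 10, 20], 5)                    # -> 10, exact tie broken to the right (default)
#   _bisect_value([0, 10, 20], 5, priority="left")   # -> 0, exact tie broken to the left
#   _bisect_value([0, 10, 20], 12)                   # -> 10, 12 is closer to 10 than to 20
#   _bisect_value([0, 10, 20], 25)                   # -> 20, out-of-range values clamp to the ends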
class BlockManagerUnconsolidated(BlockManager):
"""mock BlockManager for unconsolidated, 不会因为自动合并同类型的 blocks 而导致 k 线数据不更新"""
def __init__(self, *args, **kwargs):
BlockManager.__init__(self, *args, **kwargs)
self._is_consolidated = False
self._known_consolidated = False
def _consolidate_inplace(self): pass
| 35.120567 | 108 | 0.577948 | [
"Apache-2.0"
] | Al-Wang/tqsdk-python | tqsdk/utils.py | 5,470 | Python |
# -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from qiskit_aqua.algorithms.components.optimizers import Optimizer
from ._nloptimizer import minimize
import logging
try:
import nlopt
except ImportError:
raise ImportWarning('nlopt cannot be imported')
logger = logging.getLogger(__name__)
class ESCH(Optimizer):
"""ESCH (evolutionary algorithm)
NLopt global optimizer, derivative-free
http://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/#esch-evolutionary-algorithm
"""
ESCH_CONFIGURATION = {
'name': 'ESCH',
'description': 'GN_ESCH Optimizer',
'input_schema': {
'$schema': 'http://json-schema.org/schema#',
'id': 'esch_schema',
'type': 'object',
'properties': {
'max_evals': {
'type': 'integer',
'default': 1000
}
},
'additionalProperties': False
},
'support_level': {
'gradient': Optimizer.SupportLevel.ignored,
'bounds': Optimizer.SupportLevel.supported,
'initial_point': Optimizer.SupportLevel.required
},
'options': ['max_evals'],
'optimizer': ['global']
}
def __init__(self, configuration=None):
super().__init__(configuration or self.ESCH_CONFIGURATION.copy())
def init_args(self):
pass
def optimize(self, num_vars, objective_function, gradient_function=None, variable_bounds=None, initial_point=None):
super().optimize(num_vars, objective_function, gradient_function, variable_bounds, initial_point)
return minimize(nlopt.GN_ESCH, objective_function, variable_bounds, initial_point, **self._options)
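    # A hedged usage sketch, not part of the original module; the variable_bounds format
    # (one (low, high) pair per variable) is an assumption about the nlopt wrapper:
    #   esch = ESCH()
    #   result = esch.optimize(2, lambda x: (x[0] - 1) ** 2 + x[1] ** 2,
    #                          variable_bounds=[(-2, 2), (-2, 2)],
    #                          initial_point=[0.0, 0.0])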
| 32.875 | 119 | 0.636248 | [
"Apache-2.0"
] | eugescu/aqua | qiskit_aqua/algorithms/components/optimizers/nlopts/esch.py | 2,367 | Python |
# Generated by Django 3.2.11 on 2022-02-10 16:05
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("cabins", "0020_auto_20211111_1825"),
("cabins", "0021_booking_is_declined"),
]
operations = []
| 19.142857 | 48 | 0.660448 | [
"MIT"
] | hovedstyret/indok-web | backend/apps/cabins/migrations/0022_merge_20220210_1705.py | 268 | Python |
#! /usr/bin/env python3
"""Unit tests for smartcard.readers.ReaderGroups
This test case can be executed individually, or with all other test cases
thru testsuite_framework.py.
__author__ = "http://www.gemalto.com"
Copyright 2001-2012 gemalto
Author: Jean-Daniel Aussel, mailto:[email protected]
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import platform
import unittest
from smartcard.System import readergroups
from smartcard.scard import resourceManager
if 'winscard' == resourceManager and \
-1 == platform.platform().find('Windows-7'):
class testcase_readergroups(unittest.TestCase):
"""Test smartcard framework readersgroups."""
pinpadgroup = 'Pinpad$Readers'
biogroup = 'Biometric$Readers'
def testcase_readergroup_add(self):
"""tests groups=groups+[newgroups]"""
# take a snapshot of current groups
groupssnapshot = list(readergroups())
groups = readergroups()
# add pinpad group
groups = groups + [self.pinpadgroup]
self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])
# add pinpad a second time and biometric once
groups = groups + [self.biogroup, self.pinpadgroup]
self.assertEqual(
groups, groupssnapshot + [self.pinpadgroup, self.biogroup])
# clean-up
groups.remove(self.biogroup)
groups.remove(self.pinpadgroup)
def testcase_readergroup_iadd(self):
"""test groups+=[newgroups]"""
# take a snapshot of current groups
groupssnapshot = list(readergroups())
groups = readergroups()
# add pinpad group
groups += [self.pinpadgroup]
self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])
# add pinpad a second time and biometric once
groups += [self.biogroup, self.pinpadgroup]
self.assertEqual(
groups, groupssnapshot + [self.pinpadgroup, self.biogroup])
# clean-up
groups.remove(self.biogroup)
groups.remove(self.pinpadgroup)
def testcase_readergroup_radd(self):
"""test groups=[newgroups]+groups"""
# take a snapshot of current groups
groupssnapshot = list(readergroups())
groups = readergroups()
# add pinpad group
zgroups = [self.pinpadgroup] + groups
self.assertEqual(groups, groupssnapshot)
self.assertEqual(zgroups, groupssnapshot + [self.pinpadgroup])
self.assertTrue(isinstance(zgroups, type([])))
self.assertTrue(isinstance(groups, type(readergroups())))
            # add pinpad twice and biometric once
zgroups = \
[self.pinpadgroup, self.biogroup, self.pinpadgroup] + groups
self.assertEqual(groups, groupssnapshot)
self.assertEqual(
zgroups, groupssnapshot + [self.pinpadgroup, self.biogroup])
self.assertTrue(isinstance(zgroups, type([])))
self.assertTrue(isinstance(groups, type(readergroups())))
def testcase_readergroup_append(self):
"""test groups.append(newgroups)"""
# take a snapshot of current groups
groupssnapshot = list(readergroups())
groups = readergroups()
# add pinpad group
groups.append(self.pinpadgroup)
self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])
# add pinpad a second time
groups.append(self.pinpadgroup)
self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])
# add biometric once
groups.append(self.biogroup)
self.assertEqual(
groups, groupssnapshot + [self.pinpadgroup, self.biogroup])
# clean-up
groups.remove(self.biogroup)
groups.remove(self.pinpadgroup)
def testcase_readergroup_insert(self):
"""test groups.insert(i,newgroups)"""
# take a snapshot of current groups
groupssnapshot = list(readergroups())
groups = readergroups()
# add pinpad group
groups.insert(0, self.pinpadgroup)
self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])
# add pinpad a second time
groups.insert(1, self.pinpadgroup)
self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])
# add biometric once
groups.insert(1, self.biogroup)
self.assertEqual(
groups, groupssnapshot + [self.pinpadgroup, self.biogroup])
# clean-up
groups.remove(self.biogroup)
groups.remove(self.pinpadgroup)
def suite():
suite1 = unittest.makeSuite(testcase_readergroups)
return unittest.TestSuite((suite1))
if __name__ == '__main__':
unittest.main()
| 35.197531 | 76 | 0.632936 | [
"BSD-3-Clause"
] | kyletanyag/LL-Smartcard | cacreader/pyscard-2.0.2/smartcard/test/framework/testcase_readergroups.py | 5,702 | Python |
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import logging as log
from io import IOBase
import networkx as nx
import numpy as np
from openvino.tools.mo.ops.elementwise import Mul
from openvino.tools.mo.ops.split import AttributedVariadicSplit
from openvino.tools.mo.front.common.partial_infer.utils import float_array, int64_array
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.extractor import add_outputs_identity
from openvino.tools.mo.front.kaldi.loader.utils import find_next_tag, read_placeholder, find_next_component, get_name_from_path, \
find_end_of_component, end_of_nnet_tag, read_binary_integer32_token, get_parameters, read_token_value, \
collect_until_token, collect_until_token_and_read, create_edge_attrs, get_args_for_specifier
from openvino.tools.mo.front.kaldi.utils import read_binary_vector
from openvino.tools.mo.graph.graph import Node, Graph
from openvino.tools.mo.ops.const import Const
from openvino.tools.mo.utils.error import Error
from openvino.tools.mo.utils.utils import refer_to_faq_msg
def load_parallel_component(file_descr, graph: Graph, prev_layer_id):
"""
Load ParallelComponent of the Kaldi model.
ParallelComponent contains parallel nested networks.
VariadicSplit is inserted before nested networks.
Outputs of nested networks concatenate with layer Concat.
:param file_descr: descriptor of the model file
:param graph: graph with the topology.
:param prev_layer_id: id of the input layers for parallel component layer
:return: id of the concat layer - last layer of the parallel component layers
"""
nnet_count = read_token_value(file_descr, b'<NestedNnetCount>')
log.debug('Model contains parallel component with {} nested networks'.format(nnet_count))
split_points = []
outputs = []
inputs = []
for i in range(nnet_count):
read_token_value(file_descr, b'<NestedNnet>')
collect_until_token(file_descr, b'<Nnet>')
g = Graph()
load_kalid_nnet1_model(g, file_descr, 'Nested_net_{}'.format(i))
# input to nnet1 models is of a rank 1 but we also insert batch_size to 0th axis
# 1st axis contains input_size of the nested subnetwork
# we split input from the main network to subnetworks
input_node = Node(g, 'Parameter')
split_points.append(input_node['shape'][1])
g.remove_node(input_node.id)
mapping = {node: graph.unique_id(node) for node in g.nodes(data=False) if node in graph}
g = nx.relabel_nodes(g, mapping)
for val in mapping.values():
g.node[val]['name'] = val
graph.add_nodes_from(g.nodes(data=True))
graph.add_edges_from(g.edges(data=True))
sorted_nodes = tuple(nx.topological_sort(g))
outputs.append(Node(graph, sorted_nodes[-1]))
inputs.append(Node(graph, sorted_nodes[0]))
split_id = graph.unique_id(prefix='NestedNets/VariadicSplit')
attrs = {'out_ports_count': nnet_count, 'size_splits': split_points, 'axis': 1, 'name': split_id}
variadic_split_node = AttributedVariadicSplit(graph, attrs).create_node()
prev_layer_node = Node(graph, prev_layer_id)
prev_layer_node.add_output_port(0)
graph.create_edge(prev_layer_node, variadic_split_node, 0, 0, create_edge_attrs(prev_layer_id, variadic_split_node.id, prev_layer_id))
concat_id = graph.unique_id(prefix='Concat')
graph.add_node(concat_id, parameters=None, op='concat', kind='op')
concat_node = Node(graph, concat_id)
# Connect each output of variadic_split_node to each subnetwork's inputs in ParallelComponent
# and each subnetwork's output to concat_node
for i, (input_node, output_node) in enumerate(zip(inputs, outputs)):
output_node.add_output_port(0)
concat_node.add_input_port(i)
graph.create_edge(output_node, concat_node, 0, i, create_edge_attrs(output_node.id, concat_id, output_node.id, i, 0))
graph.create_edge(variadic_split_node, input_node, i, 0, create_edge_attrs(variadic_split_node.id, input_node.id, variadic_split_node.id, 0, i))
return concat_id
def load_kaldi_model(graph, nnet_path):
"""
Structure of the file is the following:
magic-number(16896)<Nnet> <Next Layer Name> weights etc.
:param nnet_path:
:return:
"""
nnet_name = None
if isinstance(nnet_path, str):
file_desc = open(nnet_path, "rb")
nnet_name = get_name_from_path(nnet_path)
elif isinstance(nnet_path, IOBase):
file_desc = nnet_path
else:
raise Error('Unsupported type of Kaldi model')
tag = find_next_tag(file_desc)
# start new model / submodel
if tag == '<Nnet>':
load_function = load_kalid_nnet1_model
elif tag == '<TransitionModel>':
while tag != '<Nnet>' and tag != '<Nnet3>':
tag = find_next_tag(file_desc)
if tag == '<Nnet3>':
load_function = load_kaldi_nnet3_model
else:
load_function = load_kalid_nnet2_model
elif tag == '<Nnet3>':
load_function = load_kaldi_nnet3_model
else:
raise Error('Kaldi model should start with <Nnet> or <TransitionModel> tag. ',
refer_to_faq_msg(89))
read_placeholder(file_desc, 1)
return load_function(graph, file_desc, nnet_name)
def load_kalid_nnet1_model(graph, file_descr, name):
prev_layer_id = 'Parameter'
graph.add_node(prev_layer_id, name=prev_layer_id, kind='op', op='Parameter', parameters=None)
    # find the output layer; there can be only one due to the chain structure of the nnet1 model
output_layer = None
while True:
component_type = find_next_component(file_descr)
if component_type == end_of_nnet_tag.lower()[1:-1]:
break
layer_o = read_binary_integer32_token(file_descr)
layer_i = read_binary_integer32_token(file_descr)
if component_type == 'parallelcomponent':
prev_layer_id = load_parallel_component(file_descr, graph, prev_layer_id)
find_end_of_component(file_descr, component_type)
continue
start_index = file_descr.tell()
end_tag, end_index = find_end_of_component(file_descr, component_type)
end_index -= len(end_tag)
layer_id = graph.unique_id(prefix=component_type)
graph.add_node(layer_id,
parameters=get_parameters(file_descr, start_index, end_index),
op=component_type,
kind='op',
layer_i=layer_i,
layer_o=layer_o)
if hasattr(graph, 'op_names_statistic'):
graph.op_names_statistic[component_type] += 1
prev_node = Node(graph, prev_layer_id)
if prev_node.op == 'Parameter':
prev_node['shape'] = int64_array([1, layer_i])
prev_node.add_output_port(0)
Node(graph, layer_id).add_input_port(0)
graph.create_edge(prev_node, Node(graph, layer_id), 0, 0, create_edge_attrs(prev_layer_id, layer_id, prev_layer_id))
prev_layer_id = layer_id
output_layer = layer_id
log.debug('{} (type is {}) was loaded'.format(prev_layer_id, component_type))
    # Tensor name information corresponding to a node is stored on its outgoing edges.
# As output nodes do not have outgoing edges, fake outputs are required. In the following code
# for each output Identity node is added, and tensor name for the output is kept
# on (output, fake output) edge. After Result nodes adding transformation fake outputs
# are deleted from graph.
assert output_layer is not None, "Output layer is not found in graph"
add_outputs_identity(graph, [output_layer], lambda g, output, fake_output: g.create_edge(
Node(g, output), Node(g, fake_output), 0, 0, create_edge_attrs(output, fake_output, output)))
def load_kalid_nnet2_model(graph, file_descr, nnet_name):
input_name = 'Input'
graph.add_node(input_name, name=input_name, kind='op', op='Parameter', parameters=None, shape=None)
prev_layer_id = input_name
all_components = load_components(file_descr, graph)
used_layers = set()
for layer_id in all_components:
prev_node = Node(graph, prev_layer_id)
if prev_node.op == 'Parameter':
parameters = Node(graph, layer_id).parameters
input_dim = read_token_value(parameters, b'<InputDim>')
prev_node['shape'] = int64_array([1, input_dim])
prev_node.add_output_port(0)
Node(graph, layer_id).add_input_port(0)
graph.create_edge(prev_node, Node(graph, layer_id), 0, 0, create_edge_attrs(prev_layer_id, layer_id, prev_layer_id))
used_layers.add(prev_layer_id)
prev_layer_id = layer_id
log.debug('{} and {} were connected'.format(prev_layer_id, layer_id))
    # Tensor name information corresponding to a node is stored on its outgoing edges.
# As output nodes do not have outgoing edges, fake outputs are required. In the following code
# for each output Identity node is added, and tensor name for the output is kept
# on (output, fake output) edge. After Result nodes adding transformation fake outputs
# are deleted from graph.
output_layers = graph.nodes - used_layers
add_outputs_identity(graph, output_layers, lambda g, output, fake_output: g.create_edge(
Node(g, output), Node(g, fake_output), 0, 0, create_edge_attrs(output, fake_output, output)))
def load_kaldi_nnet3_model(graph, file_descr, nnet_name):
file_descr.read(1)
component_layer_map = load_topology_map(file_descr, graph)
# add information for shape calculation for MemoryOffset
# shape calculation for MemoryOffset can't be done through shape of previous layer because
# it is separated in 2 parts to remove cycle from graph
for node in graph.get_op_nodes(**{'op': 'Parameter'}):
for o_n_name, params in node.get_outputs():
o_n = Node(graph, o_n_name)
if o_n['op'] == 'MemoryOffset':
# don't take batch from Parameter, it will be overwritten
# take only second dimension because we have only 2 dimensions
o_n['parameters']['element_size'] = int64_array([1, node.shape[1]])
load_components(file_descr, graph, component_layer_map)
load_priors(file_descr, graph)
def load_priors(file_descr, graph):
try:
collect_until_token(file_descr, b'<Priors>')
except Error:
# just ignore if priors were not found
return
if graph.graph['cmd_params'].counts is not None:
graph.graph['priors'] = read_binary_vector(file_descr)
else:
log.error("Model contains Prior values, if you want to embed them into the generated IR add option --counts=\"\" to command line",
extra={'is_warning': True})
def load_components(file_descr, graph, component_layer_map=None):
num_components = collect_until_token_and_read(file_descr, b'<NumComponents>')
log.debug('Network contains {} components'.format(num_components))
is_nnet3 = False if component_layer_map is None else True
if not is_nnet3:
collect_until_token(file_descr, b'<Components>')
all_components = list()
name = ""
for _ in range(num_components):
if is_nnet3:
name = collect_until_token_and_read(file_descr, b'<ComponentName>', np.string_)
component_type = find_next_component(file_descr)
if component_type == end_of_nnet_tag.lower()[1:-1]:
break
start_index = file_descr.tell()
end_tag, end_index = find_end_of_component(file_descr, component_type)
# read dim info where possible to simplify shape calculation for MemoryOffset
# shape calculation for MemoryOffset can't be done through shape of previous layer because
# it is separated in 2 parts to remove cycle from graph
file_descr.seek(start_index)
dim = 0
dim_words = {b'<Dim>', b'<InputDim>'}
for dim_word in dim_words:
try:
collect_until_token(file_descr, dim_word, size_search_zone=end_index - start_index)
cur_index = file_descr.tell()
if start_index < cur_index < end_index:
dim = read_binary_integer32_token(file_descr)
break
else:
file_descr.seek(start_index)
except Error:
file_descr.seek(start_index)
if is_nnet3:
if name in component_layer_map:
layer_id = component_layer_map[name][0]
for layer in component_layer_map[name]:
node = Node(graph, layer)
node['parameters'] = get_parameters(file_descr, start_index, end_index)
node['op'] = component_type
# Read dim info where possible to simplify shape calculation for MemoryOffset
for o_n_name, params in node.get_outputs():
o_n = Node(graph, o_n_name)
if o_n['op'] == 'MemoryOffset' and dim != 0:
o_n['parameters']['element_size'] = int64_array([1, dim])
else:
raise Error("Something wrong with layer {}".format(name))
else:
layer_id = graph.unique_id(prefix=component_type)
graph.add_node(layer_id,
parameters=get_parameters(file_descr, start_index, end_index),
op=component_type,
kind='op')
if hasattr(graph, 'op_names_statistic'):
graph.op_names_statistic[component_type] += 1
all_components.append(layer_id)
log.debug('{} (type is {}) was loaded'.format(layer_id, component_type))
return all_components
def load_topology_map(file_descr, graph):
not_finished = True
component_layer_map = {}
layer_node_map = {}
while not_finished:
not_finished = read_node(file_descr, graph, component_layer_map, layer_node_map)
return component_layer_map
def read_node(file_descr, graph, component_layer_map, layer_node_map):
s = file_descr.readline()
if s == b'\n':
return False
tokens = s.split(b' ')
if tokens[0] == b'input-node':
in_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
in_name = str(in_name).strip('b').replace('\'', "")
in_shape = mo_array([1, s[s.find(b'dim=') + len(b'dim='):].split(b' ')[0]], dtype=np.int)
if in_name not in layer_node_map:
graph.add_node(in_name, name=in_name, kind='op', op='Parameter', parameters=None, shape=in_shape)
layer_node_map[in_name] = in_name
else:
Node(graph, in_name)['op'] = 'Parameter'
Node(graph, in_name)['shape'] = in_shape
elif tokens[0] == b'component-node':
layer_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
layer_name = str(layer_name).strip('b').replace('\'', "")
component_name = s[s.find(b'component=') + len(b'component='):].split(b' ')[0]
if layer_name not in layer_node_map:
node_name = graph.unique_id(prefix=layer_name)
graph.add_node(node_name,
parameters=None,
op=None,
kind='op')
layer_node_map[layer_name] = node_name
else:
node_name = layer_node_map[layer_name]
if component_name in component_layer_map:
component_layer_map[component_name].append(node_name)
else:
component_layer_map[component_name] = [node_name]
# parse input
in_node_id = parse_input_for_node(s[s.find(b'input=') + 6:], graph, layer_node_map)
# don't create cyclic edges node to itself to avoid removing later
if in_node_id != node_name:
out_port = len(Node(graph, in_node_id).out_nodes())
in_port = len(Node(graph, node_name).in_nodes())
Node(graph, node_name).add_input_port(in_port)
Node(graph, in_node_id).add_output_port(out_port, skip_if_exist=True)
graph.add_edge(in_node_id, node_name, **create_edge_attrs(in_node_id, node_name, in_node_id, in_port, out_port))
elif tokens[0] == b'output-node':
layer_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
layer_name = str(layer_name).strip('b').replace('\'', "")
node_name = graph.unique_id(prefix=layer_name)
graph.add_node(node_name,
parameters=None,
op='Identity',
kind='op')
out_name = graph.unique_id(prefix=node_name + "_out")
graph.add_node(out_name,
parameters=None,
op='Result',
kind='op')
Node(graph, node_name).add_input_port(0)
Node(graph, node_name).add_output_port(0)
Node(graph, out_name).add_input_port(0)
graph.add_edge(node_name, out_name, **create_edge_attrs(node_name, out_name, node_name))
# parse input
in_node_id = parse_input_for_node(s[s.find(b'input=') + len(b'input='):], graph, layer_node_map)
out_port = len(Node(graph, in_node_id).out_nodes())
Node(graph, in_node_id).add_output_port(out_port)
graph.create_edge(Node(graph, in_node_id), Node(graph, node_name), out_port, 0, create_edge_attrs(in_node_id, node_name, in_node_id, 0, out_port))
objective_type = s[s.find(b'objective=') + 10:].split(b' ')[0].split(b'\n')[0]
if objective_type != b'linear':
raise Error("Unsupported objective-type for output {}".format(node_name))
elif tokens[0] == b'dim-range-node':
layer_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
layer_name = str(layer_name).strip('b').replace('\'', "")
offset = int(s[s.find(b'dim-offset=') + len(b'dim-offset='):].split(b' ')[0])
dim = int(s[s.find(b'dim=') + len(b'dim='):].split(b' ')[0])
if layer_name in layer_node_map:
node_name = layer_node_map[layer_name]
node = Node(graph, node_name)
node['parameters'] = {'offset': mo_array([offset]), 'dim': mo_array([dim]), 'axis': mo_array([1])}
node['op'] = 'Crop'
else:
node_name = graph.unique_id(prefix=layer_name)
graph.add_node(node_name,
parameters={'offset': mo_array([offset]), 'dim': mo_array([dim]), 'axis': mo_array([1])},
op='Crop',
kind='op')
layer_node_map[layer_name] = node_name
node = Node(graph, node_name)
in_node_id = parse_input_for_node(s[s.find(b'input-node=') + len(b'input-node='):], graph, layer_node_map)
out_port = len(Node(graph, in_node_id).out_nodes())
in_port = len(Node(graph, node_name).in_nodes())
node.add_input_port(in_port)
Node(graph, in_node_id).add_output_port(out_port)
graph.create_edge(Node(graph, in_node_id), node, out_port, in_port, create_edge_attrs(in_node_id, node_name, in_node_id, in_port, out_port))
# read dim info where possible to simplify shape calculation for MemoryOffset
# shape calculation for MemoryOffset can't be done through shape of previous layer because
# it is separated in 2 parts to remove cycle from graph
for o_n_name, params in node.get_outputs():
o_n = Node(graph, o_n_name)
if o_n['op'] == 'MemoryOffset':
o_n['parameters']['element_size'] = int64_array([1, dim])
else:
raise Error("Unsupported node specifier {}".format(tokens[0]))
return True
def parse_input_for_node(string, graph, component_layer_map):
return parse_specifier(string, graph, component_layer_map)
def parse_specifier(string, graph, layer_node_map):
pos = string.find(b'(')
if pos == -1:
# node name
input_name = str(string.split(b' ')[0]).strip('b').replace("\'", '').replace('\\n', '')
if input_name not in layer_node_map:
node_name = graph.unique_id(prefix=input_name)
graph.add_node(node_name, parameters=[], op="", kind='op')
layer_node_map[input_name] = node_name
else:
node_name = layer_node_map[input_name]
return node_name
spec = string[:pos]
args = get_args_for_specifier(string[pos:])
if spec == b'Append':
nodes = []
for i in range(len(args)):
nodes.append(parse_specifier(args[i], graph, layer_node_map))
layer_name = 'Append_'
for node in nodes:
layer_name = layer_name + node + "_"
if layer_name not in layer_node_map:
concat_name = graph.unique_id(prefix=layer_name)
graph.add_node(concat_name,
parameters=None,
op='concat',
kind='op')
layer_node_map[layer_name] = concat_name
i = 0
Node(graph, concat_name).add_sequence_of_ports('in', range(len(nodes)))
for node in nodes:
out_port = len(Node(graph, node).out_nodes())
Node(graph, node).add_output_port(out_port)
graph.create_edge(Node(graph, node), Node(graph, concat_name), out_port, i, create_edge_attrs(node, concat_name, node, i, out_port))
i = i + 1
else:
concat_name = layer_node_map[layer_name]
return concat_name
elif spec == b'Offset':
node = parse_specifier(args[0], graph, layer_node_map)
t = int(args[1])
if len(args) > 2:
raise Error("ModelOptimizer supports only 2 arguments for Offset")
layer_name = 'Offset_' + node + '_'
if t < 0:
layer_name = layer_name + '_' + str(-t)
else:
layer_name = layer_name + str(t)
if layer_name not in layer_node_map:
memory_name = graph.unique_id(prefix=layer_name)
layer_node_map[layer_name] = memory_name
memory_name_2 = memory_name + '_out'
graph.add_node(memory_name,
parameters=dict(t=t, pair_name=memory_name_2, has_default=False),
op='MemoryOffset',
kind='op')
out_port = len(Node(graph, node).out_nodes())
in_port = len(Node(graph, memory_name).in_nodes())
Node(graph, memory_name).add_input_port(in_port)
Node(graph, node).add_output_port(out_port, skip_if_exist=True)
graph.create_edge(Node(graph, node), Node(graph, memory_name), out_port, in_port, create_edge_attrs(node, memory_name, node, in_port, out_port))
else:
memory_name = layer_node_map[layer_name]
return memory_name
elif spec == b'Sum':
nodes = []
for i in range(len(args)):
nodes.append(parse_specifier(args[i], graph, layer_node_map))
layer_name = 'Sum_'
for node in nodes:
layer_name = layer_name + node + "_"
if layer_name not in layer_node_map:
sum_name = graph.unique_id(prefix=layer_name)
graph.add_node(sum_name, parameters=None, op='Add', kind='op')
layer_node_map[layer_name] = sum_name
else:
sum_name = layer_node_map[layer_name]
for i, node in enumerate(nodes):
out_port = len(Node(graph, node).out_nodes())
Node(graph, node).add_output_port(out_port, skip_if_exist=True)
Node(graph, sum_name).add_input_port(i)
graph.add_edge(node, sum_name, **create_edge_attrs(node, sum_name, node, i))
return sum_name
elif spec == b'IfDefined':
node_id = parse_specifier(args[0], graph, layer_node_map)
node = Node(graph, node_id)
if node.op == 'MemoryOffset':
node['parameters']['has_default'] = True
return node_id
elif spec == b'ReplaceIndex':
node = parse_specifier(args[0], graph, layer_node_map)
return node
elif spec == b'Scale':
node_name = parse_specifier(args[1], graph, layer_node_map)
scale_value = float(args[0])
layer_name = '{}/Mul/{}'.format(node_name, scale_value)
if layer_name not in layer_node_map:
scale_name = graph.unique_id(prefix=layer_name)
scale_node = Mul(graph, {'name': scale_name}).create_node()
layer_node_map[layer_name] = scale_name
scale_const_name = 'Const_{}'.format(scale_value)
const_node = Const(graph, {'name': scale_const_name, 'value': float_array([scale_value])}).create_node()
node = Node(graph, node_name)
graph.create_edge(const_node, scale_node, 0, 0, create_edge_attrs(const_node.id, scale_node.id, const_node.id))
out_port = len(node.out_nodes())
graph.create_edge(node, scale_node, out_port, 1, create_edge_attrs(node_name, scale_node.id, node_name, 1, out_port))
else:
scale_name = layer_node_map[layer_name]
return scale_name
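# Illustrative note (the descriptor below is a made-up example of Kaldi nnet3 syntax):
# parsing b"Append(Offset(tdnn1.affine, -1), tdnn1.affine)" creates a MemoryOffset node with
# t = -1 fed by tdnn1.affine, wires it together with tdnn1.affine into a 'concat' node,
# and returns the id of that concat node.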
| 45.217469 | 156 | 0.643277 | [
"Apache-2.0"
] | 3Demonica/openvino | tools/mo/openvino/tools/mo/front/kaldi/loader/loader.py | 25,367 | Python |
#!/usr/bin/env python
"""
SHUTDOWN.PY
Shutdown Plugin
(C) 2015, rGunti
"""
import dot3k.lcd as lcd
import dot3k.backlight as backlight
import time, datetime, copy, math, psutil
import sys
import os
from dot3k.menu import Menu, MenuOption
class Shutdown(MenuOption):
def __init__(self):
self.last = self.millis()
MenuOption.__init__(self)
def redraw(self, menu):
lcd.clear()
lcd.set_cursor_position(3,1)
lcd.write("Bye (^_^)/")
for x in reversed(range(127)):
backlight.rgb(0, x * 2, 0)
lcd.clear()
os.system("halt")
sys.exit(0)
class Reboot(MenuOption):
def __init__(self):
self.last = self.millis()
MenuOption.__init__(self)
def redraw(self, menu):
lcd.clear()
lcd.set_cursor_position(3,1)
lcd.write("Bye (^_^)/")
for x in reversed(range(127)):
backlight.rgb(0, x * 2, 0)
lcd.clear()
os.system("reboot")
sys.exit(0)
class QuitScript(MenuOption):
def __init__(self):
self.last = self.millis()
MenuOption.__init__(self)
def redraw(self, menu):
lcd.clear()
lcd.set_cursor_position(3,1)
lcd.write("Bye (^_^)/")
for x in reversed(range(127)):
backlight.rgb(0, x * 2, 0)
lcd.clear()
sys.exit(0) | 20.189655 | 41 | 0.672075 | [
"MIT"
] | rGunti/Yuki-Chan-Music-Player | display/plugins/Shutdown.py | 1,171 | Python |
from collections.abc import Iterable
from itertools import combinations
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next
num_procs = 24
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type):
assert not name.startswith("_"), name
s = msat_declare_function(menv, name, c_type)
s = msat_make_constant(menv, s)
x_s = msat_declare_function(menv, name_next(name), c_type)
x_s = msat_make_constant(menv, x_s)
return s, x_s
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
m_one = msat_make_number(menv, "-1")
arg1 = msat_make_times(menv, arg1, m_one)
return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
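# The helpers above derive the remaining connectives from the primitives imported at the top of this file:
#   a - b   is encoded as a + (-1) * b
#   a < b   is encoded as not (a >= b)
#   a >= b  is encoded as b <= a
#   a > b   is encoded as not (a <= b)
#   a -> b  is encoded as (not a) or b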
def diverging_symbs(menv: msat_env) -> frozenset:
real_type = msat_get_rational_type(menv)
delta = msat_declare_function(menv, delta_name, real_type)
delta = msat_make_constant(menv, delta)
return frozenset([delta])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
msat_term, msat_term):
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
real_type = msat_get_rational_type(menv)
delta, x_delta = decl_consts(menv, delta_name, real_type)
transm_time, x_transm_time = decl_consts(menv, "tot_transm_time",
real_type)
curr2next = {delta: x_delta, transm_time: x_transm_time}
mgr = TokenManager("mgr", menv, enc, delta)
stations = [Station("st{}".format(i), menv, enc, mgr, delta)
for i in range(num_procs)]
for s, x_s in mgr.symb2next.items():
curr2next[s] = x_s
for comp in stations:
for s, x_s in comp.symb2next.items():
assert s not in curr2next.keys()
curr2next[s] = x_s
zero = msat_make_number(menv, "0")
# init: tot_transm_time = 0
init = msat_make_equal(menv, transm_time, zero)
# invar: delta >= 0
init = msat_make_and(menv, init, msat_make_geq(menv, delta, zero))
trans = msat_make_geq(menv, x_delta, zero)
# only 1 station moves
for s0, s1 in combinations(stations, 2):
trans = msat_make_and(menv, trans,
msat_make_or(menv, s0.stutter, s1.stutter))
# sync stations and mgr
st_acquire = stations[0].acquire
for st in stations[1:]:
st_acquire = msat_make_or(menv, st_acquire, st.acquire)
trans = msat_make_and(menv, trans,
msat_make_iff(menv, mgr.acquire, st_acquire))
st_release = stations[0].release
for st in stations[1:]:
st_release = msat_make_or(menv, st_release, st.release)
trans = msat_make_and(menv, trans,
msat_make_iff(menv, mgr.release, st_release))
# (mgr.counting & mgr.idle') -> total_transm_time' = total_transm_time + mgr.c
lhs = msat_make_and(menv, mgr.counting, mgr.x_idle)
rhs = msat_make_equal(menv, x_transm_time,
msat_make_plus(menv, transm_time, mgr.c))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, lhs, rhs))
# !(mgr.counting & mgr.idle') -> total_transm_time' = total_transm_time
lhs = msat_make_not(menv, lhs)
rhs = msat_make_equal(menv, x_transm_time, transm_time)
trans = msat_make_and(menv, trans,
msat_make_impl(menv, lhs, rhs))
init = msat_make_and(menv, init, mgr.init)
trans = msat_make_and(menv, trans, mgr.trans)
for s in stations:
init = msat_make_and(menv, init, s.init)
trans = msat_make_and(menv, trans, s.trans)
# (G F (mgr.counting & mgr.idle')) -> G F total_transm_time < 10
lhs = enc.make_G(enc.make_F(msat_make_and(menv, mgr.counting,
enc.make_X(mgr.idle))))
rhs = msat_make_lt(menv, transm_time, msat_make_number(menv, "10"))
rhs = enc.make_G(enc.make_F(rhs))
ltl = msat_make_impl(menv, lhs, rhs)
return TermMap(curr2next), init, trans, ltl
class Module:
"""Synchronous component"""
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
*args, **kwargs):
self.name = name
self.menv = menv
self.enc = enc
self.symb2next = {}
true = msat_make_true(menv)
self.init = true
self.trans = true
def _symb(self, v_name, v_type):
v_name = "{}_{}".format(self.name, v_name)
return decl_consts(self.menv, v_name, v_type)
def _enum(self, v_name: str, enum_size: int):
bool_type = msat_get_bool_type(self.menv)
num_bits = ceil(log(enum_size, 2))
b_vars = []
for idx in range(num_bits):
c_name = "{}{}".format(v_name, idx)
b_vars.append(tuple(self._symb(c_name, bool_type)))
vals = []
x_vals = []
for enum_val in range(enum_size):
bit_val = format(enum_val, '0{}b'.format(num_bits))
assert len(bit_val) == num_bits
assert all(c in {'0', '1'} for c in bit_val)
assign = [b_vars[idx] if c == '1' else
(msat_make_not(self.menv, b_vars[idx][0]),
msat_make_not(self.menv, b_vars[idx][1]))
for idx, c in enumerate(reversed(bit_val))]
pred = assign[0][0]
x_pred = assign[0][1]
for it in assign[1:]:
pred = msat_make_and(self.menv, pred, it[0])
x_pred = msat_make_and(self.menv, x_pred, it[1])
vals.append(pred)
x_vals.append(x_pred)
assert len(vals) == enum_size
assert len(x_vals) == enum_size
return b_vars, vals, x_vals
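# Illustrative sketch (an assumption added for clarity, not part of the original
# model): the bit encoding used by Module._enum above can be previewed with plain
# Python, independently of mathsat. For enum_size = 3 it allocates
# ceil(log2(3)) = 2 boolean symbols and maps the enum values to the bit strings
# 00, 01 and 10 (read LSB-first inside _enum):
#
#     from math import ceil, log
#     enum_size = 3
#     num_bits = ceil(log(enum_size, 2))
#     for enum_val in range(enum_size):
#         print(enum_val, format(enum_val, '0{}b'.format(num_bits)))
#     # prints: 0 00 / 1 01 / 2 10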
class TokenManager(Module):
"""TokenManager module"""
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder, delta):
super().__init__(name, menv, enc)
real_type = msat_get_rational_type(menv)
bool_type = msat_get_bool_type(menv)
loc, x_loc = self._symb("l", bool_type)
evt_symbs, evts, x_evts = self._enum("evt", 3)
c, x_c = self._symb("c", real_type)
timeout, x_timeout = self._symb("timeout", real_type)
self.timeout = timeout
self.x_timeout = x_timeout
self.c = c
self.idle = loc
self.counting = msat_make_not(menv, loc)
self.x_idle = x_loc
self.x_counting = msat_make_not(menv, x_loc)
self.acquire = evts[0]
self.release = evts[1]
self.stutter = evts[2]
self.symb2next = {loc: x_loc, c: x_c, timeout: x_timeout}
for s, x_s in evt_symbs:
assert s not in self.symb2next
self.symb2next[s] = x_s
zero = msat_make_number(menv, "0")
# bound evt
bound_evt = evts[0]
x_bound_evt = x_evts[0]
for evt, x_evt in zip(evts[1:], x_evts[1:]):
bound_evt = msat_make_or(menv, bound_evt, evt)
x_bound_evt = msat_make_or(menv, x_bound_evt, x_evt)
self.init = bound_evt
self.trans = x_bound_evt
# idle & c = 0 & timeout = 0
self.init = msat_make_and(
menv,
msat_make_and(menv, self.init, self.idle),
msat_make_and(menv,
msat_make_equal(menv, c, zero),
msat_make_equal(menv, timeout, zero)))
# invar: counting -> c <= timeout
rhs = msat_make_leq(menv, c, timeout)
self.init = msat_make_and(menv, self.init,
msat_make_impl(menv, self.counting, rhs))
rhs = msat_make_leq(menv, x_c, x_timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, self.x_counting, rhs))
# (delta > 0 | stutter) -> c' = c + delta & l' = l & timeout' = timeout
lhs = msat_make_or(menv, self.stutter,
msat_make_gt(menv, delta, zero))
rhs = msat_make_and(
menv,
msat_make_and(menv, msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_c,
msat_make_plus(menv, c, delta))),
msat_make_equal(menv, x_timeout, timeout))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, msat_make_equal(menv, delta, zero),
msat_make_or(menv, self.acquire, self.release))
# (idle) -> (acquire & counting' & c' = 0)
lhs = msat_make_and(menv, disc_t, self.idle)
rhs = msat_make_and(menv, self.acquire,
msat_make_and(menv, self.x_counting,
msat_make_equal(menv, x_c, zero)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (counting) -> (release & idle' & c' = 0 & timeout' = 0)
lhs = msat_make_and(menv, disc_t, self.counting)
rhs = msat_make_and(
menv,
msat_make_and(menv, self.x_idle, self.release),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_timeout, zero)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
class Station(Module):
"""Station module"""
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder, mgr, delta):
super().__init__(name, menv, enc)
real_type = msat_get_rational_type(menv)
bool_type = msat_get_bool_type(menv)
loc, x_loc = self._symb("l", bool_type)
evt_symbs, evts, x_evts = self._enum("evt", 3)
req_time, x_req_time = self._symb("req_time", real_type)
self.idle = loc
self.transm = msat_make_not(menv, loc)
self.x_idle = x_loc
self.x_transm = msat_make_not(menv, x_loc)
self.acquire = evts[0]
self.release = evts[1]
self.stutter = evts[2]
self.symb2next = {loc: x_loc, req_time: x_req_time}
for s, x_s in evt_symbs:
assert s not in self.symb2next
self.symb2next[s] = x_s
zero = msat_make_number(menv, "0")
# bound evt
bound_evt = evts[0]
x_bound_evt = x_evts[0]
for evt, x_evt in zip(evts[1:], x_evts[1:]):
bound_evt = msat_make_or(menv, bound_evt, evt)
x_bound_evt = msat_make_or(menv, x_bound_evt, x_evt)
self.init = bound_evt
self.trans = x_bound_evt
# idle
self.init = msat_make_and(menv, self.init, self.idle)
# invar: req_time > 0
self.init = msat_make_and(menv, self.init,
msat_make_gt(menv, req_time, zero))
self.trans = msat_make_and(menv, self.trans,
msat_make_gt(menv, x_req_time, zero))
# (delta > 0 | stutter) -> l' = l & req_time' = req_time
lhs = msat_make_or(menv, self.stutter,
msat_make_gt(menv, delta, zero))
rhs = msat_make_and(
menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_req_time, req_time))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, msat_make_equal(menv, delta, zero),
msat_make_or(menv, self.acquire, self.release))
# (idle) -> (acquire & transm' & mgr.timeout' = req_time & req_time' = req_time)
lhs = msat_make_and(menv, disc_t, self.idle)
rhs = msat_make_and(
menv,
msat_make_and(menv, self.acquire, self.x_transm),
msat_make_and(menv,
msat_make_equal(menv, mgr.x_timeout, req_time),
msat_make_equal(menv, x_req_time, req_time)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (transm) -> (release & mgr.c > 0 & idle')
lhs = msat_make_and(menv, disc_t, self.transm)
rhs = msat_make_and(
menv, self.release,
msat_make_and(menv, msat_make_gt(menv, mgr.c, zero), self.x_idle))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
| 39.578947 | 88 | 0.591534 | [
"MIT"
] | EnricoMagnago/F3 | benchmarks/ltl_timed_transition_system/token_ring/f3/token_ring_0024.py | 13,536 | Python |
# Print iterations progress
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
From: https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)
# Print New Line on Complete
if iteration == total:
print()
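# Hedged usage sketch (the loop below is an illustration, not part of the original
# snippet): call printProgressBar once before the loop and once per iteration so
# the bar redraws in place. Guarded so it only runs when executed directly.
if __name__ == '__main__':
    import time
    items = list(range(57))
    total = len(items)
    printProgressBar(0, total, prefix='Progress:', suffix='Complete', length=50)
    for i, _ in enumerate(items):
        time.sleep(0.05)  # stand-in for real work
        printProgressBar(i + 1, total, prefix='Progress:', suffix='Complete', length=50)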
| 47.64 | 123 | 0.592779 | [
"Apache-2.0"
] | MariusDgr/AudioMining | src/utils/console_functions.py | 1,193 | Python |
"""Julia set generator without optional PIL-based image drawing"""
import time
#from memory_profiler import profile
# area of complex space to investigate
x1, x2, y1, y2 = -1.8, 1.8, -1.8, 1.8
c_real, c_imag = -0.62772, -.42193
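# Usage note (an assumption, inferred from the bare @profile decorator and the
# profile.timestamp blocks below): memory_profiler injects `profile` at run time,
# so the script is typically launched through its runner rather than plain python,
# e.g.:
#   mprof run julia1_memoryprofiler2.py
#   mprof plot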
@profile
def calculate_z_serial_purepython(maxiter, zs, cs):
"""Calculate output list using Julia update rule"""
with profile.timestamp("create_output_list"):
output = [0] * len(zs)
time.sleep(1)
with profile.timestamp("create_range_of_zs"):
iterations = range(len(zs))
with profile.timestamp("calculate_output"):
for i in iterations:
n = 0
z = zs[i]
c = cs[i]
while n < maxiter and abs(z) < 2:
z = z * z + c
n += 1
output[i] = n
return output
@profile
def calc_pure_python(draw_output, desired_width, max_iterations):
"""Create a list of complex co-ordinates (zs) and complex parameters (cs), build Julia set and display"""
x_step = (float(x2 - x1) / float(desired_width))
y_step = (float(y1 - y2) / float(desired_width))
x = []
y = []
ycoord = y2
while ycoord > y1:
y.append(ycoord)
ycoord += y_step
xcoord = x1
while xcoord < x2:
x.append(xcoord)
xcoord += x_step
# set width and height to the generated pixel counts, rather than the
# pre-rounding desired width and height
# build a list of co-ordinates and the initial condition for each cell.
# Note that our initial condition is a constant and could easily be removed,
# we use it to simulate a real-world scenario with several inputs to our
# function
zs = []
cs = []
for ycoord in y:
for xcoord in x:
zs.append(complex(xcoord, ycoord))
cs.append(complex(c_real, c_imag))
print "Length of x:", len(x)
print "Total elements:", len(zs)
start_time = time.time()
output = calculate_z_serial_purepython(max_iterations, zs, cs)
end_time = time.time()
secs = end_time - start_time
    print(calculate_z_serial_purepython.__name__ + " took", secs, "seconds")
# this sum is expected for 1000^2 grid with 300 iterations
assert sum(output) == 33219980
# Calculate the Julia set using a pure Python solution with
# reasonable defaults for a laptop
# set draw_output to True to use PIL to draw an image
calc_pure_python(draw_output=False, desired_width=1000, max_iterations=300)
#calc_pure_python(draw_output=False, desired_width=10, max_iterations=300)
| 34.4 | 109 | 0.642248 | [
"MIT"
] | dkdldbdbdosk/High_Performance_Python | codes/01_profiling/memory_profiler/julia1_memoryprofiler2.py | 2,580 | Python |
import os
import csv
# File path
election_dataCSV = os.path.join('.', 'election_data.csv')
# The total number of votes cast
# A complete list of candidates who received votes
# The percentage of votes each candidate won
# The total number of votes each candidate won
# The winner of the election based on popular vote.
# Declaring my variables
total_votes = 0
khan_votes = 0
correy_votes = 0
li_votes = 0
otooley_votes = 0
# percent_votes = 0
# total_votes_candidate = 0
# winner = 0
# Open file as read
with open(election_dataCSV, 'r') as csvfile:
# Identifying CSV file with delimiter set
csvreader = csv.reader(csvfile, delimiter=',')
header = next(csvreader)
# firstRow = next(csvreader)
# total_votes += 1
# previous_row = int(firstRow[0])
# Add rows to list
for row in csvreader:
#Adding total number of votes cast
total_votes += 1
#Candidates that received votes
if row[2] == "Khan":
khan_votes += 1
elif row[2] == "Correy":
correy_votes += 1
elif row[2] == "Li":
li_votes += 1
elif row[2] == "O'Tooley":
otooley_votes +=1
# Create a list of the candidates
candidates_list = ["Khan", "Correy", "Li", "O'Tooley"]
votes = [khan_votes, correy_votes, li_votes, otooley_votes]
# Pair candidates and votes together
dict_candidates_and_votes = dict(zip(candidates_list,votes))
# Find the winner by using the max function
key = max(dict_candidates_and_votes, key = dict_candidates_and_votes.get)
# Calculating the percentage of votes per candidate
khan_percentage = (khan_votes/total_votes) *100
correy_percentage = (correy_votes/total_votes) *100
li_percentage = (li_votes/total_votes) *100
otooley_percentage = (otooley_votes/total_votes) *100
# Print conclusion
print(f"Election Results")
print(f"----------------------------")
print(f"Total Votes: {total_votes}")
print(f"----------------------------")
print(f"Khan: {khan_percentage:.3f}% ({khan_votes})")
print(f"Correy: {correy_percentage:.3f}% ({correy_votes})")
print(f"Li: {li_percentage:.3f}% ({li_votes})")
print(f"O'Tooley: {otooley_percentage:.3f}% ({otooley_votes})")
print(f"----------------------------")
print(f"Winner: {key}")
print(f"----------------------------")
# Export results into txt file
file = open('election_output.txt','w')
file.write("Election Results: Total Votes - 1048575, Khan - 63.094% (661583), Correy - 19.936% (209046), Li: - 13.958% (146360), O'Tooley - 3.012% (31586), Winner - Khan")
file.close()
| 30.625 | 171 | 0.621521 | [
"MIT"
] | dorispira/Python | PyPoll/main.py | 2,695 | Python |
'''
A collection of functions to perform portfolio analysis.
Max Gosselin, 2019
'''
import numpy as np
import pandas as pd
from scipy import optimize
def portfolio_metrics(weights, avg_xs_returns, covariance_matrix):
''' Compute basic portfolio metrics: return, stdv, sharpe ratio '''
portfolio_return = np.sum(weights * avg_xs_returns)
portfolio_stdv = np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))
portfolio_sharpe = portfolio_return / portfolio_stdv
tickers = covariance_matrix.columns
metrics = {
'return': portfolio_return,
'stdv': portfolio_stdv,
'sharpe': portfolio_sharpe,
'weights': weights
}
metrics.update(dict([(ticker, weight) for ticker, weight in zip(tickers, weights)]).items())
return metrics
def simulate_portfolios(iters, xs_stats, covariance_matrix):
''' What we want here is to randomly generate portfolios that will sit
inside the efficiency frontier for illustrative purposes '''
# Set up an empty array to store our generated portfolios
simulations = []
while iters > 1:
weights = np.random.random(len(xs_stats.columns))
weights /= np.sum(weights)
simulations.append(portfolio_metrics(weights, xs_stats.loc['Avg'], covariance_matrix))
iters -= 1
return simulations
def solve_minvar(xs_avg, covariance_matrix):
''' Solve for the weights of the minimum variance portfolio
Constraints:
sum of weights = 1,
weights bound by [0, 0.2],
Returns the weights and the jacobian used to generate the solution.
'''
def __minvar(weights, xs_avg, covariance_matrix):
''' Anonymous function to compute stdv '''
return np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))
p_size = len(xs_avg)
args = (xs_avg, covariance_matrix)
constraints = [{'type': 'eq', 'fun': lambda x: np.sum(x) - 1}]
bounds = [(0, 0.2)] * p_size
minimized_weights = optimize.minimize(__minvar, np.zeros(p_size), args=args,
method='SLSQP', bounds=bounds, constraints=constraints, options={'maxiter':1000})
return minimized_weights
def solve_maxsharpe(xs_avg, covariance_matrix):
''' Solve for the weights of the maximum Sharpe ratio portfolio
Constraints:
sum of weights = 1,
weights bound by [0, 0.2],
Returns the weights and the jacobian used to generate the solution.
'''
def __max_by_min_sharpe(weights, xs_avg, covariance_matrix):
''' Anonymous function to compute sharpe ratio, note that since scipy only minimizes we go negative. '''
pm = portfolio_metrics(weights, xs_avg, covariance_matrix)
return -pm['return'] / pm['stdv']
p_size = len(xs_avg)
args = (xs_avg, covariance_matrix)
constraints = [{'type': 'eq', 'fun': lambda x: np.sum(x) - 1}]
bounds = [(0, 0.2)] * p_size
minimized_weights = optimize.minimize(__max_by_min_sharpe, ((1/p_size) * np.ones(p_size)), args=args,
method='SLSQP', bounds=bounds, constraints=constraints, options={'maxiter':1000})
return minimized_weights
def solve_for_target_return(xs_avg, covariance_matrix, target):
''' Solve for the weights of the minimum variance portfolio which has
a specific targeted return.
Constraints:
sum of weights = 1,
weights bound by [0, 0.2],
portfolio return = target return,
Returns the weights and the jacobian used to generate the solution.
'''
def __minvar(weights, xs_avg, covariance_matrix):
''' Anonymous function to compute stdv '''
return np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))
def __match_target(weights):
''' Anonymous function to check equality with the target return '''
return np.sum(weights * xs_avg)
p_size = len(xs_avg)
args = (xs_avg, covariance_matrix)
constraints = [
{'type': 'eq', 'fun': lambda x: np.sum(x) - 1},
{'type': 'eq', 'fun': lambda x: __match_target(x) - target},
]
bounds = [(0, 0.2)] * p_size
minimized_weights = optimize.minimize(__minvar, ((1/p_size) * np.ones(p_size)), args=args,
method='SLSQP', bounds=bounds, constraints=constraints, options={'maxiter':1000})
return minimized_weights
def generate_efficient_frontier(targets, xs_avg, covariance_matrix):
portfolios = []
for target in targets:
p_weights = solve_for_target_return(xs_avg, covariance_matrix, target)
portfolios.append(portfolio_metrics(p_weights['x'], xs_avg, covariance_matrix))
return portfolios
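# Hedged usage sketch (the tickers and return series below are made up for
# illustration and are not part of the original module):
if __name__ == '__main__':
    np.random.seed(0)
    tickers = ['A{}'.format(i) for i in range(8)]                 # hypothetical assets
    xs_returns = pd.DataFrame(np.random.normal(0.01, 0.05, size=(120, len(tickers))),
                              columns=tickers)                    # fake excess returns
    xs_stats = pd.DataFrame([xs_returns.mean()], index=['Avg'])
    covariance_matrix = xs_returns.cov()
    # Maximum-Sharpe weights, then the metrics of the resulting portfolio
    w = solve_maxsharpe(xs_stats.loc['Avg'], covariance_matrix)['x']
    print(portfolio_metrics(w, xs_stats.loc['Avg'], covariance_matrix))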
| 32.682432 | 112 | 0.647509 | [
"MIT",
"Unlicense"
] | MaxGosselin/portfolio_optimizer | portfolio_functions.py | 4,837 | Python |
from mmcv.utils import Registry, build_from_cfg
from .check_argument import (equal_len, is_2dlist, is_3dlist, is_ndarray_list,
is_none_or_type, is_type_list, valid_boundary)
from .collect_env import collect_env
from .img_util import drop_orientation
from .lmdb_util import lmdb_converter
from .logger import get_root_logger
__all__ = [
'Registry', 'build_from_cfg', 'get_root_logger', 'collect_env',
'is_3dlist', 'is_ndarray_list', 'is_type_list', 'is_none_or_type',
'equal_len', 'is_2dlist', 'valid_boundary', 'lmdb_converter',
'drop_orientation'
]
| 37.3125 | 78 | 0.747069 | [
"Apache-2.0"
] | Darrenonly/mmocr | mmocr/utils/__init__.py | 597 | Python |
## @example pyfast_and_pyside2_custom_window.py
# This example demonstrates how to use FAST in an existing PySide2 application.
#
# @m_class{m-block m-warning} @par PySide2 Qt Version
# @parblock
# For this example you <b>must</b> use the same Qt version of PySide2 as used in FAST (5.14.0)
# Do this with: <b>pip install pyside2==5.14.0</b>
# @endparblock
#
# @image html images/examples/python/pyfast_and_pyside_custom_window.jpg width=350px;
from PySide2.QtWidgets import *
from PySide2.QtOpenGL import QGLWidget
from PySide2.QtCore import Slot
import PySide2.QtSvg # Must import this before fast due to conflicting symbols
from shiboken2 import wrapInstance
import fast
import threading
import sys
#fast.Reporter.setGlobalReportMethod(fast.Reporter.COUT)
# Create a simple window widget with pyside2
class Window(QWidget):
def __init__(self):
super(Window, self).__init__()
self.setWindowTitle('pyFAST + PySide2')
# Create button
self.button = QPushButton("Restart FAST pipeline")
# Create FAST view
self.view = fast.View()
self.installEventFilter(wrapInstance(int(self.view.asQGLWidget()), QGLWidget))
self.view.set2DMode()
# Create layout and add widgets
layout = QVBoxLayout()
layout.addWidget(wrapInstance(int(self.view.asQGLWidget()), QGLWidget))
layout.addWidget(self.button)
self.setLayout(layout)
# Connect button click event
self.button.clicked.connect(self.restartPipeline)
self.resize(512, 512)
@Slot()
def restartPipeline(self):
# Create FAST computation thread
# This is needed to run computations smoothly in the background
# The computation thread must live in the object to avoid being destroyed when this function is done.
self.computationThread = fast.ComputationThread.create()
self.computationThread.addView(self.view)
# Setup a FAST pipeline
streamer = fast.ImageFileStreamer \
.create(fast.Config.getTestDataPath() + '/US/Heart/ApicalFourChamber/US-2D_#.mhd')
renderer = fast.ImageRenderer.create() \
.connect(streamer)
self.view.removeAllRenderers()
self.view.addRenderer(renderer)
self.view.reinitialize()
self.computationThread.start()
if __name__ == '__main__':
# Create the Qt Application
app = QApplication(sys.argv)
# Create and show the window
window = Window()
window.show()
# Run the main Qt loop
sys.exit(app.exec_()) | 32.897436 | 109 | 0.693687 | [
"BSD-2-Clause"
] | andreped/FAST | source/FAST/Examples/Python/pyfast_and_pyside2_custom_window.py | 2,566 | Python |
# Project Quex (http://quex.sourceforge.net); License: MIT;
# (C) 2005-2020 Frank-Rene Schaefer;
#_______________________________________________________________________________
from quex.input.setup import NotificationDB
from quex.input.regular_expression.pattern import Pattern_Prep
import quex.input.regular_expression.core as regular_expression
from quex.input.code.base import SourceRef, \
SourceRef_DEFAULT, \
SourceRefObject
from quex.engine.state_machine.core import DFA
import quex.engine.state_machine.construction.sequentialize as sequentialize
import quex.engine.state_machine.construction.repeat as repeat
import quex.engine.state_machine.algebra.difference as difference
import quex.engine.state_machine.algebra.intersection as intersection
import quex.engine.state_machine.algorithm.beautifier as beautifier
import quex.engine.state_machine.check.swallow as swallow
import quex.engine.state_machine.check.outrun as outrun
import quex.engine.state_machine.check.identity as identity
import quex.engine.state_machine.check.tail as tail
from quex.engine.misc.tools import typed
from quex.engine.misc.interval_handling import NumberSet
from quex.engine.counter import IndentationCount_Pre, \
cc_type_name_db, \
cc_type_db
from quex.engine.counter_builder import CountActionMap_Builder
import quex.engine.misc.error as error
import quex.engine.misc.error_check as error_check
from quex.engine.misc.file_in import check, \
check_or_die, \
skip_whitespace, \
read_identifier, \
read_integer
from quex.constants import E_CharacterCountType
from quex.blackboard import setup as Setup
def parse_CountActionMap(fh):
return _base_parse(fh, CountActionMapFromParser_Builder(fh))
def parse_IndentationSetup(fh):
return _base_parse(fh, IndentationSetup_Builder(fh))
def _base_parse(fh, builder, IndentationSetupF=False):
"""Parses pattern definitions of the form:
[ \t] => grid 4;
[:intersection([:alpha:], [\X064-\X066]):] => space 1;
In other words the right hand side *must* be a character set.
ADAPTS: result to contain parsing information.
"""
# NOTE: Catching of EOF happens in caller: parse_section(...)
#
while 1 + 1 == 2:
skip_whitespace(fh)
if check(fh, ">"):
break
# A regular expression state machine
pattern, identifier, sr = _parse_definition_head(fh, builder.identifier_list)
if pattern is None and not builder.keyword_else_f:
error.log("Keyword '\\else' cannot be used in indentation setup.", fh)
# '_parse_definition_head()' ensures that only identifiers mentioned in
# 'result' are accepted.
if builder.requires_count():
count = _read_value_specifier(fh, identifier, 1)
builder.specify(identifier, pattern, count, sr)
else:
builder.specify(identifier, pattern, sr)
if not check(fh, ";"):
error.log("Missing ';' after '%s' specification." % identifier, fh)
return builder.finalize()
class CharacterSetVsAction_BuilderBase:
def __init__(self, IdentifierList, KeywordElseAdmissibleF):
self.identifier_list = IdentifierList
self.keyword_else_f = KeywordElseAdmissibleF
class CountActionMapFromParser_Builder(CharacterSetVsAction_BuilderBase):
"""Line/column number count specification.
___________________________________________________________________________
The main result of the parsing the the Base's .count_command_map which is
an instance of CountActionMap_Builder.
____________________________________________________________________________
"""
@typed(sr=SourceRef)
def __init__(self, fh):
self.sr = SourceRef.from_FileHandle(fh)
self.__fh = fh
self._ca_map_builder = CountActionMap_Builder()
CharacterSetVsAction_BuilderBase.__init__(self,
("columns", "grid", "lines"),
KeywordElseAdmissibleF=True)
def finalize(self):
# Finalize / Produce 'LineColumnCount' object.
#
ca_map = self._ca_map_builder.finalize(
Setup.buffer_encoding.source_set.minimum(),
Setup.buffer_encoding.source_set.least_greater_bound(),
self.sr)
_check_grid_values_integer_multiples(ca_map)
check_defined(ca_map, self.sr, E_CharacterCountType.LINE)
return ca_map
def requires_count(self):
return True
@typed(sr=SourceRef, Identifier=(str,str))
def specify(self, Identifier, Pattern, Count, sr):
if Pattern is None:
self._ca_map_builder.define_else(cc_type_db[Identifier], Count, sr)
else:
trigger_set = _extract_trigger_set(sr, Identifier, Pattern)
self._ca_map_builder.add(trigger_set, cc_type_db[Identifier], Count, sr)
class IndentationSetup_Builder(CharacterSetVsAction_BuilderBase):
"""Indentation counter specification.
____________________________________________________________________________
The base's .count_command_map contains information about how to count the
space at the beginning of the line. The count until the first non-whitespace
is the 'indentation'.
+bad:
The spec contains information about what characters are not supposed to
appear in indentation (bad characters). Depending on the philosophical
basis, some might consider 'space' as evil, others consider 'tab' as evil.
+newline:
A detailed state machine can be defined for 'newline'. This might be
'\n|(\r\n)' or more complex things.
+suppressor:
A newline might be suppressed by '\' for example. For that, it might be
specified as 'newline suppressor'.
____________________________________________________________________________
"""
@typed(sr=SourceRef)
def __init__(self, fh):
self.__fh = fh
self.sm_whitespace = SourceRefObject("whitespace", None)
self.sm_badspace = SourceRefObject("bad", None)
self.sm_newline = SourceRefObject("newline", None)
self.sm_newline_suppressor = SourceRefObject("suppressor", None)
self.sm_suspend_list = []
if fh == -1: self.sr = SourceRef_DEFAULT
else: self.sr = SourceRef.from_FileHandle(self.__fh)
CharacterSetVsAction_BuilderBase.__init__(self,
("whitespace", "suspend", "newline", "suppressor", "bad"),
KeywordElseAdmissibleF=False)
def finalize(self):
# Finalize / Produce 'IndentationCount' object.
#
if self.sm_whitespace.get() is None:
self.sm_whitespace.set(self.__sm_whitespace_default(), SourceRef_DEFAULT)
if self.sm_newline.get() is None:
self.sm_newline.set(self.__sm_newline_default(), SourceRef_DEFAULT)
# -- consistency
self._consistency_check()
# Transform 'SourceRefObject' into 'Pattern_Prep' objects
# (TODO: Why not use it in the first place?)
def get_pattern(SRO):
if SRO is None or SRO.get() is None: return None
return Pattern_Prep(SRO.get(), PatternString="<indentation %s>" % SRO.name, Sr=SRO.sr)
pattern_suspend_list = [ get_pattern(sro) for sro in self.sm_suspend_list ]
pattern_suspend_list = [ x for x in pattern_suspend_list if x is not None ]
if self.sm_newline_suppressor.set_f():
sm_suppressed_newline = sequentialize.do([self.sm_newline_suppressor.get(),
self.sm_newline.get()])
sm_suppressed_newline = beautifier.do(sm_suppressed_newline)
pattern_suppressed_newline = Pattern_Prep(sm_suppressed_newline,
PatternString="<indentation suppressed newline>",
Sr=self.sm_newline_suppressor.sr)
else:
pattern_suppressed_newline = None
return IndentationCount_Pre(self.sr,
get_pattern(self.sm_whitespace),
get_pattern(self.sm_badspace),
get_pattern(self.sm_newline),
pattern_suppressed_newline,
pattern_suspend_list)
def requires_count(self):
return False
def specify(self, identifier, pattern, sr):
sm = pattern.extract_sm()
if identifier == "whitespace":
self.__specify(self.sm_whitespace, sm, sr)
elif identifier == "bad":
self.__specify(self.sm_badspace, sm, sr)
elif identifier == "newline":
self.__specify(self.sm_newline, sm, sr)
elif identifier == "suppressor":
self.__specify(self.sm_newline_suppressor, sm , sr)
elif identifier == "suspend":
self.__specify_suspend(sm, sr)
else:
return False
return True
@typed(sr=SourceRef)
def __specify(self, member_ref, Sm, sr):
assert Sm is not None
_error_if_defined_before(member_ref, sr)
if not Sm.is_DFA_compliant(): Sm = beautifier.do(Sm)
member_ref.set(Sm, sr)
@typed(sr=SourceRef)
def __specify_suspend(self, Sm, sr):
for before in self.sm_suspend_list:
if not identity.do(before.get(), Sm): continue
error.log("'suspend' has been defined before;", sr, DontExitF=True)
error.log("at this place.", before.sr)
sm_suspend = SourceRefObject("suspend", None)
self.__specify(sm_suspend, Sm, sr)
self.sm_suspend_list.append(sm_suspend)
def __sm_newline_default(self):
"""Default newline: '(\n)|(\r\n)'
"""
sm = DFA.from_character_set(NumberSet(ord('\n')))
if Setup.dos_carriage_return_newline_f:
sm.add_transition_sequence(sm.init_state_index, [ord('\r'), ord('\n')])
return sm
def __sm_whitespace_default(self):
"""Try to define default whitespace ' ' or '\t' if their positions
are not yet occupied in the count_command_map.
"""
sm_whitespace = DFA.from_character_set(NumberSet.from_integer_list([ord(' '), ord('\t')]))
sm_whitespace = beautifier.do(repeat.do(sm_whitespace, 1))
if self.sm_badspace.get() is not None:
sm_whitespace = difference.do(sm_whitespace, self.sm_badspace.get())
if sm_whitespace.is_Empty() \
or outrun.do(self.sm_badspace.get(), sm_whitespace):
error.log("Cannot define default 'whitespace' in the frame of the given\n"
"definition of 'bad'.", self.sm_badspace.sr)
return sm_whitespace
def _consistency_check(self):
"""
Required defintions:
-- WHITESPACE (Default done automatically) => Assert.
-- NEWLINE (Default done automatically) => Assert.
Inadmissible 'eat-into'.
-- SUPPRESSOR shall not eat into [NEWLINE]
-- NEWLINE shall not eat into [WHITESPACE, BADSPACE, SUSPEND, SUPPRESSOR]
-- WHITESPACE shall not eat into [SUPPRESSOR, NEWLINE, SUSPEND].
-- BADSPACE shall not eat into [SUPPRESSOR, NEWLINE, SUSPEND].
No common lexemes:
-- WHITESPACE and BADSPACE may not have common lexemes.
Outrun:
-- NEWLINE may not start with SUSPEND and vice versa
-- NEWLINE may not start with SUPPRESSOR and vice versa
-- SUPPRESSOR may not start with SUSPEND and vice versa
-- WHITESPACE shall not outrun BADSPACE, but the contrary is ok.
(BADSPACE may outrun WHITESPACE (e.g: lexeme with 'tab' after whitespace')
"""
# (1) Required definitions _____________________________________________
assert self.sm_whitespace.set_f()
assert self.sm_newline.set_f()
whitespace = self.sm_whitespace
newline = self.sm_newline
badspace = self.sm_badspace
suppressor = self.sm_newline_suppressor
suspend_list = self.sm_suspend_list
# (2) Inadmissible 'eat-into' __________________________________________
#
cmp_list = [
(newline, badspace), (newline, whitespace), (newline, suppressor),
(suppressor, newline),
(whitespace, newline), (whitespace, suppressor),
(badspace, newline), (badspace, suppressor),
] \
+ [ (whitespace, x) for x in suspend_list ] \
+ [ (newline, x) for x in suspend_list ] \
+ [ (badspace, x) for x in suspend_list ]
def _error(FormatStr, Sro0, Sro1):
error.log(FormatStr % (Sro0.name, Sro1.name), Sro0.sr, DontExitF=True)
error.log("'%s' defined here." % Sro1.name, Sro1.sr)
def _iterate(SroPairList):
for first_sro, second_sro in cmp_list:
first, second = first_sro.get(), second_sro.get()
if first is None or second is None: continue
yield first_sro, first, second_sro, second
for first_sro, first, second_sro, second in _iterate(cmp_list):
if swallow.ending_A_beginning_B(first, second):
_error("'%s' may eat into beginning of '%s'.", first_sro, second_sro)
elif swallow.inside_A_match_B(first, second):
_error("'%s' may swallow something matched by '%s'.", first_sro, second_sro)
for sm_suspend in self.sm_suspend_list:
only_common_f, \
common_f = tail.do(self.sm_newline.get(), sm_suspend.get())
error_check.tail(only_common_f, common_f,
"indentation handler's newline", self.sm_newline.sr,
"suspend", sm_suspend.sr)
# (3) Inadmissible common lexemes _____________________________________
#
if badspace.get() and not intersection.do([badspace.get(), whitespace.get()]).is_Empty():
_error("'%s' and '%s' match on common lexemes.", whitespace, badspace)
# (3) Inadmissible outruns ____________________________________________
#
cmp_list = [ (newline, suppressor), (suppressor, newline), (whitespace, badspace) ]
for x in suspend_list:
cmp_list.extend([
(newline, x), (x, newline),
(suppressor, x), (x, suppressor)
])
for first_sro, first, second_sro, second in _iterate(cmp_list):
if outrun.do(second, first):
_error("'%s' may outrun '%s'.", first_sro, second_sro)
def _parse_definition_head(fh, IdentifierList):
if check(fh, "\\default"):
error.log("'\\default' has been replaced by keyword '\\else' since quex 0.64.9!", fh)
elif check(fh, "\\else"):
pattern = None
else:
pattern = regular_expression.parse(fh, AllowPreContextF=False,
AllowPostContextF=False)
skip_whitespace(fh)
check_or_die(fh, "=>", " after character set definition.")
skip_whitespace(fh)
identifier = read_identifier(fh, OnMissingStr="Missing identifier following '=>'.")
error.verify_word_in_list(identifier, IdentifierList,
"Unrecognized specifier '%s'." % identifier, fh)
skip_whitespace(fh)
return pattern, identifier, SourceRef.from_FileHandle(fh)
def _read_value_specifier(fh, Keyword, Default=None):
skip_whitespace(fh)
value = read_integer(fh)
if value is not None: return value
# not a number received, is it an identifier?
variable = read_identifier(fh)
if variable: return variable
elif Default is not None: return Default
error.log("Missing integer or variable name after keyword '%s'." % Keyword, fh)
__CountActionMap_DEFAULT = None
def LineColumnCount_Default():
global __CountActionMap_DEFAULT
if __CountActionMap_DEFAULT is None:
builder = CountActionMap_Builder()
builder.add(NumberSet(ord('\n')), E_CharacterCountType.LINE, 1, SourceRef_DEFAULT)
builder.add(NumberSet(ord('\t')), E_CharacterCountType.GRID, 4, SourceRef_DEFAULT)
builder.define_else(E_CharacterCountType.COLUMN, 1, SourceRef_DEFAULT) # Define: "\else"
__CountActionMap_DEFAULT = builder.finalize(
Setup.buffer_encoding.source_set.minimum(),
Setup.buffer_encoding.source_set.least_greater_bound(), # Apply: "\else"
SourceRef_DEFAULT)
return __CountActionMap_DEFAULT
def _error_if_defined_before(Before, sr):
if not Before.set_f(): return
error.log("'%s' has been defined before;" % Before.name, sr,
DontExitF=True)
error.log("at this place.", Before.sr)
def _extract_trigger_set(sr, Keyword, Pattern):
if Pattern is None:
return None
elif isinstance(Pattern, NumberSet):
return Pattern
def check_can_be_matched_by_single_character(SM):
bad_f = False
init_state = SM.get_init_state()
if SM.get_init_state().is_acceptance():
bad_f = True
elif len(SM.states) != 2:
bad_f = True
# Init state MUST transit to second state. Second state MUST not have any transitions
elif len(init_state.target_map.get_target_state_index_list()) != 1:
bad_f = True
else:
tmp = set(SM.states.keys())
tmp.remove(SM.init_state_index)
other_state_index = next(iter(tmp))
if len(SM.states[other_state_index].target_map.get_target_state_index_list()) != 0:
bad_f = True
if bad_f:
error.log("For '%s' only patterns are addmissible which\n" % Keyword + \
"can be matched by a single character, e.g. \" \" or [a-z].", sr)
sm = Pattern.extract_sm()
check_can_be_matched_by_single_character(sm)
transition_map = sm.get_init_state().target_map.get_map()
assert len(transition_map) == 1
return list(transition_map.values())[0]
def _check_grid_values_integer_multiples(CaMap):
"""If there are no spaces and the grid is on a homogeneous scale,
=> then the grid can be transformed into 'easy-to-compute' spaces.
"""
grid_value_list = []
min_info = None
for character_set, info in CaMap:
if info.cc_type == E_CharacterCountType.COLUMN:
return
elif info.cc_type != E_CharacterCountType.GRID:
continue
        elif isinstance(info.value, str):
# If there is one single 'variable' grid value,
# then no assumptions can be made.
return
grid_value_list.append(info.value)
if min_info is None or info.value < min_info.value:
min_info = info
if min_info is None:
return
# Are all grid values a multiple of the minimum?
if all(x % min_info.value == 0 for x in grid_value_list):
error.warning("Setup does not contain spaces, only grids (tabulators). All grid\n" \
"widths are multiples of %i. The grid setup %s is equivalent to\n" \
% (min_info.value, repr(sorted(grid_value_list))[1:-1]) + \
"a setup with space counts %s. Space counts are faster to compute.\n" \
% repr([x / min_info.value for x in sorted(grid_value_list)])[1:-1],
min_info.sr)
return
def check_defined(CaMap, SourceReference, CCT):
"""Checks whether the character counter type has been defined in the
map.
THROWS: Error in case that is has not been defined.
"""
for character_set, info in CaMap:
if info.cc_type == CCT:
return
error.warning("Setup does not define '%s'." % cc_type_name_db[CCT], SourceReference,
SuppressCode=NotificationDB.warning_counter_setup_without_newline)
| 44.238589 | 111 | 0.60892 | [
"MIT"
] | Liby99/quex | quex/input/files/specifier/counter.py | 21,323 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=2
# total number=20
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=17
c.append(cirq.Z.on(input_qubit[1])) # number=18
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=19
c.append(cirq.Y.on(input_qubit[1])) # number=2
c.append(cirq.Y.on(input_qubit[1])) # number=4
c.append(cirq.Y.on(input_qubit[1])) # number=3
c.append(cirq.H.on(input_qubit[0])) # number=13
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=14
c.append(cirq.H.on(input_qubit[0])) # number=15
c.append(cirq.X.on(input_qubit[0])) # number=8
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=10
c.append(cirq.X.on(input_qubit[0])) # number=11
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=12
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq347.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | 31.171429 | 77 | 0.691567 | [
"BSD-3-Clause"
] | UCLA-SEAL/QDiff | data/p2DJ/New/program/cirq/startCirq347.py | 2,182 | Python |
import requests
import urllib.parse
import posixpath
import pandas as pd
def get_enrollment_dates(course):
'''Takes a course object and returns student dates of enrollment.
Useful for handling late registrations and modified deadlines.
Example:
        get_enrollment_dates(course)'''
url_path = posixpath.join("api", "v1", "courses", course['course_id'], "enrollments")
api_url = urllib.parse.urljoin(course['hostname'], url_path)
token = course['token']
resp = None
students = []
while resp is None or resp.links['current']['url'] != resp.links['last']['url']:
resp = requests.get(
url = api_url if resp is None else resp.links['next']['url'],
headers = {
"Authorization": f"Bearer {token}",
"Accept": "application/json+canvas-string-ids"
},
json={
"type": ["StudentEnrollment"],
"per_page":"100"
}
)
students.extend(resp.json())
enrollment_dates = {}
for st in students:
enrollment_dates[str(st['user_id'])] = str(st['created_at']).strip('Z').replace('T','-').replace(':','-')[:16]
return enrollment_dates
def get_assignments(course):
'''Takes a course object and returns
a Pandas data frame with all existing assignments and their attributes/data
Example:
        get_assignments(course)'''
url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments")
api_url = urllib.parse.urljoin(course['hostname'], url_path)
token = course['token']
resp = requests.get(
url=api_url,
headers={
"Authorization": f"Bearer {token}",
"Accept": "application/json+canvas-string-ids"
},
json={
"per_page": "10000"
},
)
assignments = resp.json()
assign_data = pd.DataFrame.from_dict(assignments)
return assign_data
def get_assignment_lock_date(course, assignment):
    '''Takes a course object and the name of a Canvas assignment and returns the lock date. Returns None if no lock date is assigned.
Example:
        get_assignment_lock_date(course, 'worksheet_01')'''
assignments = get_assignments(course)
assignments = assignments[['name', 'lock_at']].query('name == @assignment')
lock_date = assignments['lock_at'].to_numpy()[0]
if lock_date is None:
return lock_date
lock_date = lock_date.replace("T", "-")
lock_date = lock_date.replace(":", "-")
return lock_date[:16]
def get_assignment_due_date(course, assignment):
'''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned.
Example:
        get_assignment_due_date(course, 'worksheet_01')'''
assignments = get_assignments(course)
assignments = assignments[['name', 'due_at']].query('name == @assignment')
due_date = assignments['due_at'].to_numpy()[0]
if due_date is None:
return due_date
due_date = due_date.replace("T", "-")
due_date = due_date.replace(":", "-")
return due_date[:16]
def get_assignment_unlock_date(course, assignment):
    '''Takes a course object and the name of a Canvas assignment and returns the unlock date. Returns None if no unlock date is assigned.
Example:
        get_assignment_unlock_date(course, 'worksheet_01')'''
assignments = get_assignments(course)
assignments = assignments[['name', 'unlock_at']].query('name == @assignment')
unlock_date = assignments['unlock_at'].to_numpy()[0]
if unlock_date is None:
return unlock_date
unlock_date = unlock_date.replace("T", "-").replace(':', '-')
return unlock_date[:16]
def get_assignment_id(course, assignment):
'''Takes a course object and the name of a Canvas assignment and returns the Canvas ID.
Example:
        get_assignment_id(course, 'worksheet_01')'''
assignments = get_assignments(course)
assignments = assignments[['name', 'id']].query('name == @assignment')
return assignments['id'].values[0]
def get_grades(course, assignment):
    '''Takes a course object and an assignment name, and gets the grades for that assignment from Canvas.
Example:
        get_grades(course, 'worksheet_01')'''
assignment_id = get_assignment_id(course, assignment)
url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions")
api_url = urllib.parse.urljoin(course['hostname'], url_path)
token = course['token']
resp = None
scores = {}
while resp is None or resp.links['current']['url'] != resp.links['last']['url']:
resp = requests.get(
url = api_url if resp is None else resp.links['next']['url'],
headers = {
"Authorization": f"Bearer {token}",
"Accept": "application/json+canvas-string-ids"
},
json={
"per_page":"100"
}
)
scores.update( {res['user_id'] : res['score'] for res in resp.json()} )
return scores
def grades_need_posting(course, assignment):
    '''Takes a course object and an assignment name, and checks whether any grades
    for that assignment still need to be posted on Canvas.
    Example:
        grades_need_posting(course, 'worksheet_01')'''
assignment_id = get_assignment_id(course, assignment)
url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions")
api_url = urllib.parse.urljoin(course['hostname'], url_path)
token = course['token']
#get enrollments to avoid the test student's submissions
real_stu_ids = list(get_enrollment_dates(course).keys())
resp = None
posted_flags = []
while resp is None or resp.links['current']['url'] != resp.links['last']['url']:
resp = requests.get(
url = api_url if resp is None else resp.links['next']['url'],
headers = {
"Authorization": f"Bearer {token}",
"Accept": "application/json+canvas-string-ids"
},
json={
"per_page":"100"
}
)
posted_flags.extend([ (subm_grd['posted_at'] is not None) for subm_grd in resp.json() if subm_grd['user_id'] in real_stu_ids])
return not all(posted_flags)
def post_grade(course, assignment, student, score):
'''Takes a course object, an assignment name, student id, and score to upload. Posts to Canvas.
Example:
        post_grade(dsci100, 'worksheet_01', '23423', 10)'''
assignment_id = get_assignment_id(course, assignment)
url_post_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions", student)
api_url = urllib.parse.urljoin(course['hostname'], url_post_path)
token = course['token']
resp = requests.put(
url = urllib.parse.urljoin(api_url, student),
headers = {
"Authorization": f"Bearer {token}",
"Accept": "application/json+canvas-string-ids"
},
json={
"submission": {"posted_grade": score}
},
)
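# Hedged usage sketch (the hostname, course id and token below are placeholders
# that must be replaced with real Canvas credentials before the calls can succeed;
# the dict keys are the ones the functions above expect):
if __name__ == '__main__':
    course = {
        'hostname': 'https://canvas.example.edu',
        'course_id': '12345',
        'token': 'REPLACE_WITH_API_TOKEN',
    }
    print(get_assignment_due_date(course, 'worksheet_01'))
    print(get_grades(course, 'worksheet_01'))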
| 37.808511 | 134 | 0.638295 | [
"MIT"
] | hsmohammed/rudaux | scripts/canvas.py | 7,108 | Python |
# -*- coding: utf-8 -*-
"""
pygments.lexers.graphics
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for computer graphics and plotting related languages.
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words, include, bygroups, using, \
this, default
from pygments.token import Text, Comment, Operator, Keyword, Name, \
Number, Punctuation, String
__all__ = ['GLShaderLexer', 'PostScriptLexer', 'AsymptoteLexer', 'GnuplotLexer',
'PovrayLexer', 'HLSLShaderLexer']
class GLShaderLexer(RegexLexer):
"""
GLSL (OpenGL Shader) lexer.
.. versionadded:: 1.1
"""
name = 'GLSL'
aliases = ['glsl']
filenames = ['*.vert', '*.frag', '*.geo']
mimetypes = ['text/x-glslsrc']
tokens = {
'root': [
(r'^#.*', Comment.Preproc),
(r'//.*', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?',
Operator),
(r'[?:]', Operator), # quick hack for ternary
(r'\bdefined\b', Operator),
(r'[;{}(),\[\]]', Punctuation),
# FIXME when e is present, no decimal point needed
(r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float),
(r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float),
(r'0[xX][0-9a-fA-F]*', Number.Hex),
(r'0[0-7]*', Number.Oct),
(r'[1-9][0-9]*', Number.Integer),
(words((
# Storage qualifiers
'attribute', 'const', 'uniform', 'varying',
'buffer', 'shared', 'in', 'out',
# Layout qualifiers
'layout',
# Interpolation qualifiers
'flat', 'smooth', 'noperspective',
# Auxiliary qualifiers
'centroid', 'sample', 'patch',
# Parameter qualifiers. Some double as Storage qualifiers
'inout',
# Precision qualifiers
'lowp', 'mediump', 'highp', 'precision',
# Invariance qualifiers
'invariant',
# Precise qualifiers
'precise',
# Memory qualifiers
'coherent', 'volatile', 'restrict', 'readonly', 'writeonly',
# Statements
'break', 'continue', 'do', 'for', 'while', 'switch',
'case', 'default', 'if', 'else', 'subroutine',
'discard', 'return', 'struct'),
prefix=r'\b', suffix=r'\b'),
Keyword),
(words((
# Boolean values
'true', 'false'),
prefix=r'\b', suffix=r'\b'),
Keyword.Constant),
(words((
# Miscellaneous types
'void', 'atomic_uint',
# Floating-point scalars and vectors
'float', 'vec2', 'vec3', 'vec4',
'double', 'dvec2', 'dvec3', 'dvec4',
# Integer scalars and vectors
'int', 'ivec2', 'ivec3', 'ivec4',
'uint', 'uvec2', 'uvec3', 'uvec4',
# Boolean scalars and vectors
'bool', 'bvec2', 'bvec3', 'bvec4',
# Matrices
'mat2', 'mat3', 'mat4', 'dmat2', 'dmat3', 'dmat4',
'mat2x2', 'mat2x3', 'mat2x4', 'dmat2x2', 'dmat2x3', 'dmat2x4',
'mat3x2', 'mat3x3', 'mat3x4', 'dmat3x2', 'dmat3x3',
'dmat3x4', 'mat4x2', 'mat4x3', 'mat4x4', 'dmat4x2', 'dmat4x3', 'dmat4x4',
# Floating-point samplers
'sampler1D', 'sampler2D', 'sampler3D', 'samplerCube',
'sampler1DArray', 'sampler2DArray', 'samplerCubeArray',
'sampler2DRect', 'samplerBuffer',
'sampler2DMS', 'sampler2DMSArray',
# Shadow samplers
'sampler1DShadow', 'sampler2DShadow', 'samplerCubeShadow',
'sampler1DArrayShadow', 'sampler2DArrayShadow',
'samplerCubeArrayShadow', 'sampler2DRectShadow',
# Signed integer samplers
'isampler1D', 'isampler2D', 'isampler3D', 'isamplerCube',
'isampler1DArray', 'isampler2DArray', 'isamplerCubeArray',
'isampler2DRect', 'isamplerBuffer',
'isampler2DMS', 'isampler2DMSArray',
# Unsigned integer samplers
'usampler1D', 'usampler2D', 'usampler3D', 'usamplerCube',
'usampler1DArray', 'usampler2DArray', 'usamplerCubeArray',
'usampler2DRect', 'usamplerBuffer',
'usampler2DMS', 'usampler2DMSArray',
# Floating-point image types
'image1D', 'image2D', 'image3D', 'imageCube',
'image1DArray', 'image2DArray', 'imageCubeArray',
'image2DRect', 'imageBuffer',
'image2DMS', 'image2DMSArray',
# Signed integer image types
'iimage1D', 'iimage2D', 'iimage3D', 'iimageCube',
'iimage1DArray', 'iimage2DArray', 'iimageCubeArray',
'iimage2DRect', 'iimageBuffer',
'iimage2DMS', 'iimage2DMSArray',
# Unsigned integer image types
'uimage1D', 'uimage2D', 'uimage3D', 'uimageCube',
'uimage1DArray', 'uimage2DArray', 'uimageCubeArray',
'uimage2DRect', 'uimageBuffer',
'uimage2DMS', 'uimage2DMSArray'),
prefix=r'\b', suffix=r'\b'),
Keyword.Type),
(words((
# Reserved for future use.
'common', 'partition', 'active', 'asm', 'class',
'union', 'enum', 'typedef', 'template', 'this',
'resource', 'goto', 'inline', 'noinline', 'public',
'static', 'extern', 'external', 'interface', 'long',
'short', 'half', 'fixed', 'unsigned', 'superp', 'input',
'output', 'hvec2', 'hvec3', 'hvec4', 'fvec2', 'fvec3',
'fvec4', 'sampler3DRect', 'filter', 'sizeof', 'cast',
'namespace', 'using'),
prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
# All names beginning with "gl_" are reserved.
(r'gl_\w*', Name.Builtin),
(r'[a-zA-Z_]\w*', Name),
(r'\.', Punctuation),
(r'\s+', Text),
],
}
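# Hedged usage sketch (not part of the pygments source; the shader string is made
# up): the lexer above is normally driven through pygments' highlight() helper.
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     src = "void main() { gl_FragColor = vec4(1.0); }"
#     print(highlight(src, GLShaderLexer(), TerminalFormatter()))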
class HLSLShaderLexer(RegexLexer):
"""
HLSL (Microsoft Direct3D Shader) lexer.
.. versionadded:: 2.3
"""
name = 'HLSL'
aliases = ['hlsl']
filenames = ['*.hlsl', '*.hlsli']
mimetypes = ['text/x-hlsl']
tokens = {
'root': [
(r'^#.*', Comment.Preproc),
(r'//.*', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?',
Operator),
(r'[?:]', Operator), # quick hack for ternary
(r'\bdefined\b', Operator),
(r'[;{}(),.\[\]]', Punctuation),
# FIXME when e is present, no decimal point needed
(r'[+-]?\d*\.\d+([eE][-+]?\d+)?f?', Number.Float),
(r'[+-]?\d+\.\d*([eE][-+]?\d+)?f?', Number.Float),
(r'0[xX][0-9a-fA-F]*', Number.Hex),
(r'0[0-7]*', Number.Oct),
(r'[1-9][0-9]*', Number.Integer),
(r'"', String, 'string'),
(words((
'asm','asm_fragment','break','case','cbuffer','centroid','class',
'column_major','compile','compile_fragment','const','continue',
'default','discard','do','else','export','extern','for','fxgroup',
'globallycoherent','groupshared','if','in','inline','inout',
'interface','line','lineadj','linear','namespace','nointerpolation',
'noperspective','NULL','out','packoffset','pass','pixelfragment',
'point','precise','return','register','row_major','sample',
'sampler','shared','stateblock','stateblock_state','static',
'struct','switch','tbuffer','technique','technique10',
'technique11','texture','typedef','triangle','triangleadj',
'uniform','vertexfragment','volatile','while'),
prefix=r'\b', suffix=r'\b'),
Keyword),
(words(('true','false'), prefix=r'\b', suffix=r'\b'),
Keyword.Constant),
(words((
'auto','catch','char','const_cast','delete','dynamic_cast','enum',
'explicit','friend','goto','long','mutable','new','operator',
'private','protected','public','reinterpret_cast','short','signed',
'sizeof','static_cast','template','this','throw','try','typename',
'union','unsigned','using','virtual'),
prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
(words((
'dword','matrix','snorm','string','unorm','unsigned','void','vector',
'BlendState','Buffer','ByteAddressBuffer','ComputeShader',
'DepthStencilState','DepthStencilView','DomainShader',
'GeometryShader','HullShader','InputPatch','LineStream',
'OutputPatch','PixelShader','PointStream','RasterizerState',
'RenderTargetView','RasterizerOrderedBuffer',
'RasterizerOrderedByteAddressBuffer',
'RasterizerOrderedStructuredBuffer','RasterizerOrderedTexture1D',
'RasterizerOrderedTexture1DArray','RasterizerOrderedTexture2D',
'RasterizerOrderedTexture2DArray','RasterizerOrderedTexture3D',
'RWBuffer','RWByteAddressBuffer','RWStructuredBuffer',
'RWTexture1D','RWTexture1DArray','RWTexture2D','RWTexture2DArray',
'RWTexture3D','SamplerState','SamplerComparisonState',
'StructuredBuffer','Texture1D','Texture1DArray','Texture2D',
'Texture2DArray','Texture2DMS','Texture2DMSArray','Texture3D',
'TextureCube','TextureCubeArray','TriangleStream','VertexShader'),
prefix=r'\b', suffix=r'\b'),
Keyword.Type),
(words((
'bool','double','float','int','half','min16float','min10float',
'min16int','min12int','min16uint','uint'),
prefix=r'\b', suffix=r'([1-4](x[1-4])?)?\b'),
Keyword.Type), # vector and matrix types
(words((
'abort','abs','acos','all','AllMemoryBarrier',
'AllMemoryBarrierWithGroupSync','any','AppendStructuredBuffer',
'asdouble','asfloat','asin','asint','asuint','asuint','atan',
'atan2','ceil','CheckAccessFullyMapped','clamp','clip',
'CompileShader','ConsumeStructuredBuffer','cos','cosh','countbits',
'cross','D3DCOLORtoUBYTE4','ddx','ddx_coarse','ddx_fine','ddy',
'ddy_coarse','ddy_fine','degrees','determinant',
'DeviceMemoryBarrier','DeviceMemoryBarrierWithGroupSync','distance',
'dot','dst','errorf','EvaluateAttributeAtCentroid',
'EvaluateAttributeAtSample','EvaluateAttributeSnapped','exp',
'exp2','f16tof32','f32tof16','faceforward','firstbithigh',
'firstbitlow','floor','fma','fmod','frac','frexp','fwidth',
'GetRenderTargetSampleCount','GetRenderTargetSamplePosition',
'GlobalOrderedCountIncrement','GroupMemoryBarrier',
'GroupMemoryBarrierWithGroupSync','InterlockedAdd','InterlockedAnd',
'InterlockedCompareExchange','InterlockedCompareStore',
'InterlockedExchange','InterlockedMax','InterlockedMin',
'InterlockedOr','InterlockedXor','isfinite','isinf','isnan',
'ldexp','length','lerp','lit','log','log10','log2','mad','max',
'min','modf','msad4','mul','noise','normalize','pow','printf',
'Process2DQuadTessFactorsAvg','Process2DQuadTessFactorsMax',
'Process2DQuadTessFactorsMin','ProcessIsolineTessFactors',
'ProcessQuadTessFactorsAvg','ProcessQuadTessFactorsMax',
'ProcessQuadTessFactorsMin','ProcessTriTessFactorsAvg',
'ProcessTriTessFactorsMax','ProcessTriTessFactorsMin',
'QuadReadLaneAt','QuadSwapX','QuadSwapY','radians','rcp',
'reflect','refract','reversebits','round','rsqrt','saturate',
'sign','sin','sincos','sinh','smoothstep','sqrt','step','tan',
'tanh','tex1D','tex1D','tex1Dbias','tex1Dgrad','tex1Dlod',
'tex1Dproj','tex2D','tex2D','tex2Dbias','tex2Dgrad','tex2Dlod',
'tex2Dproj','tex3D','tex3D','tex3Dbias','tex3Dgrad','tex3Dlod',
'tex3Dproj','texCUBE','texCUBE','texCUBEbias','texCUBEgrad',
'texCUBElod','texCUBEproj','transpose','trunc','WaveAllBitAnd',
'WaveAllMax','WaveAllMin','WaveAllBitOr','WaveAllBitXor',
'WaveAllEqual','WaveAllProduct','WaveAllSum','WaveAllTrue',
'WaveAnyTrue','WaveBallot','WaveGetLaneCount','WaveGetLaneIndex',
'WaveGetOrderedIndex','WaveIsHelperLane','WaveOnce',
'WavePrefixProduct','WavePrefixSum','WaveReadFirstLane',
'WaveReadLaneAt'),
prefix=r'\b', suffix=r'\b'),
Name.Builtin), # built-in functions
(words((
'SV_ClipDistance','SV_ClipDistance0','SV_ClipDistance1',
'SV_Culldistance','SV_CullDistance0','SV_CullDistance1',
'SV_Coverage','SV_Depth','SV_DepthGreaterEqual',
'SV_DepthLessEqual','SV_DispatchThreadID','SV_DomainLocation',
'SV_GroupID','SV_GroupIndex','SV_GroupThreadID','SV_GSInstanceID',
'SV_InnerCoverage','SV_InsideTessFactor','SV_InstanceID',
'SV_IsFrontFace','SV_OutputControlPointID','SV_Position',
'SV_PrimitiveID','SV_RenderTargetArrayIndex','SV_SampleIndex',
'SV_StencilRef','SV_TessFactor','SV_VertexID',
'SV_ViewportArrayIndex'),
prefix=r'\b', suffix=r'\b'),
Name.Decorator), # system-value semantics
(r'\bSV_Target[0-7]?\b', Name.Decorator),
(words((
'allow_uav_condition','branch','call','domain','earlydepthstencil',
'fastopt','flatten','forcecase','instance','loop','maxtessfactor',
'numthreads','outputcontrolpoints','outputtopology','partitioning',
'patchconstantfunc','unroll'),
prefix=r'\b', suffix=r'\b'),
Name.Decorator), # attributes
(r'[a-zA-Z_]\w*', Name),
(r'\\$', Comment.Preproc), # backslash at end of line -- usually macro continuation
(r'\s+', Text),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
}
class PostScriptLexer(RegexLexer):
"""
Lexer for PostScript files.
The PostScript Language Reference published by Adobe at
<http://partners.adobe.com/public/developer/en/ps/PLRM.pdf>
is the authority for this.
.. versionadded:: 1.4
"""
name = 'PostScript'
aliases = ['postscript', 'postscr']
filenames = ['*.ps', '*.eps']
mimetypes = ['application/postscript']
delimiter = r'()<>\[\]{}/%\s'
delimiter_end = r'(?=[%s])' % delimiter
valid_name_chars = r'[^%s]' % delimiter
valid_name = r"%s+%s" % (valid_name_chars, delimiter_end)
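    # valid_name matches a run of non-delimiter characters followed (via
    # lookahead) by a delimiter, e.g. 'moveto' in '0 0 moveto '.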
tokens = {
'root': [
# All comment types
(r'^%!.+\n', Comment.Preproc),
(r'%%.*\n', Comment.Special),
(r'(^%.*\n){2,}', Comment.Multiline),
(r'%.*\n', Comment.Single),
# String literals are awkward; enter separate state.
(r'\(', String, 'stringliteral'),
(r'[{}<>\[\]]', Punctuation),
# Numbers
(r'<[0-9A-Fa-f]+>' + delimiter_end, Number.Hex),
# Slight abuse: use Oct to signify any explicit base system
(r'[0-9]+\#(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)'
r'((e|E)[0-9]+)?' + delimiter_end, Number.Oct),
(r'(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)((e|E)[0-9]+)?'
+ delimiter_end, Number.Float),
(r'(\-|\+)?[0-9]+' + delimiter_end, Number.Integer),
# References
(r'\/%s' % valid_name, Name.Variable),
# Names
(valid_name, Name.Function), # Anything else is executed
# These keywords taken from
# <http://www.math.ubc.ca/~cass/graphics/manual/pdf/a1.pdf>
# Is there an authoritative list anywhere that doesn't involve
# trawling documentation?
(r'(false|true)' + delimiter_end, Keyword.Constant),
# Conditionals / flow control
(r'(eq|ne|g[et]|l[et]|and|or|not|if(?:else)?|for(?:all)?)'
+ delimiter_end, Keyword.Reserved),
(words((
'abs', 'add', 'aload', 'arc', 'arcn', 'array', 'atan', 'begin',
'bind', 'ceiling', 'charpath', 'clip', 'closepath', 'concat',
'concatmatrix', 'copy', 'cos', 'currentlinewidth', 'currentmatrix',
'currentpoint', 'curveto', 'cvi', 'cvs', 'def', 'defaultmatrix',
'dict', 'dictstackoverflow', 'div', 'dtransform', 'dup', 'end',
'exch', 'exec', 'exit', 'exp', 'fill', 'findfont', 'floor', 'get',
'getinterval', 'grestore', 'gsave', 'gt', 'identmatrix', 'idiv',
'idtransform', 'index', 'invertmatrix', 'itransform', 'length',
'lineto', 'ln', 'load', 'log', 'loop', 'matrix', 'mod', 'moveto',
'mul', 'neg', 'newpath', 'pathforall', 'pathbbox', 'pop', 'print',
'pstack', 'put', 'quit', 'rand', 'rangecheck', 'rcurveto', 'repeat',
'restore', 'rlineto', 'rmoveto', 'roll', 'rotate', 'round', 'run',
'save', 'scale', 'scalefont', 'setdash', 'setfont', 'setgray',
'setlinecap', 'setlinejoin', 'setlinewidth', 'setmatrix',
'setrgbcolor', 'shfill', 'show', 'showpage', 'sin', 'sqrt',
'stack', 'stringwidth', 'stroke', 'strokepath', 'sub', 'syntaxerror',
'transform', 'translate', 'truncate', 'typecheck', 'undefined',
'undefinedfilename', 'undefinedresult'), suffix=delimiter_end),
Name.Builtin),
(r'\s+', Text),
],
'stringliteral': [
(r'[^()\\]+', String),
(r'\\', String.Escape, 'escape'),
(r'\(', String, '#push'),
(r'\)', String, '#pop'),
],
'escape': [
(r'[0-8]{3}|n|r|t|b|f|\\|\(|\)', String.Escape, '#pop'),
default('#pop'),
],
}
class AsymptoteLexer(RegexLexer):
"""
For `Asymptote <http://asymptote.sf.net/>`_ source code.
.. versionadded:: 1.2
"""
name = 'Asymptote'
aliases = ['asy', 'asymptote']
filenames = ['*.asy']
mimetypes = ['text/x-asymptote']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/\*.*?\*/)+'
tokens = {
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment),
(r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment),
],
'statements': [
# simple string (TeX friendly)
(r'"(\\\\|\\"|[^"])*"', String),
# C style string (with character escapes)
(r"'", String, 'string'),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.]', Punctuation),
(r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)),
(r'(and|controls|tension|atleast|curl|if|else|while|for|do|'
r'return|break|continue|struct|typedef|new|access|import|'
r'unravel|from|include|quote|static|public|private|restricted|'
r'this|explicit|true|false|null|cycle|newframe|operator)\b', Keyword),
            # Since an asy type name can also be an asy function name,
            # the following rule only treats a match as Keyword.Type when it
            # is followed by whitespace and a letter (" [a-zA-Z]"), i.e. when
            # it looks like a declaration. This heuristic is not perfect.
(r'(Braid|FitResult|Label|Legend|TreeNode|abscissa|arc|arrowhead|'
r'binarytree|binarytreeNode|block|bool|bool3|bounds|bqe|circle|'
r'conic|coord|coordsys|cputime|ellipse|file|filltype|frame|grid3|'
r'guide|horner|hsv|hyperbola|indexedTransform|int|inversion|key|'
r'light|line|linefit|marginT|marker|mass|object|pair|parabola|path|'
r'path3|pen|picture|point|position|projection|real|revolution|'
r'scaleT|scientific|segment|side|slice|splitface|string|surface|'
r'tensionSpecifier|ticklocate|ticksgridT|tickvalues|transform|'
r'transformation|tree|triangle|trilinear|triple|vector|'
r'vertex|void)(?=\s+[a-zA-Z])', Keyword.Type),
            # Now the asy type names which are not also asy function names
            # (except user-defined ones). Possibly redundant.
(r'(Braid|FitResult|TreeNode|abscissa|arrowhead|block|bool|bool3|'
r'bounds|coord|frame|guide|horner|int|linefit|marginT|pair|pen|'
r'picture|position|real|revolution|slice|splitface|ticksgridT|'
r'tickvalues|tree|triple|vertex|void)\b', Keyword.Type),
(r'[a-zA-Z_]\w*:(?!:)', Name.Label),
(r'[a-zA-Z_]\w*', Name),
],
'root': [
include('whitespace'),
# functions
(r'((?:[\w*\s])+?(?:\s|\*))' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')(\{)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation),
'function'),
# function declarations
(r'((?:[\w*\s])+?(?:\s|\*))' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')(;)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation)),
default('statement'),
],
'statement': [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'function': [
include('whitespace'),
include('statements'),
(';', Punctuation),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
],
'string': [
(r"'", String, '#pop'),
(r'\\([\\abfnrtv"\'?]|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'\n', String),
(r"[^\\'\n]+", String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\n', String), # backslash followed by 'n'
(r'\\', String), # stray backslash
],
}
def get_tokens_unprocessed(self, text):
from pygments.lexers._asy_builtins import ASYFUNCNAME, ASYVARNAME
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name and value in ASYFUNCNAME:
token = Name.Function
elif token is Name and value in ASYVARNAME:
token = Name.Variable
yield index, token, value
def _shortened(word):
dpos = word.find('$')
return '|'.join(word[:dpos] + word[dpos+1:i] + r'\b'
for i in range(len(word), dpos, -1))
def _shortened_many(*words):
return '|'.join(map(_shortened, words))
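# For example, _shortened('bi$nd') expands to r'bind\b|bin\b|bi\b', i.e. the
# full Gnuplot command name or any abbreviation down to the marked prefix.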
class GnuplotLexer(RegexLexer):
"""
For `Gnuplot <http://gnuplot.info/>`_ plotting scripts.
.. versionadded:: 0.11
"""
name = 'Gnuplot'
aliases = ['gnuplot']
filenames = ['*.plot', '*.plt']
mimetypes = ['text/x-gnuplot']
tokens = {
'root': [
include('whitespace'),
(_shortened('bi$nd'), Keyword, 'bind'),
(_shortened_many('ex$it', 'q$uit'), Keyword, 'quit'),
(_shortened('f$it'), Keyword, 'fit'),
(r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation), 'if'),
(r'else\b', Keyword),
(_shortened('pa$use'), Keyword, 'pause'),
(_shortened_many('p$lot', 'rep$lot', 'sp$lot'), Keyword, 'plot'),
(_shortened('sa$ve'), Keyword, 'save'),
(_shortened('se$t'), Keyword, ('genericargs', 'optionarg')),
(_shortened_many('sh$ow', 'uns$et'),
Keyword, ('noargs', 'optionarg')),
(_shortened_many('low$er', 'ra$ise', 'ca$ll', 'cd$', 'cl$ear',
'h$elp', '\\?$', 'hi$story', 'l$oad', 'pr$int',
'pwd$', 're$read', 'res$et', 'scr$eendump',
'she$ll', 'sy$stem', 'up$date'),
Keyword, 'genericargs'),
(_shortened_many('pwd$', 're$read', 'res$et', 'scr$eendump',
'she$ll', 'test$'),
Keyword, 'noargs'),
(r'([a-zA-Z_]\w*)(\s*)(=)',
bygroups(Name.Variable, Text, Operator), 'genericargs'),
(r'([a-zA-Z_]\w*)(\s*\(.*?\)\s*)(=)',
bygroups(Name.Function, Text, Operator), 'genericargs'),
(r'@[a-zA-Z_]\w*', Name.Constant), # macros
(r';', Keyword),
],
'comment': [
(r'[^\\\n]', Comment),
(r'\\\n', Comment),
(r'\\', Comment),
# don't add the newline to the Comment token
default('#pop'),
],
'whitespace': [
('#', Comment, 'comment'),
(r'[ \t\v\f]+', Text),
],
'noargs': [
include('whitespace'),
# semicolon and newline end the argument list
(r';', Punctuation, '#pop'),
(r'\n', Text, '#pop'),
],
'dqstring': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
(r'\n', String, '#pop'), # newline ends the string too
],
'sqstring': [
(r"''", String), # escaped single quote
(r"'", String, '#pop'),
(r"[^\\'\n]+", String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # normal backslash
(r'\n', String, '#pop'), # newline ends the string too
],
'genericargs': [
include('noargs'),
(r'"', String, 'dqstring'),
(r"'", String, 'sqstring'),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
(r'(\d+\.\d*|\.\d+)', Number.Float),
(r'-?\d+', Number.Integer),
('[,.~!%^&*+=|?:<>/-]', Operator),
(r'[{}()\[\]]', Punctuation),
(r'(eq|ne)\b', Operator.Word),
(r'([a-zA-Z_]\w*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[a-zA-Z_]\w*', Name),
(r'@[a-zA-Z_]\w*', Name.Constant), # macros
(r'\\\n', Text),
],
'optionarg': [
include('whitespace'),
(_shortened_many(
"a$ll", "an$gles", "ar$row", "au$toscale", "b$ars", "bor$der",
"box$width", "cl$abel", "c$lip", "cn$trparam", "co$ntour", "da$ta",
"data$file", "dg$rid3d", "du$mmy", "enc$oding", "dec$imalsign",
"fit$", "font$path", "fo$rmat", "fu$nction", "fu$nctions", "g$rid",
"hid$den3d", "his$torysize", "is$osamples", "k$ey", "keyt$itle",
"la$bel", "li$nestyle", "ls$", "loa$dpath", "loc$ale", "log$scale",
"mac$ros", "map$ping", "map$ping3d", "mar$gin", "lmar$gin",
"rmar$gin", "tmar$gin", "bmar$gin", "mo$use", "multi$plot",
"mxt$ics", "nomxt$ics", "mx2t$ics", "nomx2t$ics", "myt$ics",
"nomyt$ics", "my2t$ics", "nomy2t$ics", "mzt$ics", "nomzt$ics",
"mcbt$ics", "nomcbt$ics", "of$fsets", "or$igin", "o$utput",
"pa$rametric", "pm$3d", "pal$ette", "colorb$ox", "p$lot",
"poi$ntsize", "pol$ar", "pr$int", "obj$ect", "sa$mples", "si$ze",
"st$yle", "su$rface", "table$", "t$erminal", "termo$ptions", "ti$cs",
"ticsc$ale", "ticsl$evel", "timef$mt", "tim$estamp", "tit$le",
"v$ariables", "ve$rsion", "vi$ew", "xyp$lane", "xda$ta", "x2da$ta",
"yda$ta", "y2da$ta", "zda$ta", "cbda$ta", "xl$abel", "x2l$abel",
"yl$abel", "y2l$abel", "zl$abel", "cbl$abel", "xti$cs", "noxti$cs",
"x2ti$cs", "nox2ti$cs", "yti$cs", "noyti$cs", "y2ti$cs", "noy2ti$cs",
"zti$cs", "nozti$cs", "cbti$cs", "nocbti$cs", "xdti$cs", "noxdti$cs",
"x2dti$cs", "nox2dti$cs", "ydti$cs", "noydti$cs", "y2dti$cs",
"noy2dti$cs", "zdti$cs", "nozdti$cs", "cbdti$cs", "nocbdti$cs",
"xmti$cs", "noxmti$cs", "x2mti$cs", "nox2mti$cs", "ymti$cs",
"noymti$cs", "y2mti$cs", "noy2mti$cs", "zmti$cs", "nozmti$cs",
"cbmti$cs", "nocbmti$cs", "xr$ange", "x2r$ange", "yr$ange",
"y2r$ange", "zr$ange", "cbr$ange", "rr$ange", "tr$ange", "ur$ange",
"vr$ange", "xzeroa$xis", "x2zeroa$xis", "yzeroa$xis", "y2zeroa$xis",
"zzeroa$xis", "zeroa$xis", "z$ero"), Name.Builtin, '#pop'),
],
'bind': [
('!', Keyword, '#pop'),
(_shortened('all$windows'), Name.Builtin),
include('genericargs'),
],
'quit': [
(r'gnuplot\b', Keyword),
include('noargs'),
],
'fit': [
(r'via\b', Name.Builtin),
include('plot'),
],
'if': [
(r'\)', Punctuation, '#pop'),
include('genericargs'),
],
'pause': [
(r'(mouse|any|button1|button2|button3)\b', Name.Builtin),
(_shortened('key$press'), Name.Builtin),
include('genericargs'),
],
'plot': [
(_shortened_many('ax$es', 'axi$s', 'bin$ary', 'ev$ery', 'i$ndex',
'mat$rix', 's$mooth', 'thru$', 't$itle',
'not$itle', 'u$sing', 'w$ith'),
Name.Builtin),
include('genericargs'),
],
'save': [
(_shortened_many('f$unctions', 's$et', 't$erminal', 'v$ariables'),
Name.Builtin),
include('genericargs'),
],
}
class PovrayLexer(RegexLexer):
"""
For `Persistence of Vision Raytracer <http://www.povray.org/>`_ files.
.. versionadded:: 0.11
"""
name = 'POVRay'
aliases = ['pov']
filenames = ['*.pov', '*.inc']
mimetypes = ['text/x-povray']
tokens = {
'root': [
(r'/\*[\w\W]*?\*/', Comment.Multiline),
(r'//.*\n', Comment.Single),
(r'(?s)"(?:\\.|[^"\\])+"', String.Double),
(words((
'break', 'case', 'debug', 'declare', 'default', 'define', 'else',
'elseif', 'end', 'error', 'fclose', 'fopen', 'for', 'if', 'ifdef',
'ifndef', 'include', 'local', 'macro', 'range', 'read', 'render',
'statistics', 'switch', 'undef', 'version', 'warning', 'while',
'write'), prefix=r'#', suffix=r'\b'),
Comment.Preproc),
(words((
'aa_level', 'aa_threshold', 'abs', 'acos', 'acosh', 'adaptive', 'adc_bailout',
'agate', 'agate_turb', 'all', 'alpha', 'ambient', 'ambient_light', 'angle',
'aperture', 'arc_angle', 'area_light', 'asc', 'asin', 'asinh', 'assumed_gamma',
'atan', 'atan2', 'atanh', 'atmosphere', 'atmospheric_attenuation',
'attenuating', 'average', 'background', 'black_hole', 'blue', 'blur_samples',
'bounded_by', 'box_mapping', 'bozo', 'break', 'brick', 'brick_size',
'brightness', 'brilliance', 'bumps', 'bumpy1', 'bumpy2', 'bumpy3', 'bump_map',
'bump_size', 'case', 'caustics', 'ceil', 'checker', 'chr', 'clipped_by', 'clock',
'color', 'color_map', 'colour', 'colour_map', 'component', 'composite', 'concat',
'confidence', 'conic_sweep', 'constant', 'control0', 'control1', 'cos', 'cosh',
'count', 'crackle', 'crand', 'cube', 'cubic_spline', 'cylindrical_mapping',
'debug', 'declare', 'default', 'degrees', 'dents', 'diffuse', 'direction',
'distance', 'distance_maximum', 'div', 'dust', 'dust_type', 'eccentricity',
'else', 'emitting', 'end', 'error', 'error_bound', 'exp', 'exponent',
'fade_distance', 'fade_power', 'falloff', 'falloff_angle', 'false',
'file_exists', 'filter', 'finish', 'fisheye', 'flatness', 'flip', 'floor',
'focal_point', 'fog', 'fog_alt', 'fog_offset', 'fog_type', 'frequency', 'gif',
'global_settings', 'glowing', 'gradient', 'granite', 'gray_threshold',
'green', 'halo', 'hexagon', 'hf_gray_16', 'hierarchy', 'hollow', 'hypercomplex',
'if', 'ifdef', 'iff', 'image_map', 'incidence', 'include', 'int', 'interpolate',
'inverse', 'ior', 'irid', 'irid_wavelength', 'jitter', 'lambda', 'leopard',
'linear', 'linear_spline', 'linear_sweep', 'location', 'log', 'looks_like',
'look_at', 'low_error_factor', 'mandel', 'map_type', 'marble', 'material_map',
'matrix', 'max', 'max_intersections', 'max_iteration', 'max_trace_level',
'max_value', 'metallic', 'min', 'minimum_reuse', 'mod', 'mortar',
'nearest_count', 'no', 'normal', 'normal_map', 'no_shadow', 'number_of_waves',
'octaves', 'off', 'offset', 'omega', 'omnimax', 'on', 'once', 'onion', 'open',
'orthographic', 'panoramic', 'pattern1', 'pattern2', 'pattern3',
'perspective', 'pgm', 'phase', 'phong', 'phong_size', 'pi', 'pigment',
'pigment_map', 'planar_mapping', 'png', 'point_at', 'pot', 'pow', 'ppm',
'precision', 'pwr', 'quadratic_spline', 'quaternion', 'quick_color',
'quick_colour', 'quilted', 'radial', 'radians', 'radiosity', 'radius', 'rainbow',
'ramp_wave', 'rand', 'range', 'reciprocal', 'recursion_limit', 'red',
'reflection', 'refraction', 'render', 'repeat', 'rgb', 'rgbf', 'rgbft', 'rgbt',
'right', 'ripples', 'rotate', 'roughness', 'samples', 'scale', 'scallop_wave',
'scattering', 'seed', 'shadowless', 'sin', 'sine_wave', 'sinh', 'sky', 'sky_sphere',
'slice', 'slope_map', 'smooth', 'specular', 'spherical_mapping', 'spiral',
'spiral1', 'spiral2', 'spotlight', 'spotted', 'sqr', 'sqrt', 'statistics', 'str',
'strcmp', 'strength', 'strlen', 'strlwr', 'strupr', 'sturm', 'substr', 'switch', 'sys',
't', 'tan', 'tanh', 'test_camera_1', 'test_camera_2', 'test_camera_3',
'test_camera_4', 'texture', 'texture_map', 'tga', 'thickness', 'threshold',
'tightness', 'tile2', 'tiles', 'track', 'transform', 'translate', 'transmit',
'triangle_wave', 'true', 'ttf', 'turbulence', 'turb_depth', 'type',
'ultra_wide_angle', 'up', 'use_color', 'use_colour', 'use_index', 'u_steps',
'val', 'variance', 'vaxis_rotate', 'vcross', 'vdot', 'version', 'vlength',
'vnormalize', 'volume_object', 'volume_rendered', 'vol_with_light',
'vrotate', 'v_steps', 'warning', 'warp', 'water_level', 'waves', 'while', 'width',
'wood', 'wrinkles', 'yes'), prefix=r'\b', suffix=r'\b'),
Keyword),
(words((
'bicubic_patch', 'blob', 'box', 'camera', 'cone', 'cubic', 'cylinder', 'difference',
'disc', 'height_field', 'intersection', 'julia_fractal', 'lathe',
'light_source', 'merge', 'mesh', 'object', 'plane', 'poly', 'polygon', 'prism',
'quadric', 'quartic', 'smooth_triangle', 'sor', 'sphere', 'superellipsoid',
'text', 'torus', 'triangle', 'union'), suffix=r'\b'),
Name.Builtin),
# TODO: <=, etc
(r'[\[\](){}<>;,]', Punctuation),
(r'[-+*/=]', Operator),
(r'\b(x|y|z|u|v)\b', Name.Builtin.Pseudo),
(r'[a-zA-Z_]\w*', Name),
(r'[0-9]+\.[0-9]*', Number.Float),
(r'\.[0-9]+', Number.Float),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String),
(r'\s+', Text),
]
}
| 49.923274 | 104 | 0.479662 | [
"BSD-3-Clause"
] | ritchadh/docs-like-code-demo | env/lib/python3.7/site-packages/pygments/lexers/graphics.py | 39,040 | Python |
import os
import sys
from typing import Dict
from typing import List
from typing import Optional
import pkg_resources
from setuptools import find_packages
from setuptools import setup
def get_version() -> str:
version_filepath = os.path.join(os.path.dirname(__file__), "optuna", "version.py")
with open(version_filepath) as f:
for line in f:
if line.startswith("__version__"):
return line.strip().split()[-1][1:-1]
assert False
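# For example, a line such as __version__ = "2.4.0" in optuna/version.py is
# reduced to "2.4.0" by strip().split()[-1][1:-1] above (the version string
# here is only illustrative).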
def get_long_description() -> str:
readme_filepath = os.path.join(os.path.dirname(__file__), "README.md")
with open(readme_filepath) as f:
return f.read()
def get_install_requires() -> List[str]:
return [
"alembic",
"cliff",
"cmaes>=0.6.0",
"colorlog",
"joblib",
"numpy",
"packaging>=20.0",
"scipy!=1.4.0",
"sqlalchemy>=1.1.0",
"tqdm",
]
def get_tests_require() -> List[str]:
return get_extras_require()["testing"]
def get_extras_require() -> Dict[str, List[str]]:
requirements = {
"checking": ["black", "hacking", "isort", "mypy==0.782", "blackdoc"],
"codecov": ["codecov", "pytest-cov"],
"doctest": [
"cma",
"matplotlib>=3.0.0",
"pandas",
"plotly>=4.0.0",
"scikit-learn>=0.19.0,<0.23.0",
"scikit-optimize",
"mlflow",
],
"document": [
# TODO(hvy): Unpin `sphinx` version after:
# https://github.com/sphinx-doc/sphinx/issues/8105.
"sphinx==3.0.4",
# As reported in: https://github.com/readthedocs/sphinx_rtd_theme/issues/949,
# `sphinx_rtd_theme` 0.5.0 is still not compatible with `sphinx` >= 3.0.
"sphinx_rtd_theme<0.5.0",
"sphinx-gallery",
"sphinx-plotly-directive",
"pillow",
"matplotlib",
"scikit-learn",
],
"example": [
"catboost",
"chainer",
"lightgbm",
"mlflow",
"mpi4py",
"mxnet",
"nbval",
"scikit-image",
"scikit-learn>=0.19.0,<0.23.0", # optuna/visualization/param_importances.py.
"xgboost",
"keras",
"tensorflow>=2.0.0",
"tensorflow-datasets",
"pytorch-ignite",
"pytorch-lightning>=0.8.1",
"thop",
"skorch",
"stable-baselines3>=0.7.0",
"catalyst",
]
+ (
["torch==1.7.0", "torchvision==0.8.1", "torchaudio==0.7.0"]
if sys.platform == "darwin"
else ["torch==1.7.0+cpu", "torchvision==0.8.1+cpu", "torchaudio==0.7.0"]
)
+ (
[
"allennlp==1.2.0",
"fastai<2",
"dask[dataframe]",
"dask-ml",
]
if sys.version_info[:2] < (3, 8)
else []
),
"experimental": ["redis"],
"testing": [
# TODO(toshihikoyanase): Remove the version constraint after resolving the issue
# https://github.com/optuna/optuna/issues/1000.
"bokeh<2.0.0",
"chainer>=5.0.0",
"cma",
"fakeredis",
"lightgbm",
"matplotlib>=3.0.0",
"mlflow",
"mpi4py",
"mxnet",
"pandas",
"plotly>=4.0.0",
"pytest",
"scikit-learn>=0.19.0,<0.23.0",
"scikit-optimize",
"xgboost",
"keras",
"tensorflow",
"tensorflow-datasets",
"pytorch-ignite",
"pytorch-lightning>=0.8.1",
"skorch",
"catalyst",
]
+ (
["torch==1.7.0", "torchvision==0.8.1", "torchaudio==0.7.0"]
if sys.platform == "darwin"
else ["torch==1.7.0+cpu", "torchvision==0.8.1+cpu", "torchaudio==0.7.0"]
)
+ (["allennlp==1.2.0", "fastai<2"] if sys.version_info[:2] < (3, 8) else []),
"tests": ["fakeredis", "pytest"],
"optional": [
"bokeh<2.0.0", # optuna/cli.py, optuna/dashboard.py.
"matplotlib>=3.0.0", # optuna/visualization/matplotlib
"pandas", # optuna/study.py
"plotly>=4.0.0", # optuna/visualization.
"redis", # optuna/storages/redis.py.
"scikit-learn>=0.19.0,<0.23.0", # optuna/visualization/param_importances.py.
],
"integration": [
# TODO(toshihikoyanase): Remove the version constraint after resolving the issue
# https://github.com/optuna/optuna/issues/1000.
"chainer>=5.0.0",
"cma",
"lightgbm",
"mlflow",
"mpi4py",
"mxnet",
"pandas",
"scikit-learn>=0.19.0,<0.23.0",
"scikit-optimize",
"xgboost",
"keras",
"tensorflow",
"tensorflow-datasets",
"pytorch-ignite",
"pytorch-lightning>=0.8.1",
"skorch",
"catalyst",
]
+ (
["torch==1.7.0", "torchvision==0.8.1", "torchaudio==0.7.0"]
if sys.platform == "darwin"
else ["torch==1.7.0+cpu", "torchvision==0.8.1+cpu", "torchaudio==0.7.0"]
)
+ (["allennlp==1.2.0", "fastai<2"] if sys.version_info[:2] < (3, 8) else []),
}
return requirements
def find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:
for pkg in pkgs:
try:
return pkg_resources.get_distribution(pkg)
except pkg_resources.DistributionNotFound:
pass
return None
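# For example, find_any_distribution(["torch", "tensorflow"]) returns the
# pkg_resources.Distribution of the first of those packages that is installed,
# or None if neither is available.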
setup(
name="optuna",
version=get_version(),
description="A hyperparameter optimization framework",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="Takuya Akiba",
author_email="[email protected]",
url="https://optuna.org/",
packages=find_packages(),
package_data={
"optuna": [
"storages/_rdb/alembic.ini",
"storages/_rdb/alembic/*.*",
"storages/_rdb/alembic/versions/*.*",
"py.typed",
]
},
python_requires=">=3.6",
install_requires=get_install_requires(),
tests_require=get_tests_require(),
extras_require=get_extras_require(),
entry_points={
"console_scripts": ["optuna = optuna.cli:main"],
"optuna.command": [
"create-study = optuna.cli:_CreateStudy",
"delete-study = optuna.cli:_DeleteStudy",
"study set-user-attr = optuna.cli:_StudySetUserAttribute",
"studies = optuna.cli:_Studies",
"dashboard = optuna.cli:_Dashboard",
"study optimize = optuna.cli:_StudyOptimize",
"storage upgrade = optuna.cli:_StorageUpgrade",
],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| 31.574297 | 92 | 0.512338 | [
"MIT"
] | 130ndim/optuna | setup.py | 7,862 | Python |
from ..coefficient_array import PwCoeffs
from scipy.sparse import dia_matrix
import numpy as np
def make_kinetic_precond(kpointset, c0, eps=0.1, asPwCoeffs=True):
"""
Preconditioner
P = 1 / (||k|| + ε)
Keyword Arguments:
kpointset --
"""
nk = len(kpointset)
nc = kpointset.ctx().num_spins()
if nc == 1 and nk == 1 and not asPwCoeffs:
# return as np.matrix
kp = kpointset[0]
gkvec = kp.gkvec()
assert (gkvec.num_gvec() == gkvec.count())
N = gkvec.count()
d = np.array([
1 / (np.sum((np.array(gkvec.gkvec(i)))**2) + eps)
for i in range(N)
])
return DiagonalPreconditioner(
D=dia_matrix((d, 0), shape=(N, N)), c0=c0)
else:
P = PwCoeffs(dtype=np.float64, ctype=dia_matrix)
for k in range(nk):
kp = kpointset[k]
gkvec = kp.gkvec()
assert (gkvec.num_gvec() == gkvec.count())
N = gkvec.count()
d = np.array([
1 / (np.sum(
(np.array(gkvec.gkvec_cart(i)))**2) + eps)
for i in range(N)
])
for ispn in range(nc):
P[k, ispn] = dia_matrix((d, 0), shape=(N, N))
return DiagonalPreconditioner(P, c0)
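# Rough usage sketch (the names kset, c0 and g are placeholders, not part of
# this module): with kset a sirius k-point set, c0 the reference plane-wave
# coefficients and g a gradient stored as a CoefficientArray,
#     P = make_kinetic_precond(kset, c0, eps=0.1)
#     g_prec = P @ g   # scales g by 1/(|G+k|^2 + eps) and re-applies the constraint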
class Preconditioner:
def __init__(self):
pass
class DiagonalPreconditioner(Preconditioner):
"""
Apply diagonal preconditioner and project resulting gradient to satisfy the constraint.
"""
def __init__(self, D, c0):
super().__init__()
self.c0 = c0
self.D = D
def __matmul__(self, other):
"""
"""
from ..coefficient_array import CoefficientArray
from .ot_transformations import lagrangeMult
out = type(other)(dtype=other.dtype)
if isinstance(other, CoefficientArray):
for key, Dl in self.D.items():
out[key] = Dl * other[key]
else:
raise ValueError('wrong type given')
ll = lagrangeMult(other, self.c0, self)
return out + ll
    def __mul__(self, s):
        """
        Scale the preconditioner in place by a scalar, or apply it entry-wise
        to a CoefficientArray.
        """
        from ..coefficient_array import CoefficientArray
        import numpy as np
        if np.isscalar(s):
            for key, Dl in self.D.items():
                self.D[key] = s * Dl
            return self  # return the scaled preconditioner rather than None
elif isinstance(s, CoefficientArray):
out = type(s)(dtype=s.dtype)
for key in s.keys():
out[key] = self.D[key] * s[key]
return out
__lmul__ = __mul__
__rmul__ = __mul__
def __neg__(self):
"""
"""
from ..coefficient_array import CoefficientArray
if isinstance(self.D, CoefficientArray):
out_data = type(self.D)(dtype=self.D.dtype, ctype=self.D.ctype)
out = DiagonalPreconditioner(out_data, self.c0)
for k, v in self.D.items():
out.D[k] = -v
return out
else:
out = DiagonalPreconditioner(self.D, self.c0)
out.D = -self.D
return out
def __getitem__(self, key):
return self.D[key]
class IdentityPreconditioner(Preconditioner):
def __init__(self, c0, _f=1):
super().__init__()
self.c0 = c0
self._f = _f
def __matmul__(self, other):
from .ot_transformations import lagrangeMult
ll = lagrangeMult(other, self.c0, self)
return self._f * other + ll
def __mul__(self, s):
return self._f * s
def __neg__(self):
return IdentityPreconditioner(self.c0, _f=-self._f)
def __getitem__(self, key):
return self._f
__lmul__ = __mul__
__rmul__ = __mul__
| 26.92029 | 91 | 0.544818 | [
"BSD-2-Clause"
] | electronic-structure/SIRIUS | python_module/sirius/ot/ot_precondition.py | 3,716 | Python |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
# @author: Edgar Magana, Cisco Systems, Inc.
#
"""
Configuration consolidation for the Nexus Driver
This module will export the configuration parameters
from the nexus.ini file
"""
from quantum.common.utils import find_config_file
from quantum.plugins.cisco.common import cisco_configparser as confp
CP = confp.CiscoConfigParser(find_config_file({'plugin': 'cisco'},
"nexus.ini"))
SECTION = CP['SWITCH']
NEXUS_IP_ADDRESS = SECTION['nexus_ip_address']
NEXUS_FIRST_PORT = SECTION['nexus_first_port']
NEXUS_SECOND_PORT = SECTION['nexus_second_port']
NEXUS_SSH_PORT = SECTION['nexus_ssh_port']
SECTION = CP['DRIVER']
NEXUS_DRIVER = SECTION['name']
| 34.317073 | 78 | 0.739161 | [
"Apache-2.0"
] | r-mibu/neutron | quantum/plugins/cisco/nexus/cisco_nexus_configuration.py | 1,407 | Python |
import os
def ensure_dir(path: str) -> str:
dirname = os.path.dirname(path)
os.makedirs(dirname, exist_ok=True)
return path | 22.666667 | 39 | 0.691176 | [
"MIT"
] | mbiemann/python-simple-toolbelt | simple_toolbelt/path.py | 136 | Python |
import emoji
import string
class Tweet():
def __init__(self, text: str):
self.text = text.lower()
self.hashtags = self.find("#", forbidden="@")
self.cleanTag()
self.tags = self.find("@", forbidden="#")
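    # For example, Tweet("Check this #Python tip @User!") yields
    # hashtags == ['#python'] and tags == ['@user'].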
def find(self, prefix, forbidden):
ret = []
_text = self.text
_text = _text.replace(forbidden, " ")
_text = _text.replace(" ", "")
_text = _text.replace("!", "")
if not _text.startswith("RT"):
for word in _text.split(" "):
word = self.remove_emojis(word)
if len(word) >= 2 and word.count(prefix) == 1:
word = word.split(prefix)
word = prefix + word[len(word) - 1]
word = word.strip()
if word not in ret and len(word) >= 2 and word.startswith(prefix):
ret.append(word.lower())
return ret
def remove_emojis(self, s):
return ''.join(c for c in s if c not in emoji.UNICODE_EMOJI['en'])
def cleanTag(self):
allowed = list(string.ascii_lowercase + string.ascii_uppercase + string.digits) + ["_", "@", " "]
newtext = ""
for letter in self.text:
if letter in allowed:
newtext += letter
self.text = newtext
| 28.956522 | 105 | 0.512763 | [
"MIT"
] | EliasSchramm/TwitterDB | crawler/tweet.py | 1,336 | Python |
'''
Neuron simulator export for:
Components:
net1 (Type: network)
sim1 (Type: Simulation: length=1.0 (SI time) step=5.0E-5 (SI time))
hhcell (Type: cell)
passive (Type: ionChannelPassive: conductance=1.0E-11 (SI conductance))
na (Type: ionChannelHH: conductance=1.0E-11 (SI conductance))
k (Type: ionChannelHH: conductance=1.0E-11 (SI conductance))
pulseGen1 (Type: pulseGenerator: delay=0.0 (SI time) duration=1.0E8 (SI time) amplitude=8.000000000000001E-11 (SI current))
This NEURON file has been generated by org.neuroml.export (see https://github.com/NeuroML/org.neuroml.export)
org.neuroml.export v1.5.3
org.neuroml.model v1.5.3
jLEMS v0.9.9.0
'''
import neuron
import time
import hashlib
h = neuron.h
h.load_file("stdlib.hoc")
h.load_file("stdgui.hoc")
h("objref p")
h("p = new PythonObject()")
class NeuronSimulation():
def __init__(self, tstop, dt, seed=123456789):
print("\n Starting simulation in NEURON of %sms generated from NeuroML2 model...\n"%tstop)
self.seed = seed
self.randoms = []
self.next_global_id = 0 # Used in Random123 classes for elements using random(), etc.
self.next_spiking_input_id = 0 # Used in Random123 classes for elements using random(), etc.
'''
Adding simulation Component(id=sim1 type=Simulation) of network/component: net1 (Type: network)
'''
# ###################### Population: hhpop
print("Population hhpop contains 1 instance(s) of component: hhcell of type: cell")
h.load_file("hhcell.hoc")
a_hhpop = []
h("{ n_hhpop = 1 }")
h("objectvar a_hhpop[n_hhpop]")
for i in range(int(h.n_hhpop)):
h("a_hhpop[%i] = new hhcell()"%i)
h("access a_hhpop[%i].soma"%i)
self.next_global_id+=1
h("proc initialiseV_hhpop() { for i = 0, n_hhpop-1 { a_hhpop[i].set_initial_v() } }")
h("objref fih_hhpop")
h('{fih_hhpop = new FInitializeHandler(0, "initialiseV_hhpop()")}')
h("proc initialiseIons_hhpop() { for i = 0, n_hhpop-1 { a_hhpop[i].set_initial_ion_properties() } }")
h("objref fih_ion_hhpop")
h('{fih_ion_hhpop = new FInitializeHandler(1, "initialiseIons_hhpop()")}')
# Adding single input: Component(id=null type=explicitInput)
h("objref explicitInput_pulseGen1a_hhpop0_soma")
h("a_hhpop[0].soma { explicitInput_pulseGen1a_hhpop0_soma = new pulseGen1(0.5) } ")
trec = h.Vector()
trec.record(h._ref_t)
h.tstop = tstop
h.dt = dt
h.steps_per_ms = 1/h.dt
# ###################### File to save: time.dat (time)
# Column: time
h(' objectvar v_time ')
h(' { v_time = new Vector() } ')
h(' { v_time.record(&t) } ')
h.v_time.resize((h.tstop * h.steps_per_ms) + 1)
self.initialized = False
self.sim_end = -1 # will be overwritten
def run(self):
self.initialized = True
sim_start = time.time()
print("Running a simulation of %sms (dt = %sms; seed=%s)" % (h.tstop, h.dt, self.seed))
h.run()
self.sim_end = time.time()
sim_time = self.sim_end - sim_start
print("Finished NEURON simulation in %f seconds (%f mins)..."%(sim_time, sim_time/60.0))
self.save_results()
def advance(self):
if not self.initialized:
h.finitialize()
self.initialized = True
h.fadvance()
###############################################################################
# Hash function to use in generation of random value
# This is copied from NetPyNE: https://github.com/Neurosim-lab/netpyne/blob/master/netpyne/simFuncs.py
###############################################################################
def _id32 (self,obj):
return int(hashlib.md5(obj).hexdigest()[0:8],16) # convert 8 first chars of md5 hash in base 16 to int
###############################################################################
# Initialize the stim randomizer
# This is copied from NetPyNE: https://github.com/Neurosim-lab/netpyne/blob/master/netpyne/simFuncs.py
###############################################################################
def _init_stim_randomizer(self,rand, stimType, gid, seed):
#print("INIT STIM %s; %s; %s; %s"%(rand, stimType, gid, seed))
rand.Random123(self._id32(stimType), gid, seed)
def save_results(self):
print("Saving results at t=%s..."%h.t)
if self.sim_end < 0: self.sim_end = time.time()
# ###################### File to save: time.dat (time)
py_v_time = [ t/1000 for t in h.v_time.to_python() ] # Convert to Python list for speed...
f_time_f2 = open('time.dat', 'w')
num_points = len(py_v_time) # Simulation may have been stopped before tstop...
for i in range(num_points):
            f_time_f2.write('%f\n' % py_v_time[i]) # Save in SI units, one value per line...
f_time_f2.close()
print("Saved data to: time.dat")
save_end = time.time()
save_time = save_end - self.sim_end
print("Finished saving results in %f seconds"%(save_time))
print("Done")
quit()
if __name__ == '__main__':
ns = NeuronSimulation(tstop=1000.0, dt=0.049999997, seed=123456789)
ns.run()
| 31.72093 | 128 | 0.566166 | [
"MIT"
] | openworm/org.geppetto.model.neuroml | src/test/resources/expected/neuron/hhcell/main_script.py | 5,456 | Python |
# Character field ID when accessed: 100000201
# ParentID: 32226
# ObjectID: 0
| 19.5 | 45 | 0.75641 | [
"MIT"
] | doriyan13/doristory | scripts/quest/autogen_q32226s.py | 78 | Python |
# Josh Aaron Miller 2021
# VenntDB methods for Characters
import venntdb
from constants import *
# VenntDB Methods
def character_exists(self, username, char_id):
return self.get_character(username, char_id) is not None
def get_character(self, username, char_id):
self.assert_valid("accounts", username, "characters")
if self.is_valid("accounts", username, "characters", char_id):
return self.db["accounts"][username]["characters"][char_id]
return None
def create_character(self, username, character):
self.assert_valid("accounts", username, "characters")
self.db["accounts"][username]["characters"][character["id"]] = character
self.save_db()
def get_characters(self, username):
self.assert_valid("accounts", username, "characters")
return self.db["accounts"][username]["characters"]
def get_attr(self, username, char_id, attr):
self.assert_valid("accounts", username, "characters", char_id)
return self.get_character(username, char_id)[attr]
def set_attr(self, username, char_id, attr, val):
self.assert_valid("accounts", username, "characters", char_id)
self.get_character(username, char_id)[attr] = val
self.save_db()
| 30.219512 | 77 | 0.702986 | [
"MIT"
] | JackNolanDev/vennt-server | db_characters.py | 1,239 | Python |
from output.models.nist_data.atomic.g_year_month.schema_instance.nistschema_sv_iv_atomic_g_year_month_enumeration_5_xsd.nistschema_sv_iv_atomic_g_year_month_enumeration_5 import (
NistschemaSvIvAtomicGYearMonthEnumeration5,
NistschemaSvIvAtomicGYearMonthEnumeration5Type,
)
__all__ = [
"NistschemaSvIvAtomicGYearMonthEnumeration5",
"NistschemaSvIvAtomicGYearMonthEnumeration5Type",
]
| 40.1 | 179 | 0.875312 | [
"MIT"
] | tefra/xsdata-w3c-tests | output/models/nist_data/atomic/g_year_month/schema_instance/nistschema_sv_iv_atomic_g_year_month_enumeration_5_xsd/__init__.py | 401 | Python |
import os
from setuptools import setup
def filepath(fname):
return os.path.join(os.path.dirname(__file__), fname)
exec(compile(open('bevel/version.py').read(),
'bevel/version.py', 'exec'))
readme_md = filepath('README.md')
try:
import pypandoc
readme_rst = pypandoc.convert_file(readme_md, 'rst')
except ImportError:
readme_rst = open(readme_md).read()
setup(
name="bevel",
version="0.1.1",
author="Ross Diener, Steven Wu, Cameron Davidson-Pilon",
author_email="[email protected]",
description="Ordinal regression in Python",
license="MIT",
keywords="oridinal regression statistics data analysis",
url="https://github.com/ShopifyPeopleAnalytics/bevel",
packages=[
'bevel',
],
long_description=readme_rst,
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering",
],
install_requires=[
"numpy>=1.13.3",
"scipy>=1.0.0",
"pandas>=0.21.0",
"numdifftools>=0.9.20"
],
package_data={
"bevel": [
"../README.md",
"../LICENSE",
]
},
)
| 24.277778 | 60 | 0.591915 | [
"MIT"
] | ChihHsuanLin/bevel | setup.py | 1,311 | Python |
todos = ['barber', 'grocery']
for todo in todos:
print(todo) | 16.5 | 30 | 0.621212 | [
"MIT"
] | theseana/fempfasb | Term 5/Vue/p.py | 66 | Python |
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from batchgenerators.utilities.file_and_folder_operations import *
def remove_trailing_slash(filename: str):
while filename.endswith('/'):
filename = filename[:-1]
return filename
def maybe_add_0000_to_all_niigz(folder):
nii_gz = subfiles(folder, suffix='.nii.gz')
for n in nii_gz:
n = remove_trailing_slash(n)
if not n.endswith('_0000.nii.gz'):
os.rename(n, n[:-7] + '_0000.nii.gz')
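# For example, a file named case_07.nii.gz is renamed to case_07_0000.nii.gz,
# while case_07_0000.nii.gz is left untouched.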
| 34.129032 | 111 | 0.757089 | [
"Apache-2.0"
] | anxingle/nnUNet_simple | nnunet/utilities/file_endings.py | 1,058 | Python |
from dataclasses import dataclass
from apple.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class BackupInitialized(Streamable):
"""
Stores user decision regarding import of backup info
"""
user_initialized: bool # Stores if user made a selection in UI. (Skip vs Import backup)
user_skipped: bool # Stores if user decided to skip import of backup info
backup_info_imported: bool # Stores if backup info has been imported
new_wallet: bool # Stores if this wallet is newly created / not restored from backup
| 33.941176 | 92 | 0.755633 | [
"Apache-2.0"
] | Apple-Network/apple-blockchain | apple/wallet/settings/settings_objects.py | 577 | Python |
""" This file holds all the chapter 2 areas of the game. """
from time import sleep
# from classes import Player, Difficulty
from chapters.chapter import Chapter
from chapters.chapter3 import Chapter3
from other.sounds_effects import GameSounds
from game import player1, sounds, Difficulty
from choices import _player_choice, error_message
from other.colors import print_green, print_yellow, print_red, print_sleep, print_blue
class Chapter2(Chapter):
"""Contains all the main chapter 2 areas of the game."""
chapter_num = 2
def checkpoints(self):
"""runs movement to levels -- checkpoint when leaving area"""
return {'0': self.game,
'1': self.good_ending_and_continue,
'bad': self.bad_ending,
'3': self.woods_area,
}
def good_ending_and_continue(self):
"""Simply plays the good ending scene and then drops the player into chapter 2."""
self.good_ending()
Chapter3().game()
def game(self):
"""start of ch2"""
self.start()
print_sleep(
            'Upon driving the car through the broken roads area, the sun is certainly dwindling and the clock in the '
            'car says 2:35 AM.\nYou grow ever more tired and restless from everything that has led to this '
            'point.\n', 2.5)
choices = [str(x) for x in range(1, 3)]
choice_options = [
'Due to the car getting low on gas, you must make a tough decision. (1) Drive back to the local gas '
'station in town (2) Turn off the car and set up a camp fire in the woods: ']
choice = _player_choice(choices, choice_options)
if choice == '1':
sounds.zombie_attack_inside()
print_sleep(
'While attempting to put the car in reverse and head backwards to the local gas station in town, '
                'a swarm of zombies arises on the car as it gets stuck in gear!\n', 2.5)
if not player1.user_attack():
return
player1.total_kills += 5
print_green('You have successfully killed off the heaping swarm of zombies surrounding the car!\n', 1)
self.continue_message()
elif choice == '2':
print_sleep(
                'You have parked the car near the closest woods area and now need to gather up some supplies for a camp '
'fire.\n', 2)
self.woods_area()
def woods_area(self):
"""Checkpoint save 3"""
player1.checkpoint_save('3')
print_sleep(
'You have successfully gathered up some sticks and still need a source of flame to begin the campfire.\n',
2)
choices = [str(x) for x in range(1, 3)]
choice_options = [
'You can either test your luck in creating a fire by (1) Creating friction: Use sticks and rub against '
'nearby wood chips (2) Search for other useful resources: ']
choice = _player_choice(choices, choice_options)
if choice == '1':
sounds.flame_ignite()
            print_sleep('Whoosh! After a few minutes of creating friction, a small ember is born and grows into '
                        'a flame!\n', 2.5)
self.continue_message()
elif choice == '2':
sounds.zombie_attack_outside()
print_red(
'Whilst looking around for more resources, you begin hearing a group of 3 zombies running towards '
'you!\n', 2)
if not player1.user_attack():
return
player1.total_kills += 3
print_green('You have successfully killed off the group of 3 zombies running towards you!\n', 1)
self.continue_message()
| 44.186047 | 120 | 0.613421 | [
"MIT"
] | JordanLeich/Alpha-Zombie-Survival-Game | chapters/chapter2.py | 3,800 | Python |
import numpy
from fframework import asfunction, OpFunction
__all__ = ['Angle']
class Angle(OpFunction):
"""Transforms a mesh into the angle of the mesh to the x axis."""
def __init__(self, mesh):
"""*mesh* is the mesh Function."""
self.mesh = asfunction(mesh)
def __call__(self, ps):
"""Returns the arctan2. The (y, x) coordinate is in the last
dimension."""
meshT = self.mesh(ps).T
return numpy.arctan2(meshT[0], meshT[1]).T
| 24.7 | 70 | 0.621457 | [
"MIT"
] | friedrichromstedt/moviemaker3 | moviemaker3/math/angle.py | 494 | Python |
from pathlib import Path
import numba
import numpy as np
from det3d.core.bbox.geometry import (
points_count_convex_polygon_3d_jit,
points_in_convex_polygon_3d_jit,
)
try:
from spconv.utils import rbbox_intersection, rbbox_iou
except:
print("Import spconv fail, no support for sparse convolution!")
def points_count_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)):
rbbox_corners = center_to_corner_box3d(
rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis
)
surfaces = corner_to_surfaces_3d(rbbox_corners)
return points_count_convex_polygon_3d_jit(points[:, :3], surfaces)
def riou_cc(rbboxes, qrbboxes, standup_thresh=0.0):
# less than 50ms when used in second one thread. 10x slower than gpu
boxes_corners = center_to_corner_box2d(
rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4]
)
boxes_standup = corner_to_standup_nd(boxes_corners)
qboxes_corners = center_to_corner_box2d(
qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4]
)
qboxes_standup = corner_to_standup_nd(qboxes_corners)
# if standup box not overlapped, rbbox not overlapped too.
standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0)
return rbbox_iou(boxes_corners, qboxes_corners, standup_iou, standup_thresh)
def rinter_cc(rbboxes, qrbboxes, standup_thresh=0.0):
# less than 50ms when used in second one thread. 10x slower than gpu
boxes_corners = center_to_corner_box2d(
rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4]
)
boxes_standup = corner_to_standup_nd(boxes_corners)
qboxes_corners = center_to_corner_box2d(
qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4]
)
qboxes_standup = corner_to_standup_nd(qboxes_corners)
# if standup box not overlapped, rbbox not overlapped too.
standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0)
return rbbox_intersection(
boxes_corners, qboxes_corners, standup_iou, standup_thresh
)
def corners_nd(dims, origin=0.5):
"""generate relative box corners based on length per dim and
origin point.
Args:
dims (float array, shape=[N, ndim]): array of length per dim
origin (list or array or float): origin point relate to smallest point.
Returns:
float array, shape=[N, 2 ** ndim, ndim]: returned corners.
point layout example: (2d) x0y0, x0y1, x1y0, x1y1;
(3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
where x0 < x1, y0 < y1, z0 < z1
"""
ndim = int(dims.shape[1])
corners_norm = np.stack(
np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1
).astype(dims.dtype)
# now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1
# (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
# so need to convert to a format which is convenient to do other computing.
# for 2d boxes, format is clockwise start with minimum point
# for 3d boxes, please draw lines by your hand.
if ndim == 2:
# generate clockwise box corners
corners_norm = corners_norm[[0, 1, 3, 2]]
elif ndim == 3:
corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]
corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)
corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape([1, 2 ** ndim, ndim])
return corners
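# For example, corners_nd(np.array([[2., 4.]])) returns
# [[[-1., -2.], [-1., 2.], [1., 2.], [1., -2.]]] (shape [1, 4, 2]):
# a 2 x 4 box centred at the origin, corners listed clockwise starting at the
# minimum point.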
@numba.njit
def corners_2d_jit(dims, origin=0.5):
ndim = 2
corners_norm = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=dims.dtype)
corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)
corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim))
return corners
@numba.njit
def corners_3d_jit(dims, origin=0.5):
ndim = 3
corners_norm = np.array(
[0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1],
dtype=dims.dtype,
).reshape((8, 3))
corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]
corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)
corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim))
return corners
@numba.njit
def corner_to_standup_nd_jit(boxes_corner):
num_boxes = boxes_corner.shape[0]
ndim = boxes_corner.shape[-1]
result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype)
for i in range(num_boxes):
for j in range(ndim):
result[i, j] = np.min(boxes_corner[i, :, j])
for j in range(ndim):
result[i, j + ndim] = np.max(boxes_corner[i, :, j])
return result
def corner_to_standup_nd(boxes_corner):
assert len(boxes_corner.shape) == 3
standup_boxes = []
standup_boxes.append(np.min(boxes_corner, axis=1))
standup_boxes.append(np.max(boxes_corner, axis=1))
return np.concatenate(standup_boxes, -1)
def rbbox2d_to_near_bbox(rbboxes):
"""convert rotated bbox to nearest 'standing' or 'lying' bbox.
Args:
rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes
Returns:
bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes
"""
rots = rbboxes[..., -1]
rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi))
cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis]
bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4])
bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:])
return bboxes
def rotation_3d_in_axis(points, angles, axis=0):
# points: [N, point_size, 3]
rot_sin = np.sin(angles)
rot_cos = np.cos(angles)
ones = np.ones_like(rot_cos)
zeros = np.zeros_like(rot_cos)
if axis == 1:
rot_mat_T = np.stack(
[
[rot_cos, zeros, -rot_sin],
[zeros, ones, zeros],
[rot_sin, zeros, rot_cos],
]
)
elif axis == 2 or axis == -1:
rot_mat_T = np.stack(
[
[rot_cos, -rot_sin, zeros],
[rot_sin, rot_cos, zeros],
[zeros, zeros, ones],
]
)
elif axis == 0:
rot_mat_T = np.stack(
[
[zeros, rot_cos, -rot_sin],
[zeros, rot_sin, rot_cos],
[ones, zeros, zeros],
]
)
else:
raise ValueError("axis should in range")
return np.einsum("aij,jka->aik", points, rot_mat_T)
def rotation_points_single_angle(points, angle, axis=0):
# points: [N, 3]
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
if axis == 1:
rot_mat_T = np.array(
[[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]],
dtype=points.dtype,
)
elif axis == 2 or axis == -1:
rot_mat_T = np.array(
[[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]],
dtype=points.dtype,
)
elif axis == 0:
rot_mat_T = np.array(
[[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]],
dtype=points.dtype,
)
else:
raise ValueError("axis should in range")
return points @ rot_mat_T
def rotation_2d(points, angles):
"""rotation 2d points based on origin point clockwise when angle positive.
Args:
points (float array, shape=[N, point_size, 2]): points to be rotated.
angles (float array, shape=[N]): rotation angle.
Returns:
float array: same shape as points
"""
rot_sin = np.sin(angles)
rot_cos = np.cos(angles)
rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]])
return np.einsum("aij,jka->aik", points, rot_mat_T)
def rotation_box(box_corners, angle):
"""rotation 2d points based on origin point clockwise when angle positive.
Args:
        box_corners (float array, shape=[N, 4, 2]): box corners to be rotated.
angle (float): rotation angle.
Returns:
float array: same shape as points
"""
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
rot_mat_T = np.array(
[[rot_cos, -rot_sin], [rot_sin, rot_cos]], dtype=box_corners.dtype
)
return box_corners @ rot_mat_T
def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 0.5, 0.5), axis=2):
"""convert kitti locations, dimensions and angles to corners
Args:
centers (float array, shape=[N, 3]): locations in kitti label file.
dims (float array, shape=[N, 3]): dimensions in kitti label file.
angles (float array, shape=[N]): rotation_y in kitti label file.
origin (list or array or float): origin point relate to smallest point.
use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar.
axis (int): rotation axis. 1 for camera and 2 for lidar.
Returns:
[type]: [description]
"""
# 'length' in kitti format is in x axis.
# yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar)
# center in kitti format is [0.5, 1.0, 0.5] in xyz.
corners = corners_nd(dims, origin=origin)
# corners: [N, 8, 3]
if angles is not None:
corners = rotation_3d_in_axis(corners, angles, axis=axis)
corners += centers.reshape([-1, 1, 3])
return corners
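# For example, a single box with center (0, 0, 0), dims (4, 2, 1.5) and yaw 0
# yields an array of shape [1, 8, 3] whose corners span
# [-2, 2] x [-1, 1] x [-0.75, 0.75] with the default origin (0.5, 0.5, 0.5).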
def center_to_corner_box2d(centers, dims, angles=None, origin=0.5):
"""convert kitti locations, dimensions and angles to corners.
format: center(xy), dims(xy), angles(clockwise when positive)
Args:
centers (float array, shape=[N, 2]): locations in kitti label file.
dims (float array, shape=[N, 2]): dimensions in kitti label file.
angles (float array, shape=[N]): rotation_y in kitti label file.
Returns:
[type]: [description]
"""
# 'length' in kitti format is in x axis.
# xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar)
# center in kitti format is [0.5, 1.0, 0.5] in xyz.
corners = corners_nd(dims, origin=origin)
# corners: [N, 4, 2]
if angles is not None:
corners = rotation_2d(corners, angles)
corners += centers.reshape([-1, 1, 2])
return corners
@numba.jit(nopython=True)
def box2d_to_corner_jit(boxes):
num_box = boxes.shape[0]
corners_norm = np.zeros((4, 2), dtype=boxes.dtype)
corners_norm[1, 1] = 1.0
corners_norm[2] = 1.0
corners_norm[3, 0] = 1.0
corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype)
corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(1, 4, 2)
rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype)
for i in range(num_box):
rot_sin = np.sin(boxes[i, -1])
rot_cos = np.cos(boxes[i, -1])
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 1] = -rot_sin
rot_mat_T[1, 0] = rot_sin
rot_mat_T[1, 1] = rot_cos
box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2]
return box_corners
def rbbox3d_to_corners(rbboxes, origin=[0.5, 0.5, 0.5], axis=2):
return center_to_corner_box3d(
rbboxes[..., :3], rbboxes[..., 3:6], rbboxes[..., 6], origin, axis=axis
)
def rbbox3d_to_bev_corners(rbboxes, origin=0.5):
return center_to_corner_box2d(
rbboxes[..., :2], rbboxes[..., 3:5], rbboxes[..., 6], origin
)
def minmax_to_corner_2d(minmax_box):
ndim = minmax_box.shape[-1] // 2
center = minmax_box[..., :ndim]
dims = minmax_box[..., ndim:] - center
return center_to_corner_box2d(center, dims, origin=0.0)
def minmax_to_corner_2d_v2(minmax_box):
# N, 4 -> N 4 2
return minmax_box[..., [0, 1, 0, 3, 2, 3, 2, 1]].reshape(-1, 4, 2)
def minmax_to_corner_3d(minmax_box):
ndim = minmax_box.shape[-1] // 2
center = minmax_box[..., :ndim]
dims = minmax_box[..., ndim:] - center
return center_to_corner_box3d(center, dims, origin=0.0)
def minmax_to_center_2d(minmax_box):
ndim = minmax_box.shape[-1] // 2
center_min = minmax_box[..., :ndim]
dims = minmax_box[..., ndim:] - center_min
center = center_min + 0.5 * dims
return np.concatenate([center, dims], axis=-1)
def center_to_minmax_2d_0_5(centers, dims):
return np.concatenate([centers - dims / 2, centers + dims / 2], axis=-1)
def center_to_minmax_2d(centers, dims, origin=0.5):
if origin == 0.5:
return center_to_minmax_2d_0_5(centers, dims)
corners = center_to_corner_box2d(centers, dims, origin=origin)
return corners[:, [0, 2]].reshape([-1, 4])
def limit_period(val, offset=0.5, period=np.pi):
return val - np.floor(val / period + offset) * period
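# With the default offset=0.5 and period=np.pi this maps any angle into
# [-pi/2, pi/2); e.g. limit_period(1.25 * np.pi) == 0.25 * np.pi.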
def projection_matrix_to_CRT_kitti(proj):
# P = C @ [R|T]
# C is upper triangular matrix, so we need to inverse CR and use QR
# stable for all kitti camera projection matrix
CR = proj[0:3, 0:3]
CT = proj[0:3, 3]
RinvCinv = np.linalg.inv(CR)
Rinv, Cinv = np.linalg.qr(RinvCinv)
C = np.linalg.inv(Cinv)
R = np.linalg.inv(Rinv)
T = Cinv @ CT
return C, R, T
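# The returned matrices satisfy proj[:3, :3] == C @ R and proj[:3, 3] == C @ T,
# i.e. P = C @ [R|T], with C upper triangular (camera intrinsics) -- up to the
# sign conventions of np.linalg.qr.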
def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100):
fku = C[0, 0]
fkv = -C[1, 1]
u0v0 = C[0:2, 2]
z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis]
b = bbox_image
box_corners = np.array(
[[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype
)
near_box_corners = (box_corners - u0v0) / np.array(
[fku / near_clip, -fkv / near_clip], dtype=C.dtype
)
far_box_corners = (box_corners - u0v0) / np.array(
[fku / far_clip, -fkv / far_clip], dtype=C.dtype
)
ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=0) # [8, 2]
ret_xyz = np.concatenate([ret_xy, z_points], axis=1)
return ret_xyz
def get_frustum_v2(bboxes, C, near_clip=0.001, far_clip=100):
fku = C[0, 0]
fkv = -C[1, 1]
u0v0 = C[0:2, 2]
num_box = bboxes.shape[0]
z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[
np.newaxis, :, np.newaxis
]
z_points = np.tile(z_points, [num_box, 1, 1])
box_corners = minmax_to_corner_2d_v2(bboxes)
near_box_corners = (box_corners - u0v0) / np.array(
[fku / near_clip, -fkv / near_clip], dtype=C.dtype
)
far_box_corners = (box_corners - u0v0) / np.array(
[fku / far_clip, -fkv / far_clip], dtype=C.dtype
)
    ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=1)  # [num_box, 8, 2]
ret_xyz = np.concatenate([ret_xy, z_points], axis=-1)
return ret_xyz
@numba.njit
def _add_rgb_to_points_kernel(points_2d, image, points_rgb):
num_points = points_2d.shape[0]
image_h, image_w = image.shape[:2]
for i in range(num_points):
img_pos = np.floor(points_2d[i]).astype(np.int32)
if img_pos[0] >= 0 and img_pos[0] < image_w:
if img_pos[1] >= 0 and img_pos[1] < image_h:
points_rgb[i, :] = image[img_pos[1], img_pos[0], :]
# image[img_pos[1], img_pos[0]] = 0
def add_rgb_to_points(points, image, rect, Trv2c, P2, mean_size=[5, 5]):
kernel = np.ones(mean_size, np.float32) / np.prod(mean_size)
# image = cv2.filter2D(image, -1, kernel)
points_cam = lidar_to_camera(points[:, :3], rect, Trv2c)
points_2d = project_to_image(points_cam, P2)
points_rgb = np.zeros([points_cam.shape[0], 3], dtype=points.dtype)
_add_rgb_to_points_kernel(points_2d, image, points_rgb)
return points_rgb
def project_to_image(points_3d, proj_mat):
points_shape = list(points_3d.shape)
points_shape[-1] = 1
points_4 = np.concatenate([points_3d, np.ones(points_shape)], axis=-1)
point_2d = points_4 @ proj_mat.T
point_2d_res = point_2d[..., :2] / point_2d[..., 2:3]
return point_2d_res
def camera_to_lidar(points, r_rect, velo2cam):
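    # transform points from rectified camera coordinates back to lidar coordinates (inverse of r_rect @ velo2cam)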
points_shape = list(points.shape[0:-1])
if points.shape[-1] == 3:
points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1)
lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T)
return lidar_points[..., :3]
def lidar_to_camera(points, r_rect, velo2cam):
points_shape = list(points.shape[:-1])
if points.shape[-1] == 3:
points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1)
camera_points = points @ (r_rect @ velo2cam).T
return camera_points[..., :3]
def box_camera_to_lidar(data, r_rect, velo2cam):
xyz = data[:, 0:3]
l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6]
r = data[:, 6:7]
xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam)
return np.concatenate([xyz_lidar, w, l, h, r], axis=1)
def box_lidar_to_camera(data, r_rect, velo2cam):
xyz_lidar = data[:, 0:3]
w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6]
r = data[:, 6:7]
xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam)
return np.concatenate([xyz, l, h, w, r], axis=1)
def remove_outside_points(points, rect, Trv2c, P2, image_shape):
# 5x faster than remove_outside_points_v1(2ms vs 10ms)
C, R, T = projection_matrix_to_CRT_kitti(P2)
image_bbox = [0, 0, image_shape[1], image_shape[0]]
frustum = get_frustum(image_bbox, C)
frustum -= T
frustum = np.linalg.inv(R) @ frustum.T
frustum = camera_to_lidar(frustum.T, rect, Trv2c)
frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...])
indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces)
points = points[indices.reshape([-1])]
return points
@numba.jit(nopython=True)
def iou_jit(boxes, query_boxes, eps=1.0):
"""calculate box iou. note that jit version runs 2x faster than cython in
my machine!
Parameters
----------
boxes: (N, 4) ndarray of float
query_boxes: (K, 4) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
"""
N = boxes.shape[0]
K = query_boxes.shape[0]
overlaps = np.zeros((N, K), dtype=boxes.dtype)
for k in range(K):
box_area = (query_boxes[k, 2] - query_boxes[k, 0] + eps) * (
query_boxes[k, 3] - query_boxes[k, 1] + eps
)
for n in range(N):
iw = (
min(boxes[n, 2], query_boxes[k, 2])
- max(boxes[n, 0], query_boxes[k, 0])
+ eps
)
if iw > 0:
ih = (
min(boxes[n, 3], query_boxes[k, 3])
- max(boxes[n, 1], query_boxes[k, 1])
+ eps
)
if ih > 0:
ua = (
(boxes[n, 2] - boxes[n, 0] + eps)
* (boxes[n, 3] - boxes[n, 1] + eps)
+ box_area
- iw * ih
)
overlaps[n, k] = iw * ih / ua
return overlaps
@numba.jit(nopython=True)
def iou_3d_jit(boxes, query_boxes, add1=True):
"""calculate box iou3d,
----------
boxes: (N, 6) ndarray of float
query_boxes: (K, 6) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
"""
N = boxes.shape[0]
K = query_boxes.shape[0]
overlaps = np.zeros((N, K), dtype=boxes.dtype)
if add1:
add1 = 1.0
else:
add1 = 0.0
for k in range(K):
box_area = (
(query_boxes[k, 3] - query_boxes[k, 0] + add1)
* (query_boxes[k, 4] - query_boxes[k, 1] + add1)
* (query_boxes[k, 5] - query_boxes[k, 2] + add1)
)
for n in range(N):
iw = (
min(boxes[n, 3], query_boxes[k, 3])
- max(boxes[n, 0], query_boxes[k, 0])
+ add1
)
if iw > 0:
ih = (
min(boxes[n, 4], query_boxes[k, 4])
- max(boxes[n, 1], query_boxes[k, 1])
+ add1
)
if ih > 0:
il = (
min(boxes[n, 5], query_boxes[k, 5])
- max(boxes[n, 2], query_boxes[k, 2])
+ add1
)
if il > 0:
ua = float(
(boxes[n, 3] - boxes[n, 0] + add1)
* (boxes[n, 4] - boxes[n, 1] + add1)
* (boxes[n, 5] - boxes[n, 2] + add1)
+ box_area
- iw * ih * il
)
overlaps[n, k] = iw * ih * il / ua
return overlaps
@numba.jit(nopython=True)
def iou_nd_jit(boxes, query_boxes, add1=True):
"""calculate box iou nd, 2x slower than iou_jit.
----------
boxes: (N, ndim * 2) ndarray of float
query_boxes: (K, ndim * 2) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
"""
N = boxes.shape[0]
K = query_boxes.shape[0]
ndim = boxes.shape[1] // 2
overlaps = np.zeros((N, K), dtype=boxes.dtype)
side_lengths = np.zeros((ndim,), dtype=boxes.dtype)
if add1:
add1 = 1.0
else:
add1 = 0.0
invalid = False
for k in range(K):
qbox_area = query_boxes[k, ndim] - query_boxes[k, 0] + add1
for i in range(1, ndim):
qbox_area *= query_boxes[k, ndim + i] - query_boxes[k, i] + add1
for n in range(N):
invalid = False
for i in range(ndim):
side_length = (
min(boxes[n, i + ndim], query_boxes[k, i + ndim])
- max(boxes[n, i], query_boxes[k, i])
+ add1
)
if side_length <= 0:
invalid = True
break
side_lengths[i] = side_length
if not invalid:
box_area = boxes[n, ndim] - boxes[n, 0] + add1
for i in range(1, ndim):
box_area *= boxes[n, ndim + i] - boxes[n, i] + add1
inter = side_lengths[0]
for i in range(1, ndim):
inter *= side_lengths[i]
# inter = np.prod(side_lengths)
ua = float(box_area + qbox_area - inter)
overlaps[n, k] = inter / ua
return overlaps
def points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)):
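    # return a boolean (num_points, num_boxes) mask marking the points that fall inside each rotated 3D box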
rbbox_corners = center_to_corner_box3d(
rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis
)
surfaces = corner_to_surfaces_3d(rbbox_corners)
indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)
return indices
def corner_to_surfaces_3d(corners):
"""convert 3d box corners from corner function above
to surfaces that normal vectors all direct to internal.
Args:
corners (float array, [N, 8, 3]): 3d box corners.
Returns:
surfaces (float array, [N, 6, 4, 3]):
"""
# box_corners: [N, 8, 3], must from corner functions in this module
surfaces = np.array(
[
[corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]],
[corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]],
[corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]],
[corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]],
[corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]],
[corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]],
]
).transpose([2, 0, 1, 3])
return surfaces
@numba.jit(nopython=True)
def corner_to_surfaces_3d_jit(corners):
"""convert 3d box corners from corner function above
to surfaces that normal vectors all direct to internal.
Args:
corners (float array, [N, 8, 3]): 3d box corners.
Returns:
surfaces (float array, [N, 6, 4, 3]):
"""
# box_corners: [N, 8, 3], must from corner functions in this module
num_boxes = corners.shape[0]
surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype)
corner_idxes = np.array(
[0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7]
).reshape(6, 4)
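    # each row of corner_idxes holds the 4 corner indices of one of the 6 box faces (normals pointing inward)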
for i in range(num_boxes):
for j in range(6):
for k in range(4):
surfaces[i, j, k] = corners[i, corner_idxes[j, k]]
return surfaces
def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range):
"""assign a 0/1 label to each voxel based on whether
the center of voxel is in gt_box. LIDAR.
"""
voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)
coors_range = np.array(coors_range, dtype=gt_boxes.dtype)
shift = coors_range[:3]
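    # coors are stored as (z, y, x) by the voxel generator; reverse to (x, y, z) before scaling to metric positions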
voxel_origins = coors[:, ::-1] * voxel_size + shift
voxel_centers = voxel_origins + voxel_size * 0.5
gt_box_corners = center_to_corner_box3d(
gt_boxes[:, :3] - voxel_size * 0.5,
gt_boxes[:, 3:6] + voxel_size,
gt_boxes[:, 6],
origin=[0.5, 0.5, 0.5],
axis=2,
)
gt_surfaces = corner_to_surfaces_3d(gt_box_corners)
ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces)
return np.any(ret, axis=1).astype(np.int64)
def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range):
"""assign a 0/1 label to each voxel based on whether
the center of voxel is in gt_box. LIDAR.
"""
voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)
coors_range = np.array(coors_range, dtype=gt_boxes.dtype)
shift = coors_range[:3]
voxel_origins = coors[:, ::-1] * voxel_size + shift
voxel_maxes = voxel_origins + voxel_size
voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1)
voxel_corners = minmax_to_corner_3d(voxel_minmax)
gt_box_corners = center_to_corner_box3d(
gt_boxes[:, :3],
gt_boxes[:, 3:6],
gt_boxes[:, 6],
origin=[0.5, 0.5, 0.5],
axis=2,
)
gt_surfaces = corner_to_surfaces_3d(gt_box_corners)
voxel_corners_flat = voxel_corners.reshape([-1, 3])
ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces)
ret = ret.reshape([-1, 8, ret.shape[-1]])
return ret.any(-1).any(-1).astype(np.int64)
def image_box_region_area(img_cumsum, bbox):
"""check a 2d voxel is contained by a box. used to filter empty
anchors.
Summed-area table algorithm:
==> W
------------------
| | |
|------A---------B
| | |
| | |
|----- C---------D
Iabcd = ID-IB-IC+IA
Args:
img_cumsum: [M, H, W](yx) cumsumed image.
bbox: [N, 4](xyxy) bounding box,
"""
N = bbox.shape[0]
M = img_cumsum.shape[0]
ret = np.zeros([N, M], dtype=img_cumsum.dtype)
ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]]
IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]]
IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]]
IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]]
ret = ID - IB - IC + IA
return ret
def get_minimum_bounding_box_bv(points, voxel_size, bound, downsample=8, margin=1.6):
x_vsize = voxel_size[0]
y_vsize = voxel_size[1]
max_x = points[:, 0].max()
max_y = points[:, 1].max()
min_x = points[:, 0].min()
min_y = points[:, 1].min()
max_x = np.floor(max_x / (x_vsize * downsample) + 1) * (x_vsize * downsample)
max_y = np.floor(max_y / (y_vsize * downsample) + 1) * (y_vsize * downsample)
min_x = np.floor(min_x / (x_vsize * downsample)) * (x_vsize * downsample)
min_y = np.floor(min_y / (y_vsize * downsample)) * (y_vsize * downsample)
max_x = np.minimum(max_x + margin, bound[2])
max_y = np.minimum(max_y + margin, bound[3])
min_x = np.maximum(min_x - margin, bound[0])
min_y = np.maximum(min_y - margin, bound[1])
return np.array([min_x, min_y, max_x, max_y])
def box3d_to_bbox(box3d, rect, Trv2c, P2):
box3d_to_cam = box_lidar_to_camera(box3d, rect, Trv2c)
box_corners = center_to_corner_box3d(
        box3d_to_cam[:, :3], box3d_to_cam[:, 3:6], box3d_to_cam[:, 6], [0.5, 1.0, 0.5], axis=1
)
box_corners_in_image = project_to_image(box_corners, P2)
# box_corners_in_image: [N, 8, 2]
minxy = np.min(box_corners_in_image, axis=1)
maxxy = np.max(box_corners_in_image, axis=1)
bbox = np.concatenate([minxy, maxxy], axis=1)
return bbox
def change_box3d_center_(box3d, src, dst):
dst = np.array(dst, dtype=box3d.dtype)
src = np.array(src, dtype=box3d.dtype)
box3d[..., :3] += box3d[..., 3:6] * (dst - src)
def encode_parts(relative_shifts):
parts = np.zeros((len(relative_shifts),), dtype=np.int32)
mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] >= 0)
parts[mask] = 0
mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] >= 0)
parts[mask] = 1
mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] < 0)
parts[mask] = 2
mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] < 0)
parts[mask] = 3
    return parts
| 36.460123 | 88 | 0.57126 | ["MIT"] | motional/polarstream | det3d/core/bbox/box_np_ops.py | 29,715 | Python |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HContractUnitR03_ConnectedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HContractUnitR03_ConnectedLHS
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HContractUnitR03_ConnectedLHS, self).__init__(name='HContractUnitR03_ConnectedLHS', num_nodes=0, edges=[])
# Add the edges
self.add_edges([])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """return True"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HContractUnitR03_ConnectedLHS')
self["equations"] = []
# Set the node attributes
# match class Class(Class) node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """return True"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__Class"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Class')
# Add the edges
self.add_edges([
])
# define evaluation methods for each match class.
def eval_attr11(self, attr_value, this):
return True
# define evaluation methods for each match association.
def constraint(self, PreNode, graph):
return True
| 26.489796 | 114 | 0.716487 | ["MIT"] | levilucio/SyVOLT | UML2ER/contracts/unit/HContractUnitR03_ConnectedLHS.py | 1,298 | Python |
import numpy as np
import networkx as nx
if __name__ == '__main__':
from ged4py.algorithm import graph_edit_dist
else:
from .ged4py.algorithm import graph_edit_dist
def rearrange_adj_matrix(matrix, ordering):
assert matrix.ndim == 2
# Check that matrix is square
assert matrix.shape[0] == matrix.shape[1]
num_nodes = matrix.shape[0]
assert len(ordering) == num_nodes
# Swap rows into correct ordering
matrix = matrix[ordering, :]
# Swap columns into correct ordering
matrix = matrix[:, ordering]
return matrix
def rand_permute_adj_matrix(matrix):
"""Randomly permute the order of vertices in the adjacency matrix, while maintaining the connectivity
between them."""
num_vertices = matrix.shape[0]
rand_order = np.arange(num_vertices)
np.random.shuffle(rand_order)
matrix_permuted = rearrange_adj_matrix(matrix, rand_order)
return matrix_permuted
def ged_from_adj(adj_mat_1, adj_mat_2, directed=False, ged_function=graph_edit_dist.compare):
"""Calculate the graph edit distance between two graphs"""
if directed:
create_using = nx.DiGraph
else:
create_using = nx.Graph
g1 = nx.from_numpy_matrix(adj_mat_1, create_using=create_using())
g2 = nx.from_numpy_matrix(adj_mat_2, create_using=create_using())
return ged_function(g1, g2)
def ged_from_adj_nx(adj_mat_1, adj_mat_2, directed=False):
"""Calculate the graph edit distance between two graphs using the networkx implementation"""
return ged_from_adj(adj_mat_1, adj_mat_2, directed=directed, ged_function=nx.graph_edit_distance)
def ged_from_adj_ged4py(adj_mat_1, adj_mat_2, directed=False):
"""Calculate the graph edit distance between two graphs using the ged4py implementation"""
return ged_from_adj(adj_mat_1, adj_mat_2, directed=directed, ged_function=graph_edit_dist.compare)
def is_isomorphic_from_adj(adj_mat_1, adj_mat_2):
"""Checks whether two graphs are isomorphic taking adjacency matrices as inputs"""
g1 = nx.from_numpy_matrix(adj_mat_1, create_using=nx.DiGraph())
g2 = nx.from_numpy_matrix(adj_mat_2, create_using=nx.DiGraph())
return nx.is_isomorphic(g1, g2)
def adj_matrix_to_edge_list(adj_matrix, directed=True, first_id=0, weighted=False):
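    # convert an adjacency matrix into an edge list of (i, j) or (i, j, weight) rows; node ids start at first_id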
num_nodes = adj_matrix.shape[0]
if directed:
num_edges = np.sum(adj_matrix)
else:
num_edges = int(np.sum(adj_matrix) / 2)
if weighted:
edge_list = np.zeros([num_edges, 3], dtype=np.int32)
else:
edge_list = np.zeros([num_edges, 2], dtype=np.int32)
i = 0
for node_in in range(num_nodes):
if directed:
range_2 = range(num_nodes)
else:
range_2 = range(node_in + 1, num_nodes)
for node_out in range_2:
edge_val = adj_matrix[node_in, node_out]
if edge_val > 0:
# If there is a connection
if weighted:
edge_list[i] = (node_in + first_id, node_out + first_id, edge_val)
else:
edge_list[i] = (node_in + first_id, node_out + first_id)
i += 1
return edge_list
def edge_list_to_textfile(edge_list, filepath, weighted=False):
with open(filepath, 'w') as file:
if weighted:
for i, j, weight in edge_list:
file.write(f"{i} {j} {weight}\n")
else:
for i, j in edge_list:
file.write(f"{i} {j}\n")
return
| 34.147059 | 105 | 0.676715 | ["MIT"] | BrunoKM/rhoana_graph_tools | utils/graph_utils.py | 3,483 | Python |
import time
from typing import Optional, Dict
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pack_padded_sequence
from utils import TensorboardWriter, AverageMeter, save_checkpoint, accuracy, \
clip_gradient, adjust_learning_rate
from metrics import Metrics
class Trainer:
"""
    Encoder-decoder pipeline. Teacher Forcing is used during training and validation.
Parameters
----------
caption_model : str
Type of the caption model
epochs : int
We should train the model for __ epochs
device : torch.device
Use GPU or not
word_map : Dict[str, int]
Word2id map
rev_word_map : Dict[int, str]
Id2word map
start_epoch : int
We should start training the model from __th epoch
epochs_since_improvement : int
Number of epochs since last improvement in BLEU-4 score
best_bleu4 : float
Best BLEU-4 score until now
train_loader : DataLoader
DataLoader for training data
val_loader : DataLoader
DataLoader for validation data
encoder : nn.Module
Encoder (based on CNN)
decoder : nn.Module
Decoder (based on LSTM)
encoder_optimizer : optim.Optimizer
Optimizer for encoder (Adam) (if fine-tune)
decoder_optimizer : optim.Optimizer
Optimizer for decoder (Adam)
loss_function : nn.Module
Loss function (cross entropy)
grad_clip : float
Gradient threshold in clip gradients
tau : float
Penalty term τ for doubly stochastic attention in paper: show, attend and tell
fine_tune_encoder : bool
Fine-tune encoder or not
tensorboard : bool, optional, default=False
Enable tensorboard or not?
log_dir : str, optional
Path to the folder to save logs for tensorboard
"""
def __init__(
self,
caption_model: str,
epochs: int,
device: torch.device,
word_map: Dict[str, int],
rev_word_map: Dict[int, str],
start_epoch: int,
epochs_since_improvement: int,
best_bleu4: float,
train_loader: DataLoader,
val_loader: DataLoader,
encoder: nn.Module,
decoder: nn.Module,
encoder_optimizer: optim.Optimizer,
decoder_optimizer: optim.Optimizer,
loss_function: nn.Module,
grad_clip: float,
tau: float,
fine_tune_encoder: bool,
tensorboard: bool = False,
log_dir: Optional[str] = None
) -> None:
self.device = device # GPU / CPU
self.caption_model = caption_model
self.epochs = epochs
self.word_map = word_map
self.rev_word_map = rev_word_map
self.start_epoch = start_epoch
self.epochs_since_improvement = epochs_since_improvement
self.best_bleu4 = best_bleu4
self.train_loader = train_loader
self.val_loader = val_loader
self.encoder = encoder
self.decoder = decoder
self.encoder_optimizer = encoder_optimizer
self.decoder_optimizer = decoder_optimizer
self.loss_function = loss_function
self.tau = tau
self.grad_clip = grad_clip
self.fine_tune_encoder = fine_tune_encoder
self.print_freq = 100 # print training/validation stats every __ batches
# setup visualization writer instance
self.writer = TensorboardWriter(log_dir, tensorboard)
self.len_epoch = len(self.train_loader)
def train(self, epoch: int) -> None:
"""
Train an epoch
Parameters
----------
epoch : int
Current number of epoch
"""
        self.decoder.train()  # train mode (dropout and batchnorm are used)
self.encoder.train()
batch_time = AverageMeter() # forward prop. + back prop. time
data_time = AverageMeter() # data loading time
losses = AverageMeter(tag='loss', writer=self.writer) # loss (per word decoded)
top5accs = AverageMeter(tag='top5acc', writer=self.writer) # top5 accuracy
start = time.time()
# batches
for i, (imgs, caps, caplens) in enumerate(self.train_loader):
data_time.update(time.time() - start)
# Move to GPU, if available
imgs = imgs.to(self.device)
caps = caps.to(self.device)
caplens = caplens.to(self.device)
# forward encoder
imgs = self.encoder(imgs)
# forward decoder
if self.caption_model == 'att2all':
scores, caps_sorted, decode_lengths, alphas, sort_ind = self.decoder(imgs, caps, caplens)
else:
scores, caps_sorted, decode_lengths, sort_ind = self.decoder(imgs, caps, caplens)
# since we decoded starting with <start>, the targets are all words after <start>, up to <end>
targets = caps_sorted[:, 1:]
# remove timesteps that we didn't decode at, or are pads
# pack_padded_sequence is an easy trick to do this
scores = pack_padded_sequence(scores, decode_lengths, batch_first=True)[0]
targets = pack_padded_sequence(targets, decode_lengths, batch_first=True)[0]
# calc loss
loss = self.loss_function(scores, targets)
# doubly stochastic attention regularization (in paper: show, attend and tell)
if self.caption_model == 'att2all':
loss += self.tau * ((1. - alphas.sum(dim = 1)) ** 2).mean()
# clear gradient of last batch
self.decoder_optimizer.zero_grad()
if self.encoder_optimizer is not None:
self.encoder_optimizer.zero_grad()
# backward
loss.backward()
# clip gradients
if self.grad_clip is not None:
clip_gradient(self.decoder_optimizer, self.grad_clip)
if self.encoder_optimizer is not None:
clip_gradient(self.encoder_optimizer, self.grad_clip)
# update weights
self.decoder_optimizer.step()
if self.encoder_optimizer is not None:
self.encoder_optimizer.step()
# set step for tensorboard
step = (epoch - 1) * self.len_epoch + i
self.writer.set_step(step=step, mode='train')
# keep track of metrics
top5 = accuracy(scores, targets, 5)
losses.update(loss.item(), sum(decode_lengths))
top5accs.update(top5, sum(decode_lengths))
batch_time.update(time.time() - start)
start = time.time()
# print status
if i % self.print_freq == 0:
print(
'Epoch: [{0}][{1}/{2}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data Load Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Top-5 Accuracy {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(self.train_loader),
batch_time = batch_time,
data_time = data_time,
loss = losses,
top5 = top5accs
)
)
def validate(self) -> float:
"""
Validate an epoch.
Returns
-------
bleu4 : float
BLEU-4 score
"""
self.decoder.eval() # eval mode (no dropout or batchnorm)
if self.encoder is not None:
self.encoder.eval()
batch_time = AverageMeter()
losses = AverageMeter()
top5accs = AverageMeter()
start = time.time()
ground_truth = list() # ground_truth (true captions) for calculating BLEU-4 score
prediction = list() # prediction (predicted captions)
# explicitly disable gradient calculation to avoid CUDA memory error
# solves the issue #57
with torch.no_grad():
# Batches
for i, (imgs, caps, caplens, allcaps) in enumerate(self.val_loader):
# move to device, if available
imgs = imgs.to(self.device)
caps = caps.to(self.device)
caplens = caplens.to(self.device)
# forward encoder
if self.encoder is not None:
imgs = self.encoder(imgs)
# forward decoder
if self.caption_model == 'att2all':
scores, caps_sorted, decode_lengths, alphas, sort_ind = self.decoder(imgs, caps, caplens)
else:
scores, caps_sorted, decode_lengths, sort_ind = self.decoder(imgs, caps, caplens)
# since we decoded starting with <start>, the targets are all words after <start>, up to <end>
targets = caps_sorted[:, 1:]
# remove timesteps that we didn't decode at, or are pads
# pack_padded_sequence is an easy trick to do this
scores_copy = scores.clone()
scores = pack_padded_sequence(scores, decode_lengths, batch_first = True)[0]
targets = pack_padded_sequence(targets, decode_lengths, batch_first = True)[0]
# calc loss
loss = self.loss_function(scores, targets)
# doubly stochastic attention regularization (in paper: show, attend and tell)
if self.caption_model == 'att2all':
loss += self.tau * ((1. - alphas.sum(dim = 1)) ** 2).mean()
# keep track of metrics
losses.update(loss.item(), sum(decode_lengths))
top5 = accuracy(scores, targets, 5)
top5accs.update(top5, sum(decode_lengths))
batch_time.update(time.time() - start)
start = time.time()
if i % self.print_freq == 0:
print('Validation: [{0}/{1}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Top-5 Accuracy {top5.val:.3f} ({top5.avg:.3f})\t'.format(i, len(self.val_loader),
batch_time = batch_time,
loss = losses,
top5 = top5accs)
)
# store ground truth captions and predicted captions of each image
# for n images, each of them has one prediction and multiple ground truths (a, b, c...):
# prediction = [ [hyp1], [hyp2], ..., [hypn] ]
# ground_truth = [ [ [ref1a], [ref1b], [ref1c] ], ..., [ [refna], [refnb] ] ]
# ground truth
allcaps = allcaps[sort_ind] # because images were sorted in the decoder
for j in range(allcaps.shape[0]):
img_caps = allcaps[j].tolist()
img_captions = list(
map(
lambda c: [w for w in c if w not in {self.word_map['<start>'], self.word_map['<pad>']}],
img_caps
)
) # remove <start> and pads
ground_truth.append(img_captions)
# prediction
_, preds = torch.max(scores_copy, dim = 2)
preds = preds.tolist()
temp_preds = list()
for j, p in enumerate(preds):
temp_preds.append(preds[j][:decode_lengths[j]]) # remove pads
preds = temp_preds
prediction.extend(preds)
assert len(ground_truth) == len(prediction)
# calc BLEU-4 and CIDEr score
metrics = Metrics(ground_truth, prediction, self.rev_word_map)
bleu4 = metrics.belu[3] # BLEU-4
cider = metrics.cider # CIDEr
print(
'\n * LOSS - {loss.avg:.3f}, TOP-5 ACCURACY - {top5.avg:.3f}, BLEU-4 - {bleu}, CIDEr - {cider}\n'.format(
loss = losses,
top5 = top5accs,
bleu = bleu4,
cider = cider
)
)
return bleu4
def run_train(self) -> None:
# epochs
for epoch in range(self.start_epoch, self.epochs):
# decay learning rate if there is no improvement for 8 consecutive epochs
# terminate training if there is no improvement for 20 consecutive epochs
if self.epochs_since_improvement == 20:
break
if self.epochs_since_improvement > 0 and self.epochs_since_improvement % 8 == 0:
adjust_learning_rate(self.decoder_optimizer, 0.8)
if self.fine_tune_encoder:
adjust_learning_rate(self.encoder_optimizer, 0.8)
# train an epoch
self.train(epoch = epoch)
# validate an epoch
recent_bleu4 = self.validate()
# epochs num since last improvement
is_best = recent_bleu4 > self.best_bleu4
self.best_bleu4 = max(recent_bleu4, self.best_bleu4)
if not is_best:
self.epochs_since_improvement += 1
print("\nEpochs since last improvement: %d\n" % (self.epochs_since_improvement,))
else:
self.epochs_since_improvement = 0
# save checkpoint
save_checkpoint(
epoch = epoch,
epochs_since_improvement = self.epochs_since_improvement,
encoder = self.encoder,
decoder = self.decoder,
encoder_optimizer = self.encoder_optimizer,
decoder_optimizer = self.decoder_optimizer,
caption_model = self.caption_model,
bleu4 = recent_bleu4,
is_best = is_best
)
| 36.77892 | 121 | 0.551618 | ["MIT"] | Renovamen/Image-Caption | trainer/trainer.py | 14,308 | Python |
# Return the substring of 'data' between the first occurrence of 'start' and the following 'end'.
# An empty marker means the beginning/end of the string; '' is returned if a marker is not found.
def get_substring_or_empty(data, start, end=''):
if start in data:
if '' == start:
f = 0
else:
f = len(start)
f = data.find(start) + f
data = data[f:]
else:
return ''
if end in data:
if '' == end:
f = len(data)
else:
f = data.find(end)
data = data[:f]
else:
return ''
data = data.strip()
return data
| 20.04 | 49 | 0.447106 | ["MIT"] | domorelivelonger/rss-telegram-bot | utils.py | 501 | Python |
import cmath
import math
cv =150
cvconv = 736
t1 =440
t2 = 254
polos = 10
freq = 60
r1 = 0.012
R2L = 0.018
X1 = 0.08
X2L = X1
Rp = 58
Xm = 54
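# per-phase induction-motor equivalent circuit: stator r1 + jX1, rotor referred to the stator R2'/s + jX2', magnetizing branch Rp || jXm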
print("\nConsidere que o motor é alimentado com tensão de fase igual a 254 V, conexão Y e atinge escorregamento igual a 1,8%")
print("\nA - Corrente no estator\n")
s = 0.018
print("R2L_s = ", R2L/s, "Ohm")
print("(1-s)*(R2L_s) = ", (1-s)*(R2L/s), "Ohm")
Z1 = r1+complex(0,X1)
print("Z1 = ", Z1, "Ohm")
Z2 = R2L/s+complex(0,X2L)
print("Z2 = ", Z2, "Ohm")
Zn = Rp*complex(0,Xm)/complex(Rp,Xm)
print("Zn = ", Zn, "Ohm")
Zeq1 = Zn*Z2/(Zn+Z2)
print("Zeq1 = ", Zeq1, "Ohm")
Zeq2 = Z1+Zeq1
print("Zeq2 = ", Zeq2, "Ohm")
I1 = t2/Zeq2
print("I1 = ", I1, "A")
I1p = cmath.polar(I1)
print("\nB - Fator de pontecia\n")
FP = cmath.cos(I1p[1])
FPreal = round(FP.real,5)
print("FP = ", FPreal)
print("\nC - Potencia de entrada\n")
Pe = t2*I1p[0]*cmath.cos(I1p[1])
pereal = round(Pe.real,3)
print("Pe = ", pereal, "W")
Pe3 = 3*pereal
print("Pe3 = ", Pe3, "W")
print("\nD - Corrente no rotor\n")
E1 = t2-Z1*I1
E1p = cmath.polar(E1)
print("E1 = ", E1p, "V")
I2L = E1/Z2
I2Lp = cmath.polar(I2L)
print("I2L = ", I2Lp, "A")
print("\nE - Potencia na carga\n")
#professor ultiliza dados polares
Ps = ((R2L*(1-s))/s)*I2Lp[0]*I2Lp[0]
print("Ps = ", Ps, "W")
Ps3 = 3*Ps
print("Ps3 = ", Ps3, "W")
print("\nF - Velocidade do eixo\n")
ns = 120*freq/polos
print("ns = ", ns, "rpm")
n = (1-s)*ns
print("n = ", n, "rpm")
w = 2*math.pi*n/60
w = round(w,3)
print("w = ", w, "rad/s")
print("\nG - Torque na carga\n")
t = Ps3/w
print("t = ", t, "Nm")
print("\nH - Rendimento do motor\n")
eni = Ps3/Pe3*100
print("eni = ", eni, "%")
| 17.333333 | 126 | 0.582933 | ["MIT"] | Boa-Thomas/Eletricidade | P5/Brasilia/Q7 - BR.py | 1,667 | Python |
from dis_snek.models import InteractionContext, OptionTypes, slash_command, slash_option
from ElevatorBot.commandHelpers.subCommandTemplates import poll_sub_command
from ElevatorBot.commands.base import BaseScale
from ElevatorBot.core.misc.poll import Poll
class PollRemove(BaseScale):
@slash_command(
**poll_sub_command,
sub_cmd_name="remove",
sub_cmd_description="Remove an existing option from an existing poll",
)
@slash_option(
name="poll_id", description="The ID of the poll", opt_type=OptionTypes.INTEGER, required=True, min_value=0
)
@slash_option(
name="option",
description="The name the option should have",
opt_type=OptionTypes.STRING,
required=True,
)
async def remove(self, ctx: InteractionContext, poll_id: int, option: str):
poll = await Poll.from_poll_id(poll_id=poll_id, ctx=ctx)
if poll:
await poll.remove_option(ctx=ctx, option=option)
def setup(client):
PollRemove(client)
| 32 | 114 | 0.710938 | ["MIT"] | LukasSchmid97/elevatorbot | ElevatorBot/commands/miscellaneous/poll/remove.py | 1,024 | Python |
# +
import numpy as np
import holoviews as hv
from holoviews import opts
import matplotlib.pyplot as plt
from plotsun import plot_sun
hv.extension('bokeh', 'matplotlib')
# -
# # Load data
data = np.load('npz_timeseries/subset.npz')
arr = data['arr']
stack = data['stack']
sun = data['sun']
print(arr.shape, stack.shape, sun.shape)
stack[:,:,25]
plt.imshow(stack[:,:,25], cmap='binary')
# +
stack = hv.Dataset((np.arange(stack.shape[2]),
np.arange(stack.shape[0]),
np.arange(stack.shape[1]),
stack),
['Time', 'x', 'y'], 'Shadows')
stack
# -
arr = hv.Dataset((np.arange(arr.shape[0]),
np.arange(arr.shape[1]),
arr),
['x', 'y'], 'Elevation')
arr
# # View
opts.defaults(
opts.GridSpace(shared_xaxis=True, shared_yaxis=True),
opts.Image(cmap='viridis', invert_yaxis=True, width=400, height=400),
opts.Labels(text_color='white', text_font_size='8pt',
text_align='left', text_baseline='bottom'),
opts.Path(color='white'),
opts.Spread(width=600),
opts.Overlay(show_legend=False))
elevation = arr.to(hv.Image, ['x', 'y'])
shadows = stack.to(hv.Image, ['x', 'y'])
elevation
dims = {'figsize':(4,5), 'top':1, 'bottom':0, 'left':0.2, 'right':0.95}
plot_sun(sunposition=sun, d=dims)
elevation * shadows
stack[:,:,24]
| 21.553846 | 73 | 0.589579 | ["BSD-3-Clause"] | cisaacstern/horpyzon | datashader_nb.py | 1,401 | Python |
# -*- coding: utf-8 -*-
from django.conf.urls import include
from django.conf.urls import url
from rest_framework.routers import DefaultRouter
from .views import *
# Optional register() argument base_name: used to generate the URL names; it is required when the viewset does not define a queryset.
router = DefaultRouter()
router.register(r'idcs', IdcViewSet)
router.register(r'racks', RackViewSet)
router.register(r'servers', ServerViewSet)
router.register(r'sshusers', SSHUserViewSet)
router.register(r'businesslines', BusinessLineViewSet)
router.register(r'projects', ProjectViewSet)
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^api_dashboard/$', APIDashBoardView.as_view()),
url(r'^api_local_ssh_user/$', APILocalSSHUserView.as_view()),
]
| 30.956522 | 75 | 0.771067 | ["BSD-3-Clause"] | 17702296834/open-cmdb | backend/category/urls.py | 758 | Python |
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from djangocms_versioning.constants import PUBLISHED, VERSION_STATES
from djangocms_versioning.versionables import _cms_extension
from polymorphic.utils import get_base_polymorphic_model
from rangefilter.filters import DateRangeFilter
from .helpers import get_rangefilter_expires_default
class SimpleListMultiselectFilter(admin.SimpleListFilter):
def value_as_list(self):
return self.value().split(',') if self.value() else []
def _update_query(self, changelist, include=None, exclude=None):
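        # toggle a single lookup value inside the comma-separated multi-select querystring parameter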
selected_list = self.value_as_list()
if include and include not in selected_list:
selected_list.append(include)
if exclude and exclude in selected_list:
selected_list.remove(exclude)
if selected_list:
compiled_selection = ','.join(selected_list)
return changelist.get_query_string({self.parameter_name: compiled_selection})
else:
return changelist.get_query_string(remove=[self.parameter_name])
class ContentTypeFilter(SimpleListMultiselectFilter):
title = _("Content Type")
parameter_name = "content_type"
template = 'djangocms_content_expiry/multiselect_filter.html'
def lookups(self, request, model_admin):
lookup_list = []
for content_model in _cms_extension().versionables_by_content:
# Only add references to the inherited concrete model i.e. not referenced polymorphic models
if hasattr(content_model, "polymorphic_ctype"):
content_model = get_base_polymorphic_model(content_model)
# Create an entry
content_type = ContentType.objects.get_for_model(content_model)
lookup_list_entry = (content_type.pk, content_type)
# Only add unique entries
if lookup_list_entry not in lookup_list:
lookup_list.append(lookup_list_entry)
return lookup_list
def queryset(self, request, queryset):
content_types = self.value()
if not content_types:
return queryset
return queryset.filter(version__content_type__in=content_types.split(','))
def choices(self, changelist):
yield {
'selected': self.value() is None,
'query_string': changelist.get_query_string(remove=[self.parameter_name]),
'display': 'All',
'initial': True,
}
for lookup, title in self.lookup_choices:
yield {
'selected': str(lookup) in self.value_as_list(),
'query_string': changelist.get_query_string({self.parameter_name: lookup}),
'include_query_string': self._update_query(changelist, include=str(lookup)),
'exclude_query_string': self._update_query(changelist, exclude=str(lookup)),
'display': title,
}
class VersionStateFilter(SimpleListMultiselectFilter):
title = _("Version State")
parameter_name = "state"
default_filter_value = PUBLISHED
show_all_param_value = "_all_"
template = 'djangocms_content_expiry/multiselect_filter.html'
def _is_default(self, filter_value):
if self.default_filter_value == filter_value and self.value() is None:
return True
return False
def _get_all_query_string(self, changelist):
"""
If there's a default value set the all parameter needs to be provided
however, if a default is not set the all parameter is not required.
"""
# Default setting in use
if self.default_filter_value:
return changelist.get_query_string(
{self.parameter_name: self.show_all_param_value}
)
# Default setting not in use
return changelist.get_query_string(remove=[self.parameter_name])
def _is_all_selected(self):
state = self.value()
# Default setting in use
if self.default_filter_value and state == self.show_all_param_value:
return True
# Default setting not in use
elif not self.default_filter_value and not state:
return True
return False
def _update_query(self, changelist, include=None, exclude=None):
selected_list = self.value_as_list()
if self.show_all_param_value in selected_list:
selected_list.remove(self.show_all_param_value)
if include and include not in selected_list:
selected_list.append(include)
if exclude and exclude in selected_list:
selected_list.remove(exclude)
if selected_list:
compiled_selection = ','.join(selected_list)
return changelist.get_query_string({self.parameter_name: compiled_selection})
else:
return changelist.get_query_string(remove=[self.parameter_name])
def lookups(self, request, model_admin):
return VERSION_STATES
def queryset(self, request, queryset):
state = self.value()
# Default setting in use
if self.default_filter_value:
if not state:
return queryset.filter(version__state=self.default_filter_value)
elif state != "_all_":
return queryset.filter(version__state__in=state.split(','))
# Default setting not in use
elif not self.default_filter_value and state:
return queryset.filter(version__state__in=state.split(','))
return queryset
def choices(self, changelist):
yield {
"selected": self._is_all_selected(),
"query_string": self._get_all_query_string(changelist),
"display": _("All"),
'initial': True,
}
for lookup, title in self.lookup_choices:
lookup_value = str(lookup)
yield {
"selected": str(lookup) in self.value_as_list() or self._is_default(lookup_value),
"query_string": changelist.get_query_string(
{self.parameter_name: lookup}
),
'include_query_string': self._update_query(changelist, include=str(lookup_value)),
'exclude_query_string': self._update_query(changelist, exclude=str(lookup_value)),
"display": title,
}
class AuthorFilter(admin.SimpleListFilter):
"""
An author filter limited to those users who have added expiration dates
"""
title = _("Version Author")
parameter_name = "created_by"
def lookups(self, request, model_admin):
from django.utils.encoding import force_text
User = get_user_model()
options = []
qs = model_admin.get_queryset(request)
authors = qs.values_list('version__created_by', flat=True).distinct()
users = User.objects.filter(pk__in=authors)
for user in users:
options.append(
(force_text(user.pk), user.get_full_name() or user.get_username())
)
return options
def queryset(self, request, queryset):
if self.value():
return queryset.filter(created_by=self.value()).distinct()
return queryset
class ContentExpiryDateRangeFilter(DateRangeFilter):
def queryset(self, request, queryset):
queryset = super().queryset(request, queryset)
        # By default the filter shows only the default expiry window, not all
        # content expiry records
if not any('expires__range' in seed for seed in request.GET):
default_gte, default_lte = get_rangefilter_expires_default()
queryset = queryset.filter(expires__range=(default_gte, default_lte))
return queryset
| 39.38 | 104 | 0.660995 | ["BSD-3-Clause"] | Aiky30/djangocms-content-expiry | djangocms_content_expiry/filters.py | 7,876 | Python |