{
"source": "jouvemax/DeepLearning",
"score": 3
}
#### File: DeepLearning/Proj1/test.py
```python
import torch
from dlc_practical_prologue import generate_pair_sets
import torch.nn as nn
from utils import *
from utils_pipeline2 import *
import time
import models
import torch.nn.functional as F
def main():
# Set the device to cuda if it is available otherwise use the CPU
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
BATCH_SIZE = 64
# Use our best architecture
architecture = models.SiameseNetwork
# Use the best parameters we found for the SGD optimizer
optimizer_params = {'lr': 0.05, 'momentum':0.9, 'weight_decay': 0., 'gamma': 0.97}
nb_epochs = 50
nb_conv = 3
aux_loss_alpha = 0.4
nb_rounds = 10 # We use 10 reruns because of the high variance, reduce it to make everything faster
print("Training and testing independently 10 times the model (takes a few minutes)")
# Evaluate the model
accuracies = evaluate_model(architecture, nb_conv, aux_loss_alpha, nb_rounds, nn.CrossEntropyLoss(),
nb_epochs, BATCH_SIZE, optimizer_params, device)
print("The mean accuracy is: {a:0.2f}".format(a = accuracies.mean()))
print("The accuracy standard deviation is: {s:0.4f}".format(s = accuracies.std()))
if __name__ == '__main__':
main()
```
#### File: DeepLearning/Proj1/utils_pipeline1.py
```python
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from utils import *
import time
def test(test_input, test_target, test_classes, model, criterion, batch_size, with_aux_loss = False, aux_loss_alpha = 0.5):
"""
    This method tests the model given as input on the test data.
"""
with torch.no_grad():
nb_data_errors = 0
loss_sum = 0
for inputs, targets in zip(test_input.split(batch_size),
test_target.split(batch_size)):
if with_aux_loss:
outputs, output_aux = model(inputs)
aux_loss = criterion(output_aux, targets)
primary_loss = criterion(outputs, targets)
loss = primary_loss + aux_loss_alpha * aux_loss
else:
outputs = model(inputs)
loss = criterion(outputs, targets)
loss_sum += loss
_, predicted_classes = torch.max(outputs, 1)
for k in range(len(inputs)):
if targets[k] != predicted_classes[k]:
nb_data_errors = nb_data_errors + 1
accuracy = (1 - (nb_data_errors / test_input.size(0))) * 100
return accuracy, loss_sum.item()
def train_model(model, train_input, train_target, train_classes, test_input, test_target, test_classes, nb_epoch, batch_size, optimizer_params, logging = False, with_aux_loss = False, aux_loss_alpha = 0.5):
"""
    This method trains the model given as input.
"""
lr, momentum, weight_decay, gamma = optimizer_params['lr'], optimizer_params['momentum'], optimizer_params['weight_decay'], optimizer_params['gamma']
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=gamma)
criterion = nn.CrossEntropyLoss()
if logging:
log_acc_loss_header(color=Color.GREEN)
train_accuracies = []
train_losses = []
test_accuracies = []
test_losses = []
start_time = time.time()
for e in range(nb_epoch):
for inputs, targets in zip(train_input.split(batch_size),
train_target.split(batch_size)):
if with_aux_loss:
outputs, output_aux = model(inputs)
aux_loss = criterion(output_aux, targets)
primary_loss = criterion(outputs, targets)
loss = primary_loss + aux_loss_alpha * aux_loss
else:
outputs = model(inputs)
loss = criterion(outputs, targets)
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step() # Update the learning rate
if logging:
train_acc, train_loss = test(train_input, train_target, train_classes, model, criterion, batch_size, with_aux_loss)
test_acc, test_loss = test(test_input, test_target, test_classes, model, criterion, batch_size, with_aux_loss)
train_accuracies.append(train_acc)
train_losses.append(train_loss)
test_accuracies.append(test_acc)
test_losses.append(test_loss)
elapsed_time = time.time() - start_time
log_acc_loss(e, nb_epoch, elapsed_time, train_loss, train_acc, test_loss, test_acc, persistent=False)
if logging:
print()
return train_accuracies, train_losses, test_accuracies, test_losses
def evaluate_model(model, nb_rounds, criterion, device, batch_size, nb_epoch, optimizer_params, model_params = None, with_aux_loss = False, aux_loss_alpha = 0.5):
"""
This method is used to evaluate the model passed as argument
"""
accuracies = []
for round in range(nb_rounds):
# initialize new model
if model_params != None:
model_evaluated = model(model_params).to(device)
else:
model_evaluated = model().to(device)
# generate new data
train_input, train_target, train_classes, test_input, test_target, test_classes = generate_data_device(1000, device=device)
train_input = normalize_data(train_input)
test_input = normalize_data(test_input)
train_model(model_evaluated,
train_input,
train_target,
train_classes,
None,
None,
None,
nb_epoch, batch_size, optimizer_params, False, with_aux_loss, aux_loss_alpha)
accuracy, loss = test(test_input, test_target, test_classes, model_evaluated, criterion, batch_size, with_aux_loss, aux_loss_alpha)
print("Round {i}: accuracy = {a:0.2f}% | loss = {l:0.4f}".format(i = (round + 1), a = accuracy, l = loss))
accuracies.append(accuracy)
return torch.FloatTensor(accuracies)
def cross_validation(model_untrained, K, train_input, train_target, train_classes, device, batch_size, nb_epoch, aux_loss_alphas):
"""
    This method performs cross-validation and returns the optimal alpha used for the auxiliary loss.
"""
best_alpha = None
best_accuracy = -1
proportion = 1.0 / K
# parameters you want to test
for aux_loss_alpha in aux_loss_alphas:
accuracy_sum = 0
for i in range(K):
model = model_untrained(aux_loss = True).to(device = device)
tr_input, tr_target, tr_classes, val_input, val_target, val_classes = split_train_validation(train_input, train_target, train_classes, validation_proportion = proportion)
train_model(model,
tr_input,
tr_target,
tr_classes,
val_input,
val_target,
val_classes,
nb_epoch,
batch_size,
{'lr': 0.1, 'momentum':0.9, 'weight_decay': 0.0, 'gamma': 0.97},
logging = False,
with_aux_loss = True,
aux_loss_alpha = aux_loss_alpha)
accuracy, _ = test(val_input, val_target, val_classes, model, nn.CrossEntropyLoss(), batch_size, with_aux_loss = True, aux_loss_alpha = aux_loss_alpha)
accuracy_sum += accuracy
accuracy_mean = accuracy_sum / K
print('aux_loss_alpha = {a} - mean accuracy = {m}'.format(a = aux_loss_alpha, m = accuracy_mean))
if accuracy_mean > best_accuracy:
best_accuracy = accuracy_mean
best_alpha = aux_loss_alpha
return best_alpha, best_accuracy
```
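For reference, a minimal sketch of how `evaluate_model` above might be invoked. The architecture name `models.ConvNet` is hypothetical, and the helpers pulled in via `from utils import *` (such as `generate_data_device` and `normalize_data`) are assumed to be available:

```python
import torch.nn as nn
import models  # project module holding the architectures, as in test.py; ConvNet is hypothetical

accuracies = evaluate_model(
    model=models.ConvNet,            # hypothetical architecture class taking no constructor arguments
    nb_rounds=10,
    criterion=nn.CrossEntropyLoss(),
    device='cpu',
    batch_size=64,
    nb_epoch=25,
    optimizer_params={'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0, 'gamma': 0.97},
    with_aux_loss=False,
)
print(accuracies.mean().item(), accuracies.std().item())
```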
#### File: DeepLearning/Proj1/utils_pipeline2.py
```python
import torch
from utils import *
import torch.nn as nn
import time
def test(test_input, test_target, test_classes, model, criterion, batch_size, aux_loss_alpha=0.0):
"""
This method tests the model passed as argument on the testing data
"""
model.eval() # Switch to eval mode in case we use an architecture that requires it
with torch.no_grad():
nb_final_errors = 0
nb_digit_errors = 0
loss_sum = 0
for inputs, classes, targets in zip(test_input.split(batch_size),
test_classes.split(batch_size),
test_target.split(batch_size)):
classes1, classes2 = classes[:, 0], classes[:, 1]
inputs1, inputs2 = separate_channels(inputs)
outputs1, outputs2 = model.digit_pred(inputs1), model.digit_pred(inputs2)
loss_digit = criterion(outputs1, classes1) + criterion(outputs2, classes2)
loss_sum += aux_loss_alpha * loss_digit
_, predicted1 = torch.max(outputs1, 1)
_, predicted2 = torch.max(outputs2, 1)
for k in range(len(inputs)):
if classes1[k] != predicted1[k]:
nb_digit_errors += 1
if classes2[k] != predicted2[k]:
nb_digit_errors += 1
outputs = model(inputs)
loss_final = criterion(outputs, targets)
loss_sum += loss_final
_, predicted = torch.max(outputs, 1)
for k in range(len(inputs)):
if targets[k] != predicted[k]:
nb_final_errors += 1
final_acc = (1 - (nb_final_errors / test_input.size(0))) * 100
digit_acc = (1 - (nb_digit_errors / (test_input.size(0) * 2))) * 100
return final_acc, digit_acc, loss_sum.item()
def train_model(model, train_input, train_target, train_classes, test_input, test_target, test_classes,
nb_epochs, batch_size, optimizer_params, logging = False, aux_loss_alpha=0.0):
"""
This method is used to train the model passed as argument
"""
lr, momentum, weight_decay, gamma = optimizer_params['lr'], optimizer_params['momentum'], optimizer_params['weight_decay'], optimizer_params['gamma']
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=gamma)
criterion = nn.CrossEntropyLoss()
if logging:
log_acc_loss_header(color=Color.GREEN, aux=True)
train_final_accuracies = []
train_digit_accuracies = []
train_losses = []
test_final_accuracies = []
test_digit_accuracies = []
test_losses = []
start_time = time.time()
for e in range(nb_epochs):
model.train() # Switch to train mode in case we use an architecture that requires it
for inputs, targets, classes in zip(train_input.split(batch_size),
train_target.split(batch_size),
train_classes.split(batch_size)):
inputs1, inputs2 = separate_channels(inputs)
outputs1, outputs2 = model.digit_pred(inputs1), model.digit_pred(inputs2)
loss_digit = criterion(outputs1, classes[:, 0]) + criterion(outputs2, classes[:, 1])
loss = aux_loss_alpha * loss_digit
outputs = model(inputs)
loss_final = criterion(outputs, targets)
loss += loss_final
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step() # Update the learning rate
if logging:
train_final_acc, train_digit_acc, train_loss = test(train_input, train_target, train_classes, model, criterion, batch_size, aux_loss_alpha=aux_loss_alpha)
test_final_acc, test_digit_acc, test_loss = test(test_input, test_target, test_classes, model, criterion, batch_size, aux_loss_alpha=aux_loss_alpha)
train_final_accuracies.append(train_final_acc)
train_digit_accuracies.append(train_digit_acc)
train_losses.append(train_loss)
test_final_accuracies.append(test_final_acc)
test_digit_accuracies.append(test_digit_acc)
test_losses.append(test_loss)
elapsed_time = time.time() - start_time
log_acc_loss_aux(e, nb_epochs, elapsed_time, train_loss, train_final_acc, train_digit_acc, test_loss, test_final_acc, test_digit_acc, persistent=False)
if logging:
print()
return train_final_accuracies, train_digit_accuracies, train_losses, test_final_accuracies, test_digit_accuracies, test_losses
def evaluate_model(architecture, nb_conv, aux_loss_alpha, nb_rounds, criterion, nb_epochs, batch_size, optimizer_params, device):
"""
This method is used to evaluate the model passed as argument
"""
accuracies = []
log_evaluate_header(color=Color.GREEN)
for round in range(nb_rounds):
# initialize new model
model_evaluated = architecture(nb_conv=nb_conv, final_bias=True).to(device=device)
# generate new data
train_input, train_target, train_classes, test_input, test_target, test_classes = generate_data_device(1000, device=device)
train_input = normalize_data(train_input)
test_input = normalize_data(test_input)
train_model(model_evaluated,
train_input, train_target, train_classes,
None, None, None,
nb_epochs, batch_size,
optimizer_params, aux_loss_alpha=aux_loss_alpha)
accuracy, _, loss = test(test_input, test_target, test_classes, model_evaluated, criterion, batch_size, aux_loss_alpha=aux_loss_alpha)
log_evaluate(round, nb_rounds, accuracy, loss, persistent=True)
accuracies.append(accuracy)
return torch.FloatTensor(accuracies)
def cross_validation(architecture, K, train_input, train_target, train_classes, device, batch_size, nb_epoch, aux_loss_alphas, optimizer_params):
"""
    This method performs cross-validation and returns the best alpha used with the auxiliary loss.
"""
best_alpha = None
best_accuracy = -1
proportion = 1.0 / K
# parameters you want to test
for aux_loss_alpha in aux_loss_alphas:
accuracy_sum = 0
for i in range(K):
model = architecture(nb_conv=2).to(device = device)
tr_input, tr_target, tr_classes, val_input, val_target, val_classes = split_train_validation(train_input, train_target, train_classes, validation_proportion = proportion)
train_model(model, tr_input, tr_target, tr_classes, val_input, val_target, val_classes,
nb_epoch, batch_size, optimizer_params,
logging=False, aux_loss_alpha=aux_loss_alpha)
accuracy, _, _ = test(val_input, val_target, val_classes, model, nn.CrossEntropyLoss(), batch_size, aux_loss_alpha=aux_loss_alpha)
accuracy_sum += accuracy
accuracy_mean = accuracy_sum / K
print('Aux loss alpha = {a} => Mean accuracy = {m}'.format(a = aux_loss_alpha, m = accuracy_mean))
if accuracy_mean > best_accuracy:
best_accuracy = accuracy_mean
best_alpha = aux_loss_alpha
return best_alpha, best_accuracy
```
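The loops above expect the model to expose two entry points: `forward(inputs)` on the stacked two-channel pair, returning 2-class logits, and `digit_pred(single_channel_input)`, returning 10-class digit logits for the auxiliary loss (with `separate_channels` assumed to keep the channel dimension). A minimal, illustrative sketch of that interface, assuming the 2×14×14 MNIST pairs produced by `generate_pair_sets`; note that `evaluate_model` additionally passes `nb_conv` and `final_bias` to the real architectures:

```python
import torch
import torch.nn as nn

class TinyPairModel(nn.Module):
    """Minimal model satisfying the interface expected by train_model/test above."""
    def __init__(self):
        super().__init__()
        self.digit_net = nn.Sequential(nn.Flatten(), nn.Linear(14 * 14, 10))
        self.head = nn.Linear(20, 2)

    def digit_pred(self, x):
        # x: (N, 1, 14, 14) single-channel digit image -> (N, 10) class logits
        return self.digit_net(x)

    def forward(self, x):
        # x: (N, 2, 14, 14) image pair -> (N, 2) comparison logits
        d1 = self.digit_pred(x[:, 0:1])
        d2 = self.digit_pred(x[:, 1:2])
        return self.head(torch.cat([d1, d2], dim=1))
```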
#### File: Proj2/modules/Activations.py
```python
from modules import Module
class ReLU(Module):
"""Class representing the rectified linear unit activation function."""
def forward(self, input) :
"""
        Applies the ReLU function to the input.
Args:
input -- tensor of size (N, *)
        Returns:
output -- tensor of same size as input
"""
# The input is needed when computing the backward pass.
self.input = input.clone()
output = input.clamp(min=0)
return output
__call__ = forward
def backward(self, grad_output):
"""
        Given the gradient w.r.t. the output of the activation,
        computes the gradient w.r.t. the input of the activation.
Args:
grad_output -- tensor of same size as self.input
Returns:
grad_input -- tensor of same size as self.input
"""
assert(self.input is not None)
assert(grad_output.size() == self.input.size())
grad_input = grad_output.clone()
grad_input[self.input < 0] = 0
return grad_input
class Tanh(Module):
"""Class representing the hyperbolic tangent activation function."""
def forward(self, input):
"""
Applies the hyperbolic tangent to the input.
Args:
input -- tensor of size (N, *)
Returns:
output -- tensor of same size as input
"""
# The input is needed when computing the backward pass.
self.input = input.clone()
output = input.tanh()
return output
__call__ = forward
def backward (self, grad_output):
"""
        Given the gradient w.r.t. the output of the activation,
        computes the gradient w.r.t. the input of the activation.
Args:
grad_output -- tensor of same size as self.input
Returns:
grad_input -- tensor of same size as self.input
"""
grad_input = 1 - (self.input.tanh() ** 2)
grad_input = grad_output * grad_input
return grad_input
```
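A quick way to sanity-check these hand-written backward passes is a finite-difference comparison. A minimal sketch for `Tanh`, assuming the `modules` package re-exports the class, as the import above suggests:

```python
import torch
from modules import Tanh  # assumes the package's __init__ exposes Tanh

x = torch.empty(4, 3).normal_()
act = Tanh()
out = act(x)
grad_in = act.backward(torch.ones_like(out))  # gradient of sum(tanh(x)) w.r.t. x

eps = 1e-4
# Numerical derivative of tanh at one entry for comparison.
num = ((x[0, 0] + eps).tanh() - (x[0, 0] - eps).tanh()) / (2 * eps)
print(torch.allclose(grad_in[0, 0], num, atol=1e-3))  # expected: True
```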
#### File: Proj2/modules/Linear.py
```python
import math
from torch import empty
from modules import Module
class Linear(Module):
"""Class representing a fully connected linear layer in a neural network."""
def __init__(self, in_features, out_features, bias=True):
self.in_features = in_features
self.out_features = out_features
self.params = []
tmp = math.sqrt(in_features) # For weight initialization.
self.weight = empty(size=(out_features, in_features)).uniform_(-1/tmp,1/tmp)
self.dw = empty(size=(out_features, in_features)).zero_()
self.params.append((self.weight, self.dw))
if bias:
self.bias = empty(out_features).normal_()
self.db = empty(out_features).zero_()
self.params.append((self.bias, self.db))
else:
self.bias = None
self.db = None
def forward(self, input):
"""
        Applies a linear transformation to the input data.
Args:
input -- tensor of size (N, in_features)
Returns:
output -- tensor of size (N, out_features)
"""
assert(input.size(-1) == self.in_features)
# Required information for the backward pass.
# We clone to ensure that input won't be modified by the user before
# calling backward.
self.input = input.clone()
output = input @ self.weight.T
if self.bias is not None:
output += self.bias
return output
__call__ = forward
def backward(self, grad_output):
"""
Computes the gradient w.r.t. the input of the layer
        given the gradient w.r.t. the output of the layer.
Also computes and updates the gradient w.r.t.
the parameters of the layer.
Args:
grad_output -- tensor of size (N, out_features)
        Returns:
grad_input -- tensor of size (N, in_features)
"""
assert(grad_output.size(-1) == self.out_features)
grad_input = grad_output @ self.weight
if self.bias is not None:
self.db += grad_output.sum(axis=0)
self.dw += grad_output.T @ self.input
return grad_input
def zero_grad(self):
"""
        Sets the gradients w.r.t. the parameters to zero.
"""
        # Zero the gradients in place so the tensors stored in self.params stay valid.
        self.dw.zero_()
        if self.bias is not None:
            self.db.zero_()
return
def param(self):
"""
Returns the parameters of the layer i.e. its weight and
bias along with their gradient.
Returns:
params -- a list of pairs, each composed of a parameter tensor,
and a gradient tensor of same size.
"""
# We just return a copy as we don't want the user
# to be able to change the params of the model through this method.
params = self.params.copy()
return params
def update_params(self, step_size):
"""
Updates the parameters of the linear layer by going
in the opposite direction of the gradient.
Args:
step_size -- the size of an update step
"""
self.weight -= step_size * self.dw
if self.bias is not None:
self.bias -= step_size * self.db
return
```
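The backward formulas used here (`grad_input = grad_output @ W`, `dw += grad_output.T @ input`, `db += grad_output.sum(0)`) can likewise be cross-checked against autograd run on copies of the same parameters; an illustrative sketch, assuming `Linear` is importable from the package:

```python
import torch
from modules import Linear  # assumes the package's __init__ exposes Linear

layer = Linear(3, 2)
x = torch.empty(5, 3).normal_()

# Reference gradients via autograd on copies of the same parameters.
w = layer.weight.clone().requires_grad_(True)
b = layer.bias.clone().requires_grad_(True)
xr = x.clone().requires_grad_(True)
(xr @ w.T + b).sum().backward()

out = layer(x)
grad_in = layer.backward(torch.ones_like(out))
print(torch.allclose(grad_in, xr.grad),
      torch.allclose(layer.dw, w.grad),
      torch.allclose(layer.db, b.grad))
```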
#### File: Proj2/modules/Module.py
```python
class Module(object):
"""Base class for all neural network modules."""
def forward(self, *input):
raise NotImplementedError
def backward(self, *gradwrtoutput):
raise NotImplementedError
def update_params(self, step_size):
return
def zero_grad(self):
return
def param(self):
return []
```
#### File: Proj2/modules/Sequential.py
```python
from modules import Module
class Sequential(Module):
"""Class representing a neural network composed of several modules"""
def __init__(self, *modules):
self.modules = list(modules)
def forward(self, input):
"""
Computes the output of the neural network given some input.
Args:
input -- tensor of size (N, i), i = in_features of the first layer of the nn.
Returns:
output -- tensor of size (N, o), o = out_features of the last layer of the nn.
"""
output = input
for module in self.modules:
output = module(output)
return output
__call__ = forward
def backward(self, grad_output):
"""
        Performs the whole backward pass of the neural network.
Args:
grad_output -- gradient of the loss w.r.t. the output of the nn.
Returns:
            grad_input -- gradient of the loss w.r.t. the input of the nn.
"""
grad_input = grad_output
self.modules.reverse()
for module in self.modules:
grad_input = module.backward(grad_input)
self.modules.reverse()
return grad_input
def zero_grad(self):
"""
        Sets the gradients w.r.t. the parameters of the nn to zero.
"""
for module in self.modules:
module.zero_grad()
return
def param(self):
"""
Returns the parameters of the nn along with their gradient.
Returns:
params -- a list of pairs, each composed of a parameter tensor,
and a gradient tensor of same size.
"""
params = []
for module in self.modules:
            # Use extend so the result is a flat list of (parameter, gradient) pairs, as documented.
            params.extend(module.param())
return params
def update_params(self, step_size):
"""
        Updates the parameters of the nn by going
        in the opposite direction of the gradient.
Args:
step_size -- the size of an update step
"""
for module in self.modules:
module.update_params(step_size)
return
```
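Putting the pieces together, a minimal end-to-end training sketch with these modules (assuming the package exposes `Sequential`, `Linear`, `ReLU` and `Tanh`; since no loss module is shown here, the MSE gradient is written out by hand):

```python
import torch
from modules import Sequential, Linear, ReLU, Tanh  # assumes the package exposes these classes

model = Sequential(Linear(2, 25), ReLU(), Linear(25, 1), Tanh())
x = torch.empty(100, 2).uniform_()
y = (x.sum(dim=1, keepdim=True) > 1).float() * 2 - 1  # targets in {-1, 1}

for epoch in range(200):
    pred = model(x)
    mse = ((pred - y) ** 2).mean()
    grad = 2 * (pred - y) / y.numel()  # d(MSE)/d(pred), fed to the backward pass
    model.zero_grad()
    model.backward(grad)
    model.update_params(step_size=0.1)

print('final MSE:', mse.item())
```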
#### File: DeepLearning/Proj2/print_util.py
```python
import matplotlib.pyplot as plt
class Color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
BRIGHT_GREEN = '\033[92m'
GREEN = '\033[32m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
GRAY = '\033[90m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def log_acc_loss_header(color=''):
print(color + 'Epoch'.ljust(12) + 'Time'.ljust(8) + 'Train loss'.ljust(15) +
'Train accuracy'.ljust(20) + 'Train F1 score'.ljust(15) + Color.END)
def log_acc_loss(e, nb_epoch, time, train_loss, train_acc, train_f, color='', persistent=True):
print('\r' + color +
'[{0}/{1}]'.format(e + 1, nb_epoch).ljust(12) +
'{time:.0f}s'.format(time=time).ljust(8) +
'{0:.4f}'.format(train_loss).ljust(15) +
'{0:.4f}'.format(train_acc).ljust(20) +
'{0:.4f}'.format(train_f).ljust(15) +
Color.END,
end='\n' if persistent else '')
```
{
"source": "jouvencia/aiortc",
"score": 2
}
#### File: visionia/gif/travail72.py
```python
import argparse
import asyncio
import json
import logging
import os
import platform
import ssl
import math
import cv2
import numpy
from av import VideoFrame
from aiohttp import web
from aiortc import RTCPeerConnection, RTCSessionDescription, VideoStreamTrack
from aiortc.contrib.media import MediaPlayer
ROOT = os.path.dirname(__file__)
async def index(request):
content = open(os.path.join(ROOT, "index.html"), "r").read()
return web.Response(content_type="text/html", text=content)
async def javascript(request):
content = open(os.path.join(ROOT, "client.js"), "r").read()
return web.Response(content_type="application/javascript", text=content)
class FlagVideoStreamTrack(VideoStreamTrack):
"""
A video track that returns an animated flag.
"""
def __init__(self):
super().__init__() # don't forget this!
self.counter = 0
height, width = 480, 640
# generate flag
data_bgr = numpy.hstack(
[
self._create_rectangle(
width=213, height=480, color=(255, 0, 0)
), # blue
self._create_rectangle(
width=214, height=480, color=(255, 255, 255)
), # white
self._create_rectangle(width=213, height=480, color=(0, 0, 255)), # red
]
)
# shrink and center it
M = numpy.float32([[0.5, 0, width / 4], [0, 0.5, height / 4]])
data_bgr = cv2.warpAffine(data_bgr, M, (width, height))
# compute animation
omega = 2 * math.pi / height
id_x = numpy.tile(numpy.array(range(width), dtype=numpy.float32), (height, 1))
id_y = numpy.tile(
numpy.array(range(height), dtype=numpy.float32), (width, 1)
).transpose()
self.frames = []
for k in range(30):
phase = 2 * k * math.pi / 30
map_x = id_x + 10 * numpy.cos(omega * id_x + phase)
map_y = id_y + 10 * numpy.sin(omega * id_x + phase)
img = cv2.imread('test'+str(k%3)+'.jpg')
self.frames.append(
VideoFrame.from_ndarray(numpy.array(img))
#VideoFrame.from_ndarray(
# cv2.remap(data_bgr, map_x, map_y, cv2.INTER_LINEAR), format="bgr24"
#)
)
async def recv(self):
pts, time_base = await self.next_timestamp()
frame = self.frames[self.counter % 30]
frame.pts = pts
frame.time_base = time_base
self.counter += 1
return frame
def _create_rectangle(self, width, height, color):
data_bgr = numpy.zeros((height, width, 3), numpy.uint8)
data_bgr[:, :] = color
return data_bgr
async def offer(request):
params = await request.json()
offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])
pc = RTCPeerConnection()
pcs.add(pc)
@pc.on("connectionstatechange")
async def on_connectionstatechange():
print("Connection state is %s" % pc.connectionState)
if pc.connectionState == "failed":
await pc.close()
pcs.discard(pc)
"""
# open media source
if true:
player = new FlagVideoStreamTrack()
else:
options = {"framerate": "30", "video_size": "640x480"}
if platform.system() == "Darwin":
player = MediaPlayer("default:none", format="avfoundation", options=options)
else:
player = MediaPlayer("/dev/video0", format="v4l2", options=options)
"""
await pc.setRemoteDescription(offer)
#for t in pc.getTransceivers():
pc.addTrack(FlagVideoStreamTrack())
answer = await pc.createAnswer()
await pc.setLocalDescription(answer)
return web.Response(
content_type="application/json",
text=json.dumps(
{"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}
),
)
pcs = set()
async def on_shutdown(app):
# close peer connections
coros = [pc.close() for pc in pcs]
await asyncio.gather(*coros)
pcs.clear()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="WebRTC webcam demo")
parser.add_argument("--cert-file", help="SSL certificate file (for HTTPS)")
parser.add_argument("--key-file", help="SSL key file (for HTTPS)")
parser.add_argument("--play-from", help="Read the media from a file and sent it."),
parser.add_argument(
"--host", default="0.0.0.0", help="Host for HTTP server (default: 0.0.0.0)"
)
parser.add_argument(
"--port", type=int, default=8080, help="Port for HTTP server (default: 8080)"
)
parser.add_argument("--verbose", "-v", action="count")
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
if args.cert_file:
ssl_context = ssl.SSLContext()
ssl_context.load_cert_chain(args.cert_file, args.key_file)
else:
ssl_context = None
app = web.Application()
app.on_shutdown.append(on_shutdown)
app.router.add_get("/", index)
app.router.add_get("/client.js", javascript)
app.router.add_post("/offer", offer)
web.run_app(app, host=args.host, port=args.port, ssl_context=ssl_context)
```
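For completeness, the `/offer` endpoint above can also be exercised from a Python client; a rough sketch using aiortc and aiohttp (the URL and JSON field names are taken from the handler above, everything else is illustrative):

```python
import asyncio
import aiohttp
from aiortc import RTCPeerConnection, RTCSessionDescription

async def run(url="http://localhost:8080/offer"):
    pc = RTCPeerConnection()
    pc.addTransceiver("video", direction="recvonly")

    @pc.on("track")
    def on_track(track):
        print("Receiving", track.kind)

    await pc.setLocalDescription(await pc.createOffer())
    async with aiohttp.ClientSession() as session:
        async with session.post(url, json={"sdp": pc.localDescription.sdp,
                                           "type": pc.localDescription.type}) as resp:
            answer = await resp.json()
    await pc.setRemoteDescription(RTCSessionDescription(sdp=answer["sdp"], type=answer["type"]))
    await asyncio.sleep(10)  # receive frames for a while
    await pc.close()

asyncio.run(run())
```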
#### File: visionia/webcam/Videoquimarche.py
```python
import argparse
import asyncio
import json
import logging
import os
import platform
import ssl
import math
import cv2
import numpy
from av import VideoFrame
from utils import ArducamUtils
import subprocess
import time
from aiohttp import web
from aiortc import RTCPeerConnection, RTCSessionDescription, VideoStreamTrack
from aiortc.contrib.media import MediaPlayer
logging.basicConfig(level=logging.DEBUG)
ROOT = os.path.dirname(__file__)
Expo=40
Freeram=50
async def index(request):
content = open(os.path.join(ROOT, "index.html"), "r").read()
return web.Response(content_type="text/html", text=content)
async def javascript(request):
content = open(os.path.join(ROOT, "client.js"), "r").read()
return web.Response(content_type="application/javascript", text=content)
class CamVideoStreamTrack(VideoStreamTrack):
def __init__(self):
super().__init__() # don't forget this!
self.counter = 0
self.frames = []
async def recv(self):
ret, frame = cap.read()
frame = arducam_utils.convert(frame)
cv2.imwrite('cam.jpg',frame)
img = cv2.imread('cam.jpg')
if self.counter%(Freeram)==(0):
self.frames=[]
            logging.debug('List cleared')
        self.frames.append(VideoFrame.from_ndarray(numpy.array(img)))
        logging.debug('Frame added to the list')
pts, time_base = await self.next_timestamp()
frame = self.frames[self.counter%(Freeram)]
        logging.debug('Frame read')
logging.debug(self.counter)
frame.pts = pts
frame.time_base = time_base
self.counter += 1
return frame
async def offer(request):
params = await request.json()
offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])
pc = RTCPeerConnection()
pcs.add(pc)
@pc.on("connectionstatechange")
async def on_connectionstatechange():
print("Connection state is %s" % pc.connectionState)
if pc.connectionState == "failed":
await pc.close()
pcs.discard(pc)
await pc.setRemoteDescription(offer)
pc.addTrack(CamVideoStreamTrack())
answer = await pc.createAnswer()
await pc.setLocalDescription(answer)
return web.Response(
content_type="application/json",
text=json.dumps(
{"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}
),
)
pcs = set()
async def on_shutdown(app):
# close peer connections
coros = [pc.close() for pc in pcs]
await asyncio.gather(*coros)
pcs.clear()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="WebRTC webcam demo")
parser.add_argument("--cert-file", help="SSL certificate file (for HTTPS)")
parser.add_argument("--key-file", help="SSL key file (for HTTPS)")
parser.add_argument("--play-from", help="Read the media from a file and sent it."),
parser.add_argument(
"--host", default="0.0.0.0", help="Host for HTTP server (default: 0.0.0.0)"
)
parser.add_argument(
"--port", type=int, default=8080, help="Port for HTTP server (default: 8080)"
)
parser.add_argument("--verbose", "-v", action="count")
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
if args.cert_file:
ssl_context = ssl.SSLContext()
ssl_context.load_cert_chain(args.cert_file, args.key_file)
else:
ssl_context = None
#Init camera
cmd1 = 'v4l2-ctl -d 0 -c exposure='+str(Expo)
cap = cv2.VideoCapture(0, cv2.CAP_V4L2)
arducam_utils = ArducamUtils(0)
cap.set(cv2.CAP_PROP_CONVERT_RGB, arducam_utils.convert2rgb)
    # Get the dimensions of the image coming from the sensor
w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    logging.debug('Camera initialized')
# needed to purge the frame with default exposure
for i in range(6):
subprocess.call(cmd1, shell=True)
ret, frame = cap.read()
    logging.debug('Exposure time set')
app = web.Application()
app.on_shutdown.append(on_shutdown)
app.router.add_get("/", index)
app.router.add_get("/client.js", javascript)
app.router.add_post("/offer", offer)
web.run_app(app, host=args.host, port=args.port, ssl_context=ssl_context)
```
#### File: visionia/webcam/webcam.py
```python
import argparse
import asyncio
import json
import logging
import os
import platform
import ssl
from aiohttp import web
from aiortc import RTCPeerConnection, RTCSessionDescription
from aiortc.contrib.media import MediaPlayer
ROOT = os.path.dirname(__file__)
logger2 = logging.getLogger("main")
logger2.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler() #interface
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') # formatter for the interface output
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger2.addHandler(ch) # add ch's output to the logger
async def index(request): # handles requests, returns the response
content = open(os.path.join(ROOT, "index.html"), "r").read()
return web.Response(content_type="text/html", text=content)
async def javascript(request): # serves the client script
content = open(os.path.join(ROOT, "client.js"), "r").read()
return web.Response(content_type="application/javascript", text=content)
async def offer(request):
    params = await request.json() # wait for a request before processing the rest
    offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"]) # formalizes and standardizes the handling of the JSON above
    pc = RTCPeerConnection() # creates all the RTC options
    pcs.add(pc) # stores all the different RTC connections? to be checked
    @pc.on("connectionstatechange") # connection state change updates?
async def on_connectionstatechange():
print("Connection state is %s" % pc.connectionState)
if pc.connectionState == "failed":
await pc.close()
pcs.discard(pc)
# open media source
if args.play_from:
        player = MediaPlayer(args.play_from) # option to play from a video file
    else:
        options = {"framerate": "1", "video_size": "1280x800"} # default options, if v4l2, for example, does not override them
        print(platform.system())
        logger2.debug(f'The platform is: {platform.system()}')
if platform.system() == "Darwin":
player = MediaPlayer("default:none", format="avfoundation", options=options)
else:
player = MediaPlayer("/dev/video0", format="v4l2", options=options)
await pc.setRemoteDescription(offer) #communication
for t in pc.getTransceivers():
if t.kind == "audio" and player.audio:
pc.addTrack(player.audio)
elif t.kind == "video" and player.video:
pc.addTrack(player.video)
    answer = await pc.createAnswer() # actor of the RTC negotiation
    await pc.setLocalDescription(answer)
    return web.Response( # the returned JSON
content_type="application/json",
text=json.dumps(
{"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}
),
)
pcs = set()
async def on_shutdown(app):
# close peer connections
coros = [pc.close() for pc in pcs]
await asyncio.gather(*coros)
pcs.clear()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Aucune idee de l'utilité de cette string, je recroiserais peut être ce message un jour")
parser.add_argument("--cert-file", help="SSL certificate file (for HTTPS)")
parser.add_argument("--key-file", help="SSL key file (for HTTPS)")
parser.add_argument("--play-from", help="Read the media from a file and sent it."),
parser.add_argument(
"--host", default="0.0.0.0", help="Host for HTTP server (default: 0.0.0.0)"
)
parser.add_argument(
"--port", type=int, default=8080, help="Port for HTTP server (default: 8080)"
)
parser.add_argument("--verbose", "-v", action="count")
args = parser.parse_args()
if args.verbose:
        logging.basicConfig(level=logging.DEBUG) # the verbose option
if args.cert_file:
ssl_context = ssl.SSLContext()
        ssl_context.load_cert_chain(args.cert_file, args.key_file) # the cert_file option, no idea what it is for
else:
ssl_context = None
app = web.Application()
app.on_shutdown.append(on_shutdown)
app.router.add_get("/", index)
app.router.add_get("/client.js", javascript)#demande un client au javascript
app.router.add_post("/offer", offer)#lance l'echange
web.run_app(app, host=args.host, port=args.port, ssl_context=ssl_context) #lance l'app
```
{
"source": "jouvin/indico-mlz-export",
"score": 2
}
#### File: indico-mlz-export/indico_mlz_export/controller.py
```python
from __future__ import unicode_literals
from flask import jsonify, request
from werkzeug.exceptions import Forbidden
from indico.modules.events.models.events import Event
from indico.modules.oauth import oauth
from indico.web.rh import RH
from indico_mlz_export.api import all_registrations, one_registration
class RHMLZExportBase(RH):
"""RESTful registrant API base class"""
CSRF_ENABLED = False
FLAT = False
@oauth.require_oauth('registrants')
def _check_access(self):
try:
ok = self.event.can_manage(request.oauth.user, permission='registration')
except TypeError:
ok = self.event.can_manage(request.oauth.user)
if not ok:
raise Forbidden()
class RHExportRegistrations(RHMLZExportBase):
""" Export a list of registrations for an event"""
def _process_args(self):
self.event_id = request.view_args['event_id']
self.event = Event.get(self.event_id, is_deleted=False)
def _process_GET(self):
return jsonify(all_registrations(self.event, self.FLAT))
class RHExportRegistrationsFlat(RHExportRegistrations):
FLAT = True
class RHExportRegistration(RHMLZExportBase):
""" Export a single registration for an event"""
def _process_args(self):
self.event_id = request.view_args['event_id']
self.registrant_id = request.view_args['registrant_id']
self.event = Event.get(self.event_id, is_deleted=False)
def _process_GET(self):
return jsonify(one_registration(self.event, self.registrant_id, self.FLAT))
class RHExportRegistrationFlat(RHExportRegistration):
FLAT = True
```
{
"source": "JouvinLea/pyirf",
"score": 3
}
#### File: pyirf/perf/utils.py
```python
import numpy as np
import pickle
import gzip
def percentiles(values, bin_values, bin_edges, percentile):
# Seems complicated for vector defined as [inf, inf, .., inf]
percentiles_binned = np.squeeze(np.full((len(bin_edges) - 1, len(values.shape)), np.inf))
err_percentiles_binned = np.squeeze(np.full((len(bin_edges) - 1, len(values.shape)), np.inf))
for i, (bin_l, bin_h) in enumerate(zip(bin_edges[:-1], bin_edges[1:])):
try:
print(i)
print(bin_l)
print(bin_h)
distribution = values[(bin_values > bin_l) & (bin_values < bin_h)]
percentiles_binned[i] = np.percentile(distribution, percentile)
print(percentiles_binned[i])
err_percentiles_binned[i] = percentiles_binned[i] / np.sqrt(len(distribution))
except IndexError:
pass
return percentiles_binned.T, err_percentiles_binned.T
def plot_hist(ax, data, edges, norm=False, yerr=False, hist_kwargs={}, error_kw={}):
"""Utility function to plot histogram"""
weights = np.ones_like(data)
if norm is True:
weights = weights / float(np.sum(data))
if yerr is True:
yerr = np.sqrt(data) * weights
else:
yerr = np.zeros(len(data))
centers = 0.5 * (edges[1:] + edges[:-1])
width = edges[1:] - edges[:-1]
ax.bar(centers, data * weights, width=width, yerr=yerr, error_kw=error_kw, **hist_kwargs)
return ax
def save_obj(obj, name):
"""Save object in binary"""
with gzip.open(name, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
"""Load object in binary"""
with gzip.open(name, 'rb') as f:
return pickle.load(f)
```
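A small usage sketch of the two helpers above on synthetic data (values are purely illustrative; the import path is assumed from the file header):

```python
import numpy as np
import matplotlib.pyplot as plt
from pyirf.perf.utils import percentiles, plot_hist  # assumed import path, matching the file header

rng = np.random.default_rng(0)
energy = rng.uniform(0.1, 10.0, size=1000)   # quantity used for binning
offset = rng.exponential(0.1, size=1000)     # quantity whose percentiles we want
edges = np.logspace(-1, 1, 6)

p68, p68_err = percentiles(offset, energy, edges, 68)
print(p68)

counts, hist_edges = np.histogram(offset, bins=20)
fig, ax = plt.subplots()
plot_hist(ax, counts, hist_edges, norm=True, yerr=True)
plt.show()
```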
#### File: pyirf/scripts/make_performance.py
```python
import os
import astropy.units as u
import argparse
import pandas as pd
import numpy as np
from gammapy.spectrum import cosmic_ray_flux, CrabSpectrum
from pyirf.io.io import load_config
from pyirf.perf import (CutsOptimisation,
CutsDiagnostic,
CutsApplicator,
IrfMaker,
SensitivityMaker,
)
def main():
# Read arguments
parser = argparse.ArgumentParser(description='Make performance files')
parser.add_argument('--config_file', type=str, required=True, help='')
parser.add_argument(
'--obs_time',
type=str,
required=True,
help='Observation time, should be given as a string, value and astropy unit separated by a dot. E.g:`50.h`'
)
mode_group = parser.add_mutually_exclusive_group()
mode_group.add_argument('--wave', dest="mode", action='store_const',
const="wave", default="tail",
help="if set, use wavelet cleaning")
mode_group.add_argument('--tail', dest="mode", action='store_const',
const="tail",
help="if set, use tail cleaning, otherwise wavelets")
args = parser.parse_args()
# Read configuration file
cfg = load_config(args.config_file)
# Add obs. time in configuration file
str_obs_time = args.obs_time.split('.')
cfg['analysis']['obs_time'] = {'value': float(str_obs_time[0]), 'unit': str(str_obs_time[-1])}
print(cfg['analysis']['obs_time'])
# Create output directory if necessary
outdir = os.path.join(cfg['general']['outdir'], 'irf_{}_ThSq_{}_Time{:.2f}{}'.format(
args.mode,
cfg['analysis']['thsq_opt']['type'],
cfg['analysis']['obs_time']['value'],
cfg['analysis']['obs_time']['unit'])
)
if not os.path.exists(outdir):
os.makedirs(outdir)
indir = cfg['general']['indir']
template_input_file = cfg['general']['template_input_file']
# Load data
particles = ['gamma', 'electron', 'proton']
evt_dict = dict() # Contain DL2 file for each type of particle
for particle in particles:
# template looks like dl2_{}_{}_merged.h5
infile = os.path.join(indir, template_input_file.format(args.mode, particle))
evt_dict[particle] = pd.read_hdf(infile, key='reco_events')
# Apply offset cut to proton and electron
for particle in ['electron', 'proton']:
# print('Initial stat: {} {}'.format(len(evt_dict[particle]), particle))
evt_dict[particle] = evt_dict[particle].query('offset <= {}'.format(
cfg['particle_information'][particle]['offset_cut'])
)
# Add required data in configuration file for future computation
for particle in particles:
n_files = cfg['particle_information'][particle]['n_files']
print(f"{n_files} files for {particle}")
cfg['particle_information'][particle]['n_files'] = \
len(np.unique(evt_dict[particle]['obs_id']))
cfg['particle_information'][particle]['n_simulated'] = \
cfg['particle_information'][particle]['n_files'] * cfg['particle_information'][particle]['n_events_per_file']
# Define model for the particles
model_dict = {'gamma': CrabSpectrum('hegra').model,
'proton': cosmic_ray_flux,
'electron': cosmic_ray_flux}
# Reco energy binning
cfg_binning = cfg['analysis']['ereco_binning']
ereco = np.logspace(np.log10(cfg_binning['emin']),
np.log10(cfg_binning['emax']),
cfg_binning['nbin'] + 1) * u.TeV
# Handle theta square cut optimisation
# (compute 68 % containment radius PSF if necessary)
thsq_opt_type = cfg['analysis']['thsq_opt']['type']
    if thsq_opt_type == 'fixed':
thsq_values = np.array([cfg['analysis']['thsq_opt']['value']]) * u.deg
print('Using fixed theta cut: {}'.format(thsq_values))
    elif thsq_opt_type == 'opti':
thsq_values = np.arange(0.05, 0.40, 0.01) * u.deg
print('Optimising theta cut for: {}'.format(thsq_values))
    elif thsq_opt_type == 'r68':
print('Using R68% theta cut')
print('Computing...')
cfg_binning = cfg['analysis']['ereco_binning']
ereco = np.logspace(np.log10(cfg_binning['emin']),
np.log10(cfg_binning['emax']),
cfg_binning['nbin'] + 1) * u.TeV
radius = 68
thsq_values = list()
for ibin in range(len(ereco) - 1):
emin = ereco[ibin]
emax = ereco[ibin + 1]
energy_query = 'reco_energy > {} and reco_energy <= {}'.format(
emin.value, emax.value
)
data = evt_dict['gamma'].query(energy_query).copy()
min_stat = 0
if len(data) <= min_stat:
print(' ==> Not enough statistics:')
print('To be handled...')
thsq_values.append(0.3)
continue
# import sys
# sys.exit()
psf = np.percentile(data['offset'], radius)
psf_err = psf / np.sqrt(len(data))
thsq_values.append(psf)
thsq_values = np.array(thsq_values) * u.deg
# Set 0.05 as a lower value
idx = np.where(thsq_values.value < 0.05)
thsq_values[idx] = 0.05 * u.deg
print('Using theta cut: {}'.format(thsq_values))
# Cuts optimisation
print('### Finding best cuts...')
cut_optimiser = CutsOptimisation(
config=cfg,
evt_dict=evt_dict,
verbose_level=0
)
# Weight events
print('- Weighting events...')
cut_optimiser.weight_events(
model_dict=model_dict,
colname_mc_energy=cfg['column_definition']['mc_energy']
)
# Find best cutoff to reach best sensitivity
print('- Estimating cutoffs...')
cut_optimiser.find_best_cutoff(energy_values=ereco, angular_values=thsq_values)
# Save results and auxiliary data for diagnostic
print('- Saving results to disk...')
cut_optimiser.write_results(
outdir, '{}.fits'.format(cfg['general']['output_table_name']),
format='fits'
)
# Cuts diagnostic
print('### Building cut diagnostics...')
cut_diagnostic = CutsDiagnostic(config=cfg, indir=outdir)
cut_diagnostic.plot_optimisation_summary()
cut_diagnostic.plot_diagnostics()
# Apply cuts and save data
print('### Applying cuts to data...')
cut_applicator = CutsApplicator(config=cfg, evt_dict=evt_dict, outdir=outdir)
cut_applicator.apply_cuts()
# Irf Maker
print('### Building IRF...')
irf_maker = IrfMaker(config=cfg, evt_dict=evt_dict, outdir=outdir)
irf_maker.build_irf()
# Sensitivity maker
print('### Estimating sensitivity...')
sensitivity_maker = SensitivityMaker(config=cfg, outdir=outdir)
sensitivity_maker.load_irf()
sensitivity_maker.estimate_sensitivity()
if __name__ == '__main__':
main()
```
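As a usage note, the script is driven entirely from the command line; a typical invocation might look like `python make_performance.py --config_file performance.yaml --obs_time 50.h --tail` (the configuration file name is hypothetical), where `--obs_time` follows the `value.unit` convention parsed above and `--tail`/`--wave` select the cleaning mode.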
{
"source": "jouvin/release",
"score": 3
}
#### File: documentation_builder/test/builder.py
```python
import sys
import os
import shutil
import filecmp
from tempfile import mkdtemp
from unittest import TestCase, main, TestLoader
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../lib'))) # noqa
from quattordocbuild import builder
class BuilderTest(TestCase):
"""Test class for builder.py."""
def setUp(self):
"""Set up temp dir for tests."""
self.tmpdir = mkdtemp()
def tearDown(self):
"""Remove temp dir."""
shutil.rmtree(self.tmpdir)
def test_which(self):
"""Test which function."""
self.assertFalse(builder.which('testtest1234'))
self.assertTrue(builder.which('python'))
def test_check_input(self):
"""Test check_input function."""
self.assertFalse(builder.check_input("", ""))
self.assertFalse(builder.check_input(self.tmpdir, ""))
self.assertFalse(builder.check_input("/nonexistingdir", ""))
self.assertTrue(builder.check_input(self.tmpdir, self.tmpdir))
os.makedirs(os.path.join(self.tmpdir, "test"))
self.assertTrue(builder.check_input(self.tmpdir, os.path.join(self.tmpdir, "test")))
def test_check_commands(self):
"""Test check_commands function."""
self.assertTrue(builder.check_commands(True))
def test_build_site_structure(self):
"""Test build_site_structure function."""
repomap = {
"configuration-modules-core": {
"sitesection": "components",
"targets": ["/NCM/Component/", "/components/", "/pan/quattor/"]
},
"CCM": {
"sitesection": "CCM",
"targets": ["EDG/WP4/CCM/"],
},
}
testdata = {'CCM': {'/tmp/qdoc/src/CCM/target/doc/pod/EDG/WP4/CCM/Fetch/Download.pod':
"# NAME\n\nEDG::WP4::CC"},
'configuration-modules-core':
{'/tmp/doc/src/configuration-modules-core/ncm-profile/target/pan/components/profile/functions.pan':
u'\n### Functions\n',
'/tmp/doc/src/configuration-modules-core/ncm-fmonagent/target/doc/pod/NCM/Component/fmonagent.pod':
'Hello',
'/tmp/doc/src/configuration-modules-core/ncm-freeipa/target/pan/quattor/aii/freeipa/schema.pan':
'Hello2'
}}
expected_response = {'CCM': {'Fetch::Download.md': '# NAME\n\nEDG::WP4::CC'},
'components': {'aii::freeipa::schema.md': 'Hello2',
'fmonagent.md': 'Hello',
'profile::functions.md': u'\n### Functions\n'}}
self.assertEquals(builder.build_site_structure(testdata, repomap), expected_response)
def test_make_interlinks(self):
"""Test make_interlinks function."""
# Replace one reference
test_data = {'components-grid': {'fmonagent.md': ''},
'components': {'icinga.md': 'I refer to `fmonagent`.'}}
expected = {'components-grid': {'fmonagent.md': ''},
'components': {'icinga.md': 'I refer to [fmonagent](../components-grid/fmonagent.md).'}}
self.assertEquals(builder.make_interlinks(test_data), expected)
# Replace two references
test_data = {'comps-gr': {'fmnt.md': ''},
'comps': {'icinga.md': 'refr `fmnt` and `fmnt`.'}}
expected = {'comps-gr': {'fmnt.md': ''},
'comps': {'icinga.md': 'refr [fmnt](../comps-gr/fmnt.md) and [fmnt](../comps-gr/fmnt.md).'}}
self.assertEquals(builder.make_interlinks(test_data), expected)
# Replace ncm- reference
test_data = {'components-grid': {'fmonagent.md': ''},
'components': {'icinga.md': 'I refer to `ncm-fmonagent`.'}}
expected = {'components-grid': {'fmonagent.md': ''},
'components': {'icinga.md': 'I refer to [fmonagent](../components-grid/fmonagent.md).'}}
self.assertEquals(builder.make_interlinks(test_data), expected)
# Replace newline reference
test_data = {'components-grid': {'fmonagent.md': ''},
'components': {'icinga.md': 'I refer to \n`ncm-fmonagent`.'}}
expected = {'components-grid': {'fmonagent.md': ''},
'components': {'icinga.md': 'I refer to \n[fmonagent](../components-grid/fmonagent.md).'}}
self.assertEquals(builder.make_interlinks(test_data), expected)
# Replace linked wrong reference
test_data = {'components-grid': {'fmonagent.md': ''},
'components': {'icinga.md': 'I refer \
to [NCM::Component::FreeIPA::Client](https://metacpan.org/pod/NCM::Component::FreeIPA::Client).',
'FreeIPA::Client': 'Allo'}}
expected = {'components-grid': {'fmonagent.md': ''},
'components': {'icinga.md': 'I refer to [FreeIPA::Client](../components/FreeIPA::Client).',
'FreeIPA::Client': 'Allo'}}
self.assertEquals(builder.make_interlinks(test_data), expected)
# Don't replace in own page
test_data = {'comps-grid': {'fmonagent.md': 'ref to `fmonagent`.'},
'comps': {'icinga.md': 'ref to `icinga` and `ncm-icinga`.'}}
self.assertEquals(builder.make_interlinks(test_data), test_data)
def test_write_site(self):
"""Test write_site function."""
input = {'CCM': {'fetch::download.md': '# NAME\n\nEDG::WP4::CC'},
'components': {'fmonagent.md': 'Hello',
'profile::functions.md': u'\n### Functions\n'}}
sitedir = os.path.join(self.tmpdir, "docs")
builder.write_site(input, self.tmpdir, "docs")
self.assertTrue(os.path.exists(os.path.join(sitedir, 'components')))
self.assertTrue(os.path.exists(os.path.join(sitedir, 'components/profile::functions.md')))
self.assertTrue(os.path.exists(os.path.join(sitedir, 'components/fmonagent.md')))
self.assertTrue(os.path.exists(os.path.join(sitedir, 'CCM')))
self.assertTrue(os.path.exists(os.path.join(sitedir, 'CCM/fetch::download.md')))
def test_write_toc(self):
"""Test write_toc function."""
toc = {'CCM': set(['fetch::download.md']), 'components': set(['fmonagent.md', 'profile::functions.md'])}
builder.write_toc(toc, self.tmpdir)
with open(os.path.join(self.tmpdir, "mkdocs.yml")) as fih:
print fih.read()
self.assertTrue(filecmp.cmp('test/testdata/mkdocs.yml', os.path.join(self.tmpdir, "mkdocs.yml")))
toc = {'components': set(['profile::functions.md', 'fmonagent.md']), 'CCM': set(['fetch::download.md'])}
builder.write_toc(toc, self.tmpdir)
with open(os.path.join(self.tmpdir, "mkdocs.yml")) as fih:
print fih.read()
self.assertTrue(filecmp.cmp('test/testdata/mkdocs.yml', os.path.join(self.tmpdir, "mkdocs.yml")))
def suite(self):
"""Return all the testcases in this module."""
return TestLoader().loadTestsFromTestCase(BuilderTest)
if __name__ == '__main__':
main()
```
#### File: scripts/plenary_template_library/plenary_template_library.py
```python
import logging
from urllib import urlopen
from json import load
from datetime import datetime, timedelta
from os.path import exists, isdir, join, abspath
from os import chdir, makedirs, sep
from shutil import rmtree
from tempfile import mkdtemp
from sys import exit as sys_exit
from argparse import ArgumentParser
import subprocess
import errno
RELEASES_URL = 'http://www.quattor.org/release/releases.json'
LIBRARY_URL_PATTERN = 'https://github.com/quattor/template-library-%s.git'
LIBRARY_BRANCHES = {
'core' : ['master'],
'grid' : ['emi-2', 'umd-3'],
'os' : ['sl5.x-x86_64', 'sl6.x-x86_64', 'el7.x-x86_64'],
'standard' : ['master'],
#'openstack' : ['icehouse'],
}
BIN_GIT = '/usr/bin/git'
BIN_RSYNC = '/usr/bin/rsync'
def execute(command):
"""Wrapper around subprocess, calls an external process, logging stdout and stderr to debug"""
logger = logging.getLogger('sync-template-library')
if command:
process = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
output, error = process.communicate()
logger.debug('Executed %s, stdout: "%s", stderr: "%s"', command[0], output.strip(), error.strip())
if process.returncode > 0:
logger.error('Executed %s with errors: "%s"', command[0], error.strip())
return False
return True
else:
logger.error('Called execute without specifing a command to run')
return False
def make_target_dir(directory):
"""Create specified directory. Stay silent if it already exists, but complain loudly if it isn't a directory"""
logger = logging.getLogger('sync-template-library')
try:
makedirs(directory)
logger.debug('Created directory %s', directory)
return True
except OSError, e:
if e.errno != errno.EEXIST:
raise
if exists(directory):
if isdir(directory):
logger.debug('Directory %s already exists', directory)
else:
logger.error('Tried to create directory %s, but it already exists and is not a directory', directory)
return False
def get_release_dates():
#logger = logging.getLogger('sync-template-library')
releases = load(urlopen(RELEASES_URL))
results = []
for release, properties in releases.iteritems():
release = '%s.0' % release
release_date = datetime.strptime(properties['target'], '%Y-%m-%d')
results.append((release, release_date))
return results
def get_current_releases():
logger = logging.getLogger('sync-template-library')
results = []
now = datetime.now()
threshold = now - timedelta(days=365)
for release, release_date in get_release_dates():
if release_date < now:
if release_date > threshold:
results.append(release)
else:
logger.debug('Skipping %s, release was too far in the past', release)
else:
logger.debug('Skipping %s, release is in the future', release)
return results
def sync_template_library(base_dir, releases):
logger = logging.getLogger('sync-template-library')
logger.debug('Using %s as base directory', base_dir)
for library, branches in LIBRARY_BRANCHES.iteritems():
logger.info('Processing library %s', library)
url = LIBRARY_URL_PATTERN % (library)
temp_dir = mkdtemp(prefix='quattor-template-library_')
logger.info('Cloning %s to %s', url, temp_dir)
execute([BIN_GIT, 'clone', url, temp_dir])
chdir(temp_dir)
logger.info('Done')
for release in releases:
logger.info('Release %s available', release)
for branch in branches:
logger.info(' Processing branch %s', branch)
tag = release
target_dir = join(base_dir, release, library)
logger.debug('Target dir is %s', target_dir)
if branch != 'master':
tag = '%s-%s' % (branch, tag)
target_dir = join(target_dir, branch)
logger.debug('Added branch to target dir, which is now %s', target_dir)
make_target_dir(target_dir)
logger.info(' Checking out tag %s', tag)
if execute([BIN_GIT, 'checkout', tag]):
source = temp_dir + sep
target = target_dir + sep
logger.info(' rsyncing tag from %s to %s', source, target)
execute([BIN_RSYNC, '-rtx', '--exclude=\.git', source, target])
chdir(base_dir)
try:
rmtree(temp_dir)
logger.debug('Removed temporary directory %s', temp_dir)
except OSError, e:
            logger.error('Caught exception %s trying to remove temporary directory %s', e.strerror, temp_dir)
if __name__ == '__main__':
logging.basicConfig(
level=logging.INFO,
format='%(levelname)s: %(message)s',
name='sync-template-library'
)
parser = ArgumentParser(description='Synchronise quattor template libraries')
parser.add_argument('--debug', action='store_true', help='Enable debug output')
parser.add_argument('--releases', help='Sync specific release(s), delimit multiple releases with commas')
parser.add_argument(
'path',
metavar='PATH',
type=str,
help='Target base path for template library',
)
args = parser.parse_args()
logger = logging.getLogger('sync-template-library')
if args.debug:
logger.setLevel(logging.DEBUG)
if args.releases:
releases = args.releases.split(',')
else:
releases = get_current_releases()
sync_template_library(abspath(args.path), releases)
sys_exit(0)
```
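Usage note: the script takes a target path plus optional flags, e.g. `python plenary_template_library.py --debug /srv/www/template-library` to sync every release from the past year, or add `--releases 21.12.0` (release string hypothetical) to sync specific releases only.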
{
"source": "jouvin/SLAM",
"score": 2
}
#### File: slam/webinterface/middleware.py
```python
import re
import itertools
from slam import interface
_HTML_TYPES = ('text/html', 'application/xhtml+xml')
_SUPPORTED_TRANSFORMS = ['PUT', 'DELETE']
_FORM_RE = re.compile(
r'((<form\W[^>]*\bmethod=(\'|"|))(%s)((\'|"|)\b[^>]*>))'
% '|'.join(_SUPPORTED_TRANSFORMS), re.IGNORECASE)
_MIDDLEWARE_KEY = 'method_middleware_transform'
class HttpMethodsMiddleware(object):
"""This middleware class allow web browser to perform PUT and DELETE
requests by adding a hidden field to the forms.
This middleware is *not* compatible with the Django's CSRF protection."""
# Snippet from http://djangosnippets.org/snippets/174/
def process_request(self, request):
if request.POST and request.POST.has_key(_MIDDLEWARE_KEY):
if request.POST[_MIDDLEWARE_KEY].upper() in _SUPPORTED_TRANSFORMS:
request.method = request.POST[_MIDDLEWARE_KEY]
return None
def process_response(self, request, response):
if response['Content-Type'].split(';')[0] in _HTML_TYPES:
# ensure we don't add the 'id' attribute twice (HTML validity)
idattributes = itertools.chain(("id='" + _MIDDLEWARE_KEY + "'",),
itertools.repeat(''))
def add_transform_field(match):
"""Returns the matched <form> tag with a modified method and
the added <input> element"""
return match.group(2) + "POST" + match.group(5) + \
"<div style='display:none;'>" + \
"<input type='hidden' " + idattributes.next() + \
" name='" + _MIDDLEWARE_KEY + "' value='" + \
match.group(4).upper() + "' /></div>"
# Modify any POST forms
response.content = _FORM_RE.sub(
add_transform_field, response.content)
return response
class LoginRecordMiddleware:
"""Record the login used inside slam.interface to correctly report it
inside the logs."""
def process_request(self, request):
if request.user and request.user.username:
interface.set_log_author(request.user.username)
else:
interface.set_log_author("rootEnDur")
return None
```
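To see what the form-rewriting regex above captures, a tiny illustration (the form markup is made up, and the import path is assumed from the file header):

```python
from webinterface.middleware import _FORM_RE  # assumed import path, matching the file header

form = '<form action="/address/1" method="PUT">'
match = _FORM_RE.search(form)
print(bool(match))     # True: this form will be rewritten by process_response
print(match.group(4))  # 'PUT', the verb stored in the hidden middleware field
```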
#### File: SLAM/test/__init__.py
```python
import os
import sys
sys.path.append("test/")
sys.path.append("src/")
os.environ["DJANGO_SETTINGS_MODULE"] = "webinterface.settings"
from django.core.management import call_command
from django.conf import settings
def setup():
call_command("syncdb", interactive=False)
def teardown():
os.unlink(settings.DATABASES["default"]["NAME"])
```
#### File: SLAM/test/test_range.py
```python
import sys
sys.path.append("../src")
from nose.tools import assert_raises
from slam import addrrange
from slam.addrrange import InvalidAddressError
def test_format_ip4():
assert addrrange._format_ip4(42) == "0.0.0.42"
assert addrrange._format_ip4(16974599) == "1.3.3.7"
assert addrrange._format_ip4(3232300823) == "192.168.255.23"
def test_parse_ip4():
assert addrrange._parse_ip4("0.0.0.42") == 42
assert addrrange._parse_ip4("1.3.3.7") == 16974599
assert addrrange._parse_ip4("192.168.255.23") == 3232300823
assert_raises(InvalidAddressError, addrrange._parse_ip4, "invalid")
assert_raises(InvalidAddressError, addrrange._parse_ip4, "192.168.256.0")
def test_ip4range():
ipr = addrrange.Ip4Range("172.16.50.80/12")
assert addrrange._format_ip4(ipr.net) == "172.16.0.0" and ipr.mask == 12
ipr = addrrange.Ip4Range("10.42.137.23/28")
assert addrrange._format_ip4(ipr.net) == "10.42.137.16" and ipr.mask == 28
assert_raises(InvalidAddressError, addrrange.Ip4Range, "1.2.3.4")
assert_raises(InvalidAddressError, addrrange.Ip4Range, "1.2.3.4/42")
assert_raises(InvalidAddressError, addrrange.Ip4Range, "1.23.4/23")
assert ipr[15] == "10.42.137.31"
assert_raises(IndexError, ipr.__getitem__, 16)
def test_ip4range_contains():
ipr = addrrange.Ip4Range("10.42.137.23/28")
assert "10.42.137.15" not in ipr
assert "10.42.137.16" in ipr
assert "10.42.137.23" in ipr
assert "10.42.137.31" in ipr
assert "10.42.137.32" not in ipr
def test_ip4range_str():
ipr = addrrange.Ip4Range("10.42.137.23/28")
assert str(ipr) == "10.42.137.16/28"
ipr = addrrange.Ip4Range("172.16.50.80/12")
assert str(ipr) == "172.16.0.0/12"
def test_format_ip6():
assert(addrrange._format_ip6(42) ==
"fc00:e968:6179::de52:7100")
assert(addrrange._format_ip6(0x1337 << 16) ==
"fc00:db20:35b:7399::5")
assert(addrrange._format_ip6(24197857203266734884469844682461802258) ==
"fc00:e968:6179::de52:7100")
def test_parse_ip6():
assert(addrrange._parse_ip6("fc00:e968:6179::de52:7100")
== 42)
assert(addrrange._parse_ip6("fc00:db20:35b:7399::5")
== 0x13370000)
assert(addrrange._parse_ip6("fc00:e968:6179::de52:7100")
== 24197857203266734884469844682461802258)
assert_raises(InvalidAddressError, addrrange._parse_ip6, "invalid")
assert_raises(InvalidAddressError, addrrange._parse_ip6,
"0000:0000:0000:00g0:0000:0000:0000:0000")
# short form
assert addrrange._parse_ip6("::") == 0
assert addrrange._parse_ip6("::2a") == 42
assert addrrange._parse_ip6("2a::") == 42 << 112
assert(addrrange._parse_ip6("fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:abcd") ==
(42 << 112) + (0x1234 << 16) + 0xabcd)
assert(addrrange._parse_ip6("fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:abcd") ==
(42 << 112) + (0x1234 << 96) + (0x5678 << 16) + 0xabcd)
def test_ip6range():
ipr = addrrange.Ip6Range("fc42:0f00:0ba2:cafe:1234:1234:1234:1234/64")
assert(addrrange._format_ip6(ipr.net) ==
"fc42:0f00:0ba2:cafe:0000:0000:0000:0000")
assert ipr.mask == 64
ipr = addrrange.Ip6Range("fc42:0f00:0ba2:cafe:1234:1234:efcd:1234/110")
assert(addrrange._format_ip6(ipr.net) ==
"fc42:0f00:0ba2:cafe:1234:1234:efcc:0000")
assert ipr.mask == 110
assert len(ipr) == 262144
assert_raises(InvalidAddressError, addrrange.Ip6Range, "1.2.3.4")
assert_raises(InvalidAddressError, addrrange.Ip6Range, "::1/154")
assert_raises(InvalidAddressError, addrrange.Ip6Range, "42")
assert ipr[262143] == "fc42:0f00:0ba2:cafe:1234:1234:efcf:ffff"
assert_raises(IndexError, ipr.__getitem__, 262144)
def test_ip6range_contains():
ipr = addrrange.Ip6Range("fc42:0f00:0ba2:cafe:1234:1234:efcd:1234/110")
assert "fc42:0f00:0ba2:cafe:1234:1234:efcb:ffff" not in ipr
assert "fc42:0f00:0ba2:cafe:1234:1234:efcc:0000" in ipr
assert "fc42:0f00:0ba2:cafe:1234:1234:efce:1234" in ipr
assert "fc42:0f00:0ba2:cafe:1234:1234:efcf:ffff" in ipr
assert "fc42:0f00:0ba2:cafe:1234:1234:efd0:0000" not in ipr
def test_ip6range_str():
ipr = addrrange.Ip6Range("fc42:0f00:0ba2:cafe:1234:1234:efcd:1234/110")
assert str(ipr) == "fc42:0f00:0ba2:cafe:1234:1234:efcc:0000/110"
ipr = addrrange.Ip6Range("feaf:1234:0000:0000:0000:0000:0000:1234/10")
assert str(ipr) == "fe80:0000:0000:0000:0000:0000:0000:0000/10"
def test_addrset():
addrs = addrrange.AddrSet()
addrs.add("1.2.3.4")
addrs.add("127.0.0.1")
assert "127.0.0.1" in addrs and "1.2.3.4" in addrs
addr = addrs.addr_set.pop()
assert addr == "127.0.0.1" or addr == "1.2.3.4"
addrs.add("iamanaddress")
assert addrs.len() == 2
addrs.remove("iamanaddress")
assert addrs.len() == 1
assert_raises(IndexError, addrs.__getitem__, 2)
``` |
{
"source": "JouziP/DOS",
"score": 3
} |
#### File: DOS/BasicFunctions/cleanNeighbors.py
```python
import numpy as np
import pandas as pd
#
def sortNeighbors(neighbs):
neighbs_df = pd.DataFrame(neighbs)
neighbs_df = neighbs_df.sort_values(1, ascending=False)
neighbs[:,:]= neighbs_df.values[:,:]
return neighbs
def cleanNeighbors(neighbs):
neighbs=sortNeighbors(np.array(neighbs) )
length=neighbs.shape[0]
val1=round(neighbs[0, 1], 3)
j=0
#
all_neighbs=[]
while j<length:
val=round(neighbs[j, 1], 3)
# print j, length
nn=[]
q=0
while val1==val:
# print val1, val
# print q
nn.append(neighbs[j+q, :])
q+=1
if (j+q)<length:
val1 = round(neighbs[j+q, 1],3)
else:
break
all_neighbs.append(nn)
j=j+q
return all_neighbs
```
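A quick way to see what `cleanNeighbors` does is to feed it a tiny neighbor list. The sketch below is illustrative only and assumes `cleanNeighbors.py` is on the import path; the input values are made up. The function sorts the `[site, strength]` pairs by strength and splits them into shells of equal strength (rounded to three decimals).
```python
# Illustrative usage; assumes cleanNeighbors.py from this repo is importable.
from cleanNeighbors import cleanNeighbors

# Four neighbors of a site as [site index, coupling strength].
neighbs = [[0, 1.0], [1, 0.5], [2, 1.0], [3, 0.25]]

shells = cleanNeighbors(neighbs)
# Expected grouping: the two strength-1.0 neighbors form the first shell,
# then [1, 0.5], then [3, 0.25].
for shell in shells:
    print(shell)
```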
#### File: Direct/Functions/latticeConstructor.py
```python
import numpy as np
#from numpy import linalg as lg
################
from cleanNeighbors import cleanNeighbors
def getFirstneighbors(neighb_table):
pass
def getCartesianFromCoords(coords, **kwgs):
a1_x=kwgs['a1_x']
a1_y=kwgs['a1_y']
#
a2_x=kwgs['a2_x']
a2_y=kwgs['a2_y']
#
q1=coords[0]
q2=coords[1]
x = a1_x*q1 + a2_x*q2
y = a1_y*q1 + a2_y*q2
return x,y
def getIndexFromCoords(coords, **kwgs):
N1 = kwgs['N1']
q1 = coords[0]
q2 = coords[1]
idx = q2*N1 + q1
return idx
def getCoordsFromIndex(idx, **kwgs):
N1 = kwgs['N1']
    q2=idx//N1  # integer (floor) division keeps the coordinates integral under Python 3 as well
q1=idx - q2*N1
return q1, q2
def getDistance(deltaX, deltaY):
distance = ( (deltaX)**2 + (deltaY)**2)**(0.5)
return distance
def constructLattice(**kwgs):
N1 = kwgs['N1']
N2 = kwgs['N2']
num_sites = N1* N2
neighbors_array = []
for n in range(num_sites):
neighbors_array.append(getNeighborTableGeneralized(n, **kwgs))
return neighbors_array
def getNeighborTableGeneralized(idx, **kwgs):
###
power = kwgs['power']
a1_x=kwgs['a1_x']
a1_y=kwgs['a1_y']
#
a2_x=kwgs['a2_x']
a2_y=kwgs['a2_y']
#
###
neighbs=[]
q1, q2 = getCoordsFromIndex(idx, **kwgs)
#
N1 = kwgs['N1']
#
N2 = kwgs['N2']
#
N = N1 * N2
for i in range(0, N, 1):
q1_n, q2_n = getCoordsFromIndex(i, **kwgs)
### direct
if q1==q1_n and q2==q2_n:
pass
else:
x_n, y_n = getCartesianFromCoords(np.array([q1_n, q2_n]),
**kwgs)
# cartesian coords
x0,y0 = getCartesianFromCoords(np.array([q1,q2]), **kwgs)
distance = getDistance( (x_n-(x0)) , (y_n-y0) )
### periodic 1
# cartesian coords
x=x0+N2*a1_x
y=y0+N2*a1_y
distance10 = getDistance( (x_n-x), (y_n-y) )
#
x=x0-N2*a1_x
y=y0-N2*a1_y
distance11 = getDistance( (x_n-x), (y_n-y) )
distance1=np.min([distance10, distance11])
### peridic in 2
x=x0+N1*a2_x
y=y0+N1*a2_y
distance20 = getDistance( (x_n-x), (y_n-y) )
x=x0-N1*a2_x
y=y0-N1*a2_y
distance21 = getDistance( (x_n-x), (y_n-y) )
distance2=np.min([distance20, distance21])
#### peridic in 1 and 2
# cartesian coords
x=x0+N1*a2_x+N2*a1_x
y=y0+N1*a2_y+N2*a1_y
distance300 = getDistance( (x_n-x), (y_n-y) )
x=x0+N1*a2_x-N2*a1_x
y=y0+N1*a2_y-N2*a1_y
distance301 = getDistance( (x_n-x), (y_n-y) )
x=x0-N1*a2_x+N2*a1_x
y=y0-N1*a2_y+N2*a1_y
distance310 = getDistance( (x_n-x), (y_n-y) )
x=x0-N1*a2_x-N2*a1_x
y=y0-N1*a2_y-N2*a1_y
distance311 = getDistance( (x_n-x), (y_n-y) )
distance3=np.min([distance300,
distance310,
distance301,
distance311])
distance = np.min([distance,
distance1,
distance2,
distance3,
])
#
strength = 1./distance**(power)
#
neighbs.append([i, strength])
if kwgs['first_neighb']==True:
all_neighbors = cleanNeighbors(neighbs)
neighbs=all_neighbors[0]
###
return np.array(neighbs)
```
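For context, `constructLattice` expects its geometry and coupling options as keyword arguments. Below is a minimal sketch for a 3x3 periodic square lattice with a 1/r^3 coupling, keeping only the nearest-neighbor shell; the values are illustrative and it assumes this module and its `cleanNeighbors` dependency are importable.
```python
# Illustrative usage; the kwargs below are example values, not project defaults.
from latticeConstructor import constructLattice

kwgs = {
    'N1': 3, 'N2': 3,           # 3 x 3 periodic lattice
    'a1_x': 1.0, 'a1_y': 0.0,   # lattice vector a1
    'a2_x': 0.0, 'a2_y': 1.0,   # lattice vector a2
    'power': 3,                 # coupling decays as 1/distance**3
    'first_neighb': True,       # keep only the nearest-neighbor shell
}

neighbors = constructLattice(**kwgs)
# One entry per site; each entry is an array of [neighbor index, strength] rows.
print(neighbors[0])
```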
#### File: DOS/FunctionsLayer1/getNumberMatrix.py
```python
import numpy as np
from BasicFunctions.generateNumberArray import generateNumberArray
def isRepeatedSample(numberArray, samples_list):
#
for recordedNumberArray in samples_list:
diff = [recordedNumberArray[i] - numberArray[i]\
for i in range(len(numberArray))]
if diff==[0 for i in range(len(numberArray))]:
return 0
return 1
def generateNumberMatrix(**args):
N_spins = args['N_spins']
max_n_spins_in_basket = args['max_n_spins_in_basket']
N_samples = args['N_samples']
sampling_method=args['sampling_method']
###########################
    num_baskets = (N_spins//max_n_spins_in_basket+1,
                   N_spins//max_n_spins_in_basket)\
                   [N_spins-(N_spins//max_n_spins_in_basket)*\
                   max_n_spins_in_basket==0]
    if num_baskets!=(N_spins//max_n_spins_in_basket):
        num_spins_in_the_last_basket =\
            N_spins - (N_spins//max_n_spins_in_basket)*max_n_spins_in_basket
else:
num_spins_in_the_last_basket=0
###########################
samples_matrix=[]
## first sample
numberArray_init = generateNumberArray(N_spins, max_n_spins_in_basket)
samples_matrix.append(numberArray_init)
N_sampled = 0
while N_sampled != N_samples-1 and (N_sampled<2**N_spins):
#
################## update method
if sampling_method =='update':
numberArray = updateNumberArray(numberArray_init,
max_n_spins_in_basket,
num_spins_in_the_last_basket )
################## direct
if sampling_method =='direct':
numberArray = generateNumberArray(N_spins, max_n_spins_in_basket)
#################
#
if isRepeatedSample(numberArray, samples_matrix)!=0:
samples_matrix.append(numberArray)
N_sampled+=1
numberArray_init = numberArray
samples_matrix = np.matrix(samples_matrix)
return samples_matrix , N_sampled
def updateNumberArray(numberArray_init,
max_n_spins_in_basket,
num_spins_in_the_last_basket ):
#
numberArray_new = np.copy(numberArray_init)
if num_spins_in_the_last_basket==0:
col = np.random.randint(len(numberArray_init))
new_number = np.random.randint(2**max_n_spins_in_basket)
numberArray_new[col] = new_number
else:
col = np.random.randint(len(numberArray_init))
if col !=len(numberArray_init)-1:
new_number = np.random.randint(2**max_n_spins_in_basket)
numberArray_new[col] = new_number
else:
new_number = np.random.randint(2**num_spins_in_the_last_basket)
numberArray_new[col] = new_number
return numberArray_new
#
#### testing
#
#args={}
#args['N_spins']=100
#args['max_n_spins_in_basket']=20
#args['N_samples']=10
##M=generateNumberMatrix(**args)
#M = generateNumberMatrix(**args)
#print M[:6, :]
#print M[6:, :]
```
#### File: DOS/FunctionsLayer2/getEnergy2Expectation.py
```python
import numpy as np
def getEnergy2Expectation(energy_hist, beta, E_0, **args):
energy_expect=0
Z=0
energies=(energy_hist[:, 0]-E_0)
####
for b in range(energy_hist.shape[0]):
energy = energies[b]
energy_density = energy_hist[b, 1]
# print [energy,
# energy_density,
# np.exp(-beta*energy),
# Z,
# energy_expect,
# ]
energy_expect+=energy_density * np.exp(-beta*energy)* (energy+E_0)**2
Z+=energy_density * np.exp(-beta*energy)
energy_expect = energy_expect/Z
# print '---------------', energy_expect
return energy_expect
``` |
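The function is a plain Boltzmann average of E^2 over the histogrammed density of states. A two-level toy example makes the arithmetic easy to check by hand; this is illustrative only and assumes the file above is on the import path.
```python
# Illustrative usage; a two-level system with energies 0 and 1 and equal density of states.
import numpy as np
from getEnergy2Expectation import getEnergy2Expectation

energy_hist = np.array([[0.0, 1.0],   # rows are [energy, density of states]
                        [1.0, 1.0]])
beta, E_0 = 1.0, 0.0

# <E^2> = (0 * 1 + 1 * exp(-1)) / (1 + exp(-1)) ~= 0.269
print(getEnergy2Expectation(energy_hist, beta, E_0))
```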
{
"source": "jovahe/Mask_RCNN_RS",
"score": 3
} |
#### File: Mask_RCNN_RS/my/shp2json.py
```python
from osgeo import ogr
import gdal
import sys
import os
import fire
def ChangeToJson(vector, output):
print("Starting........")
    # open the vector layer
gdal.SetConfigOption("GDAL_FILENAME_IS_UTF8", "YES")
gdal.SetConfigOption("SHAPE_ENCODING", "GBK")
shp_ds = ogr.Open(vector)
shp_lyr = shp_ds.GetLayer(0)
numFeatures = shp_lyr.GetFeatureCount()
print("Features number:{}".format(numFeatures))
    # get the extent
extent = shp_lyr.GetExtent()
print("Extent:", extent)
print("UL:", extent[0], extent[3])
print("LR:", extent[1], extent[2])
    # loop over each feature's attributes
for i in range(numFeatures):
feature = shp_lyr.GetNextFeature()
        # get the value of the "id" field
# id = feature.GetField('type')
        # get the geometry (spatial attribute)
# print(id)
geometry = feature.GetGeometryRef()
# x = geometry.GetX()
polygonextent = geometry.GetEnvelope()
print(geometry.GetEnvelope())
# print(y)
# y = geometry.GetY()
print("UL:", polygonextent[0], polygonextent[3])
print("LR:", polygonextent[1], polygonextent[2])
print("segmentation:", geometry)
    # # create the output GeoJSON
# baseName = os.path.basename(output)
# out_driver = ogr.GetDriverByName('GeoJSON')
# out_ds = out_driver.CreateDataSource(output)
# if out_ds.GetLayer(baseName):
# out_ds.DeleteLayer(baseName)
# out_lyr = out_ds.CreateLayer(baseName, shp_lyr.GetSpatialRef())
# out_lyr.CreateFields(shp_lyr.schema)
# out_feat = ogr.Feature(out_lyr.GetLayerDefn())
#
    # # generate the output file
# for feature in shp_lyr:
# out_feat.SetGeometry(feature.geometry())
# for j in range(feature.GetFieldCount()):
# out_feat.SetField(j, feature.GetField(j))
# out_lyr.CreateFeature(out_feat)
#
# del out_ds
# del shp_ds
print("Success........")
def getPolygonEnvelope(vector):
driver = ogr.GetDriverByName('ESRI Shapefile')
ds = driver.Open(vector,0)
if ds is None:
print("Could not open {}".format(vector))
return -1
layer = ds.GetLayer()
numFeatures=layer.GetFeatureCount()
print("Features number:{}".format(numFeatures))
    # get the extent
extent = layer.GetExtent()
print("Extent:", extent)
print("UL:", extent[0], extent[3])
print("LR:", extent[1], extent[2])
    # loop over each feature's attributes
for i in range(numFeatures):
feature = layer.GetNextFeature()
        # get the value of the "id" field
# id = feature.GetField('type')
        # get the geometry (spatial attribute)
# print(id)
geometry = feature.GetGeometryRef()
# x = geometry.GetX()
polygonextent = geometry.GetEnvelope()
print(geometry.GetEnvelope())
# print(y)
# y = geometry.GetY()
print("UL:", polygonextent[0], polygonextent[3])
print("LR:", polygonextent[1], polygonextent[2])
return 0
if __name__ == '__main__':
shapefile = '/home/omnisky/PycharmProjects/data/maskRcnn/mytest/onepolygon/262985539_1709e54576_z.shp'
out = '/home/omnisky/PycharmProjects/data/maskRcnn/mytest/onepolygon/262985539_1709e54576_z.json'
ChangeToJson(shapefile, out)
# getPolygonEnvelope(shapefile)
# fire.Fire()
``` |
{
"source": "jovajiv/Knesset_Bill_Classifier",
"score": 3
} |
#### File: jovajiv/Knesset_Bill_Classifier/mergeXML.py
```python
import os, os.path, sys
import glob
import urllib.request
from xml.etree import ElementTree
import time
# specify the max skiptoken used in the Knesset OData service for this query, as OData returns info in increments of 100;
# going higher than the max is ok, going lower is not
# also need to specify the query table for OData.
def download_by_count(query ,upto):
print("start download")
PATH="XML\\"
index=0
while(upto > index):
urllib.request.urlretrieve("http://knesset.gov.il/Odata/ParliamentInfo.svc/{}()?$skip={}&top=100".format(query,index),PATH+"xml_{}.xml".format(index))
index +=100
def init_download(query):
global index
print("start download")
PATH="XML\\xml_{}.xml".format(0)
url = "http://knesset.gov.il/Odata/ParliamentInfo.svc/{}()".format(query)
urllib.request.urlretrieve(url,PATH)
index +=100
return PATH
def download(url):
global index
print("start download")
PATH="XML\\xml_{}.xml".format(index)
retry(url,PATH)
#urllib.request.urlretrieve(url,PATH)
index +=100
return PATH
def retry(url,PATH):
remaining_download_tries = 15
while remaining_download_tries > 0:
try:
urllib.request.urlretrieve(url, PATH)
print("successfully downloaded: " + url)
#time.sleep(0.1)
except:
print("error downloading " + url + " on trial no: " + str(16 - remaining_download_tries))
remaining_download_tries = remaining_download_tries - 1
time.sleep(2)
continue
else:
break
def run(query):
ElementTree.register_namespace('', "http://www.w3.org/2005/Atom")
ElementTree.register_namespace('d',"http://schemas.microsoft.com/ado/2007/08/dataservices")
ElementTree.register_namespace('m',"http://schemas.microsoft.com/ado/2007/08/dataservices/metadata")
ElementTree.register_namespace('base',"http://knesset.gov.il/Odata/ParliamentInfo.svc")
xml_element_tree = None
file=init_download(query)
while 1:
root = ElementTree.parse(file).getroot()
if xml_element_tree is None:
xml_element_tree = root
to_remove=root.findall(".")[0][-1] # remove the next label from the final result
next = root.findall(".")[0][-1].attrib['href']
file = download(next)
xml_element_tree.remove(to_remove)
continue
else:
for entry in root.iter('{http://www.w3.org/2005/Atom}entry'):
temp = ElementTree.Element('entry')
temp.append(entry)
xml_element_tree.extend(temp)
try:
next = root.findall(".")[0][-1].attrib['href']
print(next)
except:
break
file = download(next)
if xml_element_tree is not None:
#print(ElementTree.tostring(xml_element_tree))
f = open("myfile.xml", "wb")
f.write(ElementTree.tostring(xml_element_tree, encoding='utf8'))
f.close()
############################################################################################### this is main ###############################################################################
index=0
#download_by_count("KNS_Bill",100)
run("KNS_IsraelLaw")
# need to manually delete the next..
def old_run():
files = "./XML"
ElementTree.register_namespace('', "http://www.w3.org/2005/Atom")
ElementTree.register_namespace('d',"http://schemas.microsoft.com/ado/2007/08/dataservices")
ElementTree.register_namespace('m',"http://schemas.microsoft.com/ado/2007/08/dataservices/metadata")
ElementTree.register_namespace('base',"http://knesset.gov.il/Odata/ParliamentInfo.svc")
xml_files = glob.glob(files +"/*.xml")
xml_element_tree = None
#init_download("KNS_IsraelLawClassificiation")
for xml_file in xml_files:
root = ElementTree.parse(xml_file).getroot()
if xml_element_tree is None:
xml_element_tree = root
temp= root.findall(".")[0][-1]
print(root.findall(".")[0][-1].attrib['href']) #next
#for link in xml_element_tree.findall('{http://www.w3.org/2005/Atom}link'):
# print(link.attrib)
continue
for entry in root.iter('{http://www.w3.org/2005/Atom}entry'):
temp = ElementTree.Element('entry')
temp.append(entry)
xml_element_tree.extend(temp)
#print(ElementTree.tostring(xml_element_tree,encoding='utf8'))
if xml_element_tree is not None:
toto=xml_element_tree.find('link')
print(ElementTree.tostring(xml_element_tree))
f = open("myfile.xml", "wb")
f.write(ElementTree.tostring(xml_element_tree,encoding='utf8'))
f.close()
``` |
{
"source": "j-ovalle/data-structures-and-algorithms",
"score": 4
} |
#### File: data-structures-and-algorithms/data-structures/linked_list.py
```python
class Element(object):
def __init__(self, value):
self.value = value
self.next = None
class LinkedList(object):
def __init__(self, head=None):
self.head = head
def append(self, new_element):
# Add new element to the tail of the linked list
current = self.head
if self.head:
while current.next:
current = current.next
current.next = new_element
else:
self.head = new_element
def get_element(self, position):
# Get element by position (1 based)
counter = 1
current = self.head
if position < 1:
return None
while current and counter <= position:
if counter == position:
return current
current = current.next
counter += 1
return None
def insert(self, new_element, position):
# Insert new element at position (1 based)
counter = 1
current = self.head
if position > 1:
while current and counter < position:
if counter == position - 1:
new_element.next = current.next
current.next = new_element
current = current.next
counter += 1
elif position == 1:
new_element.next = self.head
self.head = new_element
def delete(self, value):
# Delete first element with the given value
current = self.head
previous = None
while current.value != value and current.next:
previous = current
current = current.next
if current.value == value:
if previous:
previous.next = current.next
else:
self.head = current.next
def insert_first(self, new_element):
# Insert new element as head
new_element.next = self.head
self.head = new_element
def delete_first(self):
# Delete first element and return it
if self.head:
deleted_element = self.head
temp = deleted_element.next
self.head = temp
return deleted_element
else:
return None
``` |
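A short usage sketch of the 1-based API; illustrative only, assuming `linked_list.py` is importable.
```python
# Illustrative usage of the 1-based LinkedList API.
from linked_list import Element, LinkedList

e1, e2, e3 = Element(1), Element(2), Element(3)

ll = LinkedList(e1)
ll.append(e3)
ll.insert(e2, 2)                 # list is now 1 -> 2 -> 3

print(ll.get_element(2).value)   # 2
ll.delete(1)                     # list is now 2 -> 3
print(ll.delete_first().value)   # 2, and 3 becomes the new head
```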
{
"source": "jovalle/fusion-rest-api",
"score": 3
} |
#### File: jovalle/fusion-rest-api/main.py
```python
import requests
import json
import argparse
import os
def print_json(obj):
"""Parse JSON objects"""
print(json.dumps(obj, sort_keys=True, indent=4))
# vm management
def get_vms():
"""Returns a list of VM IDs and paths for all VMs"""
return requests.get(api_url + '/vms', headers=headers).json()
def get_vm_by_name(name):
"""Returns the VM setting information of a VM"""
for vm in get_vms():
if os.path.split(vm['path'])[1].split('.')[0] == name:
r = requests.get(api_url + '/vms/' + vm['id'], headers=headers).json()
r['name'] = name
return r
def get_vm_by_id(id):
"""Returns the VM setting information of a VM"""
r = requests.get(api_url + '/vms/' + id, headers=headers).json()
# populate name
for vm in get_vms():
if vm['id'] == id:
r['name'] = os.path.split(vm['path'])[1].split('.')[0]
return r
def update_vm(name, processors=None, memory=None):
"""Updates the VM settings"""
vm = get_vm_by_name(name)
# Get current values to avoid empty parameters
if processors is None or memory is None:
if processors is None:
print("[DEBUG] Inheriting processor spec")
processors = int(vm['cpu']['processors'])
if memory is None:
print("[DEBUG] Inheriting memory spec")
memory = int(vm['memory'])
payload = {
"processors": int(processors),
"memory": int(memory)
}
return requests.put(url=api_url + '/vms/' + vm['id'], headers=headers, json=payload).json()
def create_vm(parent_name, name, processors, memory, vmnet):
"""Creates a copy of the VM"""
parent_vm = get_vm_by_name(parent_name)
print("[DEBUG] Arguments: %s, %s, %s, %s" % (str(parent_name), str(name), str(processors), str(memory)))
# Set default values
if processors is None or memory is None:
if processors is None:
print("[DEBUG] Inheriting processor spec")
processors = int(parent_vm['cpu']['processors'])
if memory is None:
print("[DEBUG] Inheriting memory spec")
memory = int(parent_vm['memory'])
payload = {
"cpu": {
"processors": int(processors)
},
"memory": int(memory),
"name": str(name),
"parentId": str(parent_vm['id'])
}
r = requests.post(url=api_url + '/vms', headers=headers, json=payload).json()
# BUG: cannot manipulate processor count on cloning. must update after
if parent_vm['cpu']['processors'] is not processors:
r = update_vm(name, processors, memory)
return r
def delete_vm(name):
"""Deletes a VM"""
r = requests.delete(api_url + '/vms/' + get_vm_by_name(name)['id'], headers=headers)
if r.status_code == 204:
print("[INFO] Deleted VM %s" % name)
return None
# vm power management
def get_power(name):
"""Returns power state of VM"""
return requests.get(api_url + '/vms/' + get_vm_by_name(name)['id'] + '/power', headers=headers).json()
def power_vm(name, state):
"""Power on or off a VM"""
r = requests.put(api_url + '/vms/' + get_vm_by_name(name)['id'] + '/power', headers=headers, data=state)
if r.status_code == 200:
print("[INFO] Powered %s %s" % (state, name))
return None
# vm network adapters management
def get_ip(name):
"""Returns current IP of (assuming) first NIC"""
return requests.get(api_url + '/vms/' + get_vm_by_name(name)['id'] + '/ip', headers=headers).json()
def get_nics(name):
"""Returns list of NICs for the VM"""
return requests.get(api_url + '/vms/' + get_vm_by_name(name)['id'] + '/nic', headers=headers).json()
def get_nic(name, index):
"""List specs of specific NIC of VM"""
for nic in get_nics(name)['nics']:
print("[DEBUG] Current Index: %s, Expected Index: %s" % (str(nic['index']), str(index)))
if nic['index'] == index:
return nic
def update_nic(name, index, type, vmnet=None, mac_addr=None):
    """Updates a network adapter in the VM"""
    nic = get_nic(name, int(index))
    # Get current values to avoid empty parameters
    if type is None:
        print("[DEBUG] Inheriting type key")
        type = nic['type']
    if vmnet is None:
        print("[DEBUG] Inheriting vmnet")
        vmnet = nic.get('vmnet')
    if mac_addr is None:
        print("[DEBUG] Inheriting mac_addr")
        mac_addr = nic['macAddress']
    payload = {
        "index": nic['index'],
        "type": type,
        "vmnet": vmnet,
        "macAddress": mac_addr
    }
    return requests.put(url=api_url + '/vms/' + get_vm_by_name(name)['id'] + '/nic/' + str(nic['index']), headers=headers, json=payload).json()
def create_nic(name, index, type, vmnet=None, mac_addr=None):
    """Creates a network adapter in the VM"""
    # Set next index if not defined
    if index is None:
        index = 0
        for nic in get_nics(name)['nics']:
            if int(nic['index']) >= index:
                index = int(nic['index']) + 1
    payload = {
        "index": index,
        "type": type,
        "vmnet": vmnet,
        "macAddress": mac_addr
    }
    return requests.post(url=api_url + '/vms/' + get_vm_by_name(name)['id'] + '/nic', headers=headers, json=payload).json()
def delete_nic(name, index):
"""Deletes a VM network adapter"""
r = requests.delete(api_url + '/vms/' + get_vm_by_name(name)['id'] + '/nic/' + str(index), headers=headers)
if r.status_code == 204:
print("[INFO] Deleted NIC %s on %s" % (index, name))
return None
if __name__ == '__main__':
# parse positional arguments
parser = argparse.ArgumentParser(description="Wrapper script for VMware Fusion REST API")
parser.add_argument("method", choices=['get','create','update','delete', 'power'], default='get', help="Execute API operation")
parser.add_argument("resource", nargs='?', default='vm')
# api options
parser.add_argument("--api-key", type=str, help="Your VMware Fusion REST API key")
# vm management options
parser.add_argument("--name", type=str, help="Resource identifier")
parser.add_argument("--parent-name", type=str, help="Target VM for cloning")
parser.add_argument("--processors", type=int, help="Set vCPUs for VM")
parser.add_argument("--memory", type=int, help="Set system memory for VM")
# vm power management options
parser.add_argument("--state", type=str, help="Set power state of VM")
# vm net adapter options
parser.add_argument("--index", type=int, help="Adapter identifier")
parser.add_argument("--type", type=str, help="Set adapter type (hostonly, nat, bridged, etc.)")
# finalize parsing
args = parser.parse_args()
# Check for API URL
if os.getenv('VMWARE_FUSION_REST_API_URL') is not None:
api_url = os.getenv('VMWARE_FUSION_REST_API_URL')
else:
api_url = 'http://127.0.0.1:8697/api'
# Check/prompt for API key
if os.getenv('VMWARE_FUSION_REST_API_KEY') is not None:
api_key = os.getenv('VMWARE_FUSION_REST_API_KEY')
else:
if args.api_key is not None:
api_key = args.api_key
else:
api_key = input("VMware Fusion REST API Key: ")
# Boilerplate header
headers = {
"Content-Type": "application/vnd.vmware.vmw.rest-v1+json",
"Accept": "application/vnd.vmware.vmw.rest-v1+json",
"Authorization": "Basic " + api_key
}
# switchboard
if args.method == 'get':
if args.resource == 'vm':
if args.name is None:
print("[INFO] List all VMs")
print_json(get_vms())
else:
print("[INFO] Get VM (Name: %s)" % str(args.name))
print_json(get_vm_by_name(args.name))
elif args.resource == 'ip':
print("[INFO] Get VM IP")
print_json(get_ip(args.name))
elif args.resource == 'nics':
print("[INFO] Get network adapters for VM")
print_json(get_nics(args.name))
elif args.method == 'update':
if args.resource == 'vm':
print("[INFO] Update VM (Name: %s)" % str(args.name))
print_json(update_vm(args.name, args.processors, args.memory))
elif args.resource == 'nic':
print("[INFO] Update NIC (VM: %s, Index %s)" % (str(args.name), str(args.index)))
            print_json(update_nic(args.name, args.index, args.type, args.vmnet))
elif args.method == 'create':
if args.resource == 'vm':
print("[INFO] Create VM (Name: %s)" % str(args.name))
print_json(create_vm(args.parent_name, args.name, args.processors, args.memory, args.vmnet))
elif args.resource == 'nic':
print("[INFO] Create NIC (Name: %s)" % str(args.name))
            print_json(create_nic(args.name, args.index, args.type, args.vmnet))
elif args.method == 'delete':
if args.resource == 'vm':
print("[INFO] Delete VM (Name: %s)" % str(args.name))
delete_vm(args.name)
elif args.resource == 'nic':
print("[INFO] Delete NIC (VM: %s, Index: %s)" % (str(args.name), str(args.index)))
delete_nic(args.name, args.index)
elif args.method == 'power':
if args.resource == 'on' or args.resource == 'off':
print("[DEBUG] power_vm(%s, %s)" % (str(args.name), str(args.resource)))
power_vm(args.name, args.resource)
elif args.resource == 'state':
if args.name is not None:
if args.state == None:
print("[DEBUG] get_power(%s)" % str(args.name))
print_json(get_power(args.name))
else:
print("[DEBUG] power_vm(%s, %s)" % (str(args.name), str(args.state)))
print_json(power_vm(args.name, args.state))
else:
for vm in get_vms():
print("[DEBUG] get_power(get_vm_by_id(%s))" % str(vm['id']))
print_json(get_power(get_vm_by_id(vm['id'])['name']))
``` |
{
"source": "JoValo/realEstate",
"score": 3
} |
#### File: realEstate/api/validations.py
```python
import boto3, json
class CoordinatesValidation():
def valid(self, latitude, longitude):
coordinates= json.dumps({
"latitude": latitude,
"longitude": longitude
})
client = boto3.client('lambda')
response = client.invoke(
FunctionName='CoordinatesValidation',
InvocationType='RequestResponse',
Payload=bytes(coordinates, 'utf8')
)
approved = response['Payload'].read().decode("utf-8")
return approved == "true"
``` |
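A hedged usage sketch: the coordinates below are example values, the import path assumes the package layout shown in the file header, and the call only succeeds with AWS credentials configured and a deployed Lambda function named `CoordinatesValidation`.
```python
# Illustrative only; latitude/longitude are example values.
from api.validations import CoordinatesValidation

validator = CoordinatesValidation()
print(validator.valid(latitude=4.60971, longitude=-74.08175))
```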
{
"source": "Jovamih/EndpointAppBuscaTutor",
"score": 2
} |
#### File: Jovamih/EndpointAppBuscaTutor/endpoint_tutores.py
```python
import pymysql
import sys
import json
host="buscatutordatabase.cuxsffuy95k9.us-east-1.rds.amazonaws.com"
port=3306
user="admin"
password="<PASSWORD>"
db="buscatutor"
def lambda_handler(event, context):
params=event["queryStringParameters"]
    # define the structure of the queries
if "id" in params.keys():
id=params['id']
query_info=f"""SELECT T.id_tutor, E.nombre_completo, E.num_telefono, T.descripcion,T.foto FROM Tutor as T
LEFT JOIN Estudiante as E ON T.id_estudiante=E.id_estudiante
WHERE T.id_tutor={id}
;"""
elif "select" in params.keys():
limit=int(params['select'])
        if limit<0: # if the value is negative (-1) we return all records
query_info=f"""SELECT T.id_tutor, E.nombre_completo, E.num_telefono,T.descripcion, T.foto FROM Tutor as T
LEFT JOIN Estudiante as E ON T.id_estudiante=E.id_estudiante
;"""
else:
query_info=f"""SELECT T.id_tutor, E.nombre_completo, E.num_telefono,T.descripcion, T.foto FROM Tutor as T
LEFT JOIN Estudiante as E ON T.id_estudiante=E.id_estudiante
LIMIT {limit}
;"""
elif "especialidad" in params.keys():
especialidad=params['especialidad']
query_info=f"""
SELECT T.id_tutor, E.nombre_completo, E.num_telefono,T.descripcion,T.foto FROM Tutor as T
LEFT JOIN Estudiante as E ON T.id_estudiante=E.id_estudiante
LEFT JOIN EspecialidadesTutor as ET ON T.id_tutor=ET.id_tutor
WHERE ET.desc_especialidad LIKE '%{especialidad}%';
"""
else:
return { 'statusCode': 200,
'body': json.dumps({"mensaje":"Los parametros enviados son incorrectos. Use id o select"})
}
    # query for the tutors' specialties
query_especialidades="""SELECT ET.desc_especialidad FROM Tutor as T LEFT JOIN EspecialidadesTutor as ET ON T.id_tutor= ET.id_tutor WHERE T.id_tutor= %s;"""
    # query for the tutors' skills
query_habilidades="""SELECT HT.desc_habilidad FROM Tutor as T LEFT JOIN HabilidadesTutor as HT ON T.id_tutor= HT.id_tutor WHERE T.id_tutor= %s;"""
lista_tutores=list()
#todo implement
conn=pymysql.connect(host=host,user=user,password=password,db=db,connect_timeout=5)
with conn.cursor() as cursor:
cursor.execute(query_info)
result=cursor.fetchall()
        # first we query the tutor
for row in result:
content={
"id_tutor":row[0],
"nombre_completo":row[1],
"num_telefono":row[2],
"descripcion":row[3],
"foto":row[4],
"especialidades":[],
"habilidades":[]
}
id_tutor=row[0]
            # query the tutor's specialties
cursor.execute(query_especialidades,(id_tutor,))
result_especialidades=cursor.fetchall()
for row_esp in result_especialidades:
if row_esp[0]:
content["especialidades"].append(row_esp[0])
cursor.execute(query_habilidades,(id_tutor,))
result_habilidades=cursor.fetchall()
for row_hab in result_habilidades:
if row_hab[0]:
content["habilidades"].append(row_hab[0])
            # add the record to the list
lista_tutores.append(content)
return {
'statusCode': 200,
'body': json.dumps({"tutores":lista_tutores})
}
``` |
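A hedged local invocation sketch: it assumes the module above is importable as `endpoint_tutores`, that the database is reachable, and that the event dict mirrors what API Gateway would pass in `queryStringParameters`.
```python
# Illustrative only; requires connectivity to the configured MySQL instance.
from endpoint_tutores import lambda_handler

event = {"queryStringParameters": {"select": "5"}}
print(lambda_handler(event, None))
```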
{
"source": "Jovamih/Modelo-Deteccion-COVID19",
"score": 3
} |
#### File: Modelo-Deteccion-COVID19/Notebooks/gradcam.py
```python
import cv2
import numpy as np
from PIL import Image
import tensorflow as tf
import tensorflow.keras as K
import matplotlib.pyplot as plt
from skimage.transform import resize
from tensorflow.keras.models import Model
# here we get the original model used for transfer learning
def get_transfer_model(model):
model_transfer=None
for layer in model.layers:
if isinstance(layer,tf.keras.Model):
model_transfer=layer
break
return model_transfer
def get_last_conv_layer(model):
for layer in model.layers[::-1]:
if isinstance(layer,K.layers.Conv2D):
return layer
return None
def VizGradCAM(model, image, interpolant=0.5, plot_results=True):
# Sanity Check
#if interpolant < 0 or interpolant >1:
# print("Heatmap Interpolation Must Be Between 0 - 1")
# return None
transfer_model=get_transfer_model(model)
last_conv_layer = get_last_conv_layer(transfer_model)
target_layer = transfer_model.get_layer(last_conv_layer.name)
original_img = image
img = np.expand_dims(original_img, axis=0)
prediction = model.predict(img)
# Obtain Prediction Index
prediction_idx = np.argmax(prediction)
# Compute Gradient of Top Predicted Class
with tf.GradientTape() as tape:
gradient_model = Model([transfer_model.inputs], [target_layer.output, transfer_model.output])
conv2d_out, prediction = gradient_model(img)
# Obtain the Prediction Loss
loss = prediction[:, prediction_idx]
# Gradient() computes the gradient using operations recorded
# in context of this tape
gradients = tape.gradient(loss, conv2d_out)
# Obtain the Output from Shape [1 x H x W x CHANNEL] -> [H x W x CHANNEL]
output = conv2d_out[0]
# Obtain Depthwise Mean
weights = tf.reduce_mean(gradients[0], axis=(0, 1))
# Create a 7x7 Map for Aggregation
activation_map = np.zeros(output.shape[0:2], dtype=np.float32)
# Multiply Weights with Every Layer
for idx, weight in enumerate(weights):
activation_map += weight * output[:, :, idx]
# Resize to Size of Image
activation_map = cv2.resize(
activation_map.numpy(), (original_img.shape[1], original_img.shape[0])
)
# Ensure No Negative Numbers
activation_map = np.maximum(activation_map, 0)
# Convert Class Activation Map to 0 - 255
activation_map = (activation_map - activation_map.min()) / (
activation_map.max() - activation_map.min()
)
activation_map = np.uint8(255 * activation_map)
# Convert to Heatmap
heatmap = cv2.applyColorMap(activation_map, cv2.COLORMAP_JET)
# Superimpose Heatmap on Image Data
original_img = np.uint8(
(original_img - original_img.min())
/ (original_img.max() - original_img.min())
* 255
)
cvt_heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)
# Enlarge Plot
plt.rcParams["figure.dpi"] = 100
if plot_results == True:
plt.imshow(
np.uint8(original_img * interpolant + cvt_heatmap * (1 - interpolant))
)
else:
return cvt_heatmap
``` |
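A hedged usage sketch: `VizGradCAM` expects a model whose backbone is a nested `tf.keras.Model` (that is what `get_transfer_model` looks for) and a single image array matching the model's input size. The file names below are placeholders, not files shipped with this repo.
```python
# Illustrative only; "model.h5" and "xray.png" are placeholder paths.
import tensorflow as tf
from tensorflow.keras.preprocessing import image as keras_image
from gradcam import VizGradCAM

model = tf.keras.models.load_model("model.h5")

img = keras_image.load_img("xray.png", target_size=model.input_shape[1:3])
img = keras_image.img_to_array(img)

# Overlays the Grad-CAM heatmap for the predicted class on the input image.
VizGradCAM(model, img, interpolant=0.5, plot_results=True)
```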
{
"source": "Jovamih/PythonProyectos",
"score": 3
} |
#### File: PythonProyectos/Intellicense/motion-detector.py
```python
import cv2
import numpy as np
import sys
def motion_detector(video):
while video.isOpened():
ret,image_last=video.read()
ret,image_current=video.read()
if not ret:
break
diff= cv2.absdiff(image_last,image_current)
if np.mean(diff)>3:
print("Movimiento detectado :{}".format(np.mean(diff)))
cv2.imshow("Motion Dectector",image_current)
        if (cv2.waitKey(1) & 0xFF) == ord('q'):
break
if __name__=="__main__":
if len(sys.argv)<2:
video=cv2.VideoCapture(0)
else:
video=cv2.VideoCapture(sys.argv[1])
try:
motion_detector(video)
video.release()
cv2.destroyAllWindows()
except Exception as e:
print(e)
```
#### File: PythonProyectos/Matplotlib/graficos-dispersion-simples.py
```python
import matplotlib.pyplot as plt
import numpy as np
def dispersion():
x=np.linspace(-20,20,100)
y=np.sin(x)
    plt.plot(x,y,'-^',color='black') # we could also specify '-ok'
    # the structure of the 3rd argument of plot() is 'line style|marker|color'
    # the scatter markers can be: [o,>,<,^]
    #plt.show()
    # but the best tool for scatter plots is plt.scatter()
plt.scatter(x,y,marker='o')
plt.show()
def dispersionAdvanced():
    # the difference between plt.scatter and plt.plot is that the former lets us assign
    # individual properties to each point (color, size, transparency)
    # we will generate ranges of random values to demonstrate it
rng=np.random.RandomState(0)
x=np.random.randint(10,200,size=100)
y=np.random.randint(20,200,size=100)
color=rng.rand(100)
size=100*rng.rand(100)
plt.scatter(x,y,c='green',s=size,alpha=0.5,cmap='viridis')
plt.axis('tight')
plt.colorbar()
plt.show()
if __name__=="__main__":
dispersionAdvanced()
```
#### File: PythonProyectos/Matplotlib/histogramas.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# histograms are used to represent quantitative variables graphically,
# e.g. the distribution of people by age range, etc.
def histogramas():
    # most histograms are drawn in a simple form
data=np.random.randint(1,100,size=100)
plt.hist(data,bins=6,color='green',histtype='bar',alpha=0.5)
#plt.hist()
plt.show()
    # if what we want is to count the number of occurrences within each given bin
    # without showing the plot, we can use NumPy's histogram function
    # np.histogram(data, bins=10): data -> the data, bins -> the bins (evenly spaced data intervals)
counts,bin_edges=np.histogram(data,bins=6)
for (count,_bin) in zip(counts,bin_edges):
print("{} -> {} ocurrencias".format(_bin,count))
    # we can also reuse code fragments for each hist() plot we need to analyze
data1=np.random.normal(-10,10,100)
data2=np.random.normal(-10,10,100)
data3=np.random.normal(-10,10,100)
    # normed is deprecated, so density is recommended instead
kwargs=dict(histtype='stepfilled',alpha=0.3,density=True,bins=20)
plt.hist(data1,**kwargs)
plt.hist(data2,**kwargs)
plt.hist(data3,**kwargs)
plt.show()
def histograma2d():
    # we can also draw two-dimensional histograms
    # for this we will use a multivariate normal distribution
mean=[0,0]
cov=[[1,1],[1,2]]
x,y=np.random.multivariate_normal(mean,cov,100000).T
plt.hist2d(x,y,bins=20,cmap='Blues')
cb=plt.colorbar()
cb.set_label('Cantidad por contenedores')
plt.show()
def histogramaHex():
mean=[0,0]
cov=[[1,1],[1,2]]
x,y=np.random.multivariate_normal(mean,cov,100000).T
plt.hexbin(x, y, gridsize=30, cmap='Blues')
cb = plt.colorbar(label='count in bin')
plt.show()
if __name__=="__main__":
histogramaHex()
```
#### File: Numpy/Arrays/atributos.py
```python
import numpy as np
import sys
def atributos():
np.random.seed(0)
x1= np.random.randint(2,10,size=6)
x2=np.random.randint(2,10,size=(3,5))
x3=np.random.randint(2,10,size=(3,4,5))
    # ndim = number of dimensions
    # shape = returns the size of each dimension, e.g. (a, b)
    # size = total number of elements
    # dtype = element type
    # itemsize = size of each element of the array in bytes
    # nbytes = total size of the array in bytes
print(x3.ndim)
print(x3.shape)
print(x3.size)
print(x3.itemsize,x3.nbytes)
if __name__=="__main__":
atributos()
```
#### File: Numpy/FuncionesUniversales/Otros.py
```python
import numpy as np
def otros():
x=np.random.randint(10,size=8)
y=np.empty(8)
np.multiply(x,2, out=y)
print(y)
def agregados():
    # reduce and accumulate
x=np.random.randint(9,23,size=20)
z=np.arange(1,4)
print(x)
print(np.add.reduce(x))
print(np.add.accumulate(x))
    # outer operations (ufunc.outer)
f= np.multiply.outer(x,z)
print(f)
if __name__=="__main__":
agregados()
```
#### File: Numpy/FuncionesUniversales/trigonometricos.py
```python
import numpy as np
def trig():
z= np.random.randint(0,10,size=(3,4))
x= np.cos(z)
x=np.sin(z)
x=np.tan(z)
    x=np.arccos(z)
    x=np.arcsin(z)
    x=np.arctan(z)
    # Exponents and logarithms
x=np.exp(z)
x=np.exp2(z)
x=np.power(z,4)
    ## logarithms
x=np.log(z)
x=np.log2(z)
x=np.log10(z)
```
#### File: Pandas/Data Sciensist/alto-rendimiento.py
```python
import pandas as pd
import numpy as np
import numexpr
def rendimiento():
    # the numexpr library provides an eval() function that evaluates string literals as logical Python expressions
data=pd.DataFrame(np.random.randint(12,200,size=(8,4)),columns=list('ABCD'))
    # the Pandas library also provides pd.eval() to operate on these
data_mask=pd.eval('data.A>100')
print(data[data_mask])
    # we can use eval() in Python for in-place column assignments
data.eval('TOTAL=A+B+C+D',inplace=True)
print(data)
    # the particular use of eval() is for sums and column management in a faster and more efficient way
    # we can also use query() and combine it with temporary variables
    data_mean=(data.mean(axis=0)).mean()
    # query() already returns the filtered DataFrame, so print it directly
    data_filtered=data.query('(A>@data_mean )& (D>@data_mean)')
    print(data_filtered)
if __name__=="__main__":
rendimiento()
```
#### File: Pandas/Data Sciensist/combinacion-subconjuntos.py
```python
import pandas as pd
import numpy as np
def make_df(cols='ABCD',ind=range(2)):
info={ c: [str(c)+ str(i) for i in ind]
for c in cols
}
return pd.DataFrame(info,index=ind)
def make_fr(cols=list('ABCD'),ind=range(2)):
info=[[str(i)+str(e) for i in cols] for e in ind]
return pd.DataFrame(info,index=ind,columns=cols)
def combinacion_DataSets():
    # combining datasets with np.concatenate() and pd.concat()
a=np.random.randint(1,200,size=(5,8))
b=np.random.randint(34,890,size=(5,10))
c=np.concatenate([a,b],axis=1)
print(c)
print('Concatenando Series de Pandas')
    # combining datasets with concat from the Pandas package --> pd.concat
    # it applies to both Series and DataFrames
ser1= pd.Series(np.random.randint(1,200,size=3),name='Indice')
ser2=pd.Series(np.random.randint(1,200,size=4),index=range(3,7),name='Indice')
c=pd.concat([ser1,ser2],axis=0)
print(c)
p1= make_fr(list('ABCD'),range(3))
p2=make_fr(list('CDEF'),range(3))
    c1=pd.concat([p1,p2],axis=0,ignore_index=True).fillna(axis=0,method='ffill') # axis='columns' could also have been used
print(c1)
    # recommendation: ignore_index=True --> when the index does not matter, ignoring it is recommended for a clean concatenation
    # adding labels to the data sources with keys
c1=pd.concat([p1,p2],axis=0,keys=['Datos 1','Datos 2'])
print(c1)
    # concat also lets us specify the type of join for the data
print("Fusion de datoss con join='inner'")
    # the default is pd.concat(join='outer'), so we can switch to join='inner' so that the columns are intersected
c1=pd.concat([p1,p2],axis=0,keys=['D1','D2'],join='inner')
print(c1)
print("Podemos lograr un efecto similar a pd.concat() con append()")
c1=p1.append(p2)
print(c1)
if __name__=="__main__":
combinacion_DataSets()
```
#### File: Pandas/Data Sciensist/pivot-tables.py
```python
import numpy as np
import pandas as pd
from seaborn import load_dataset
import matplotlib.pyplot as plt
def pivot():
    # we can use pivot_table to show a better view
    # pivot_table uses the mean() aggregation function by default via aggfunc='mean'
data=load_dataset('titanic')
a=data.pivot_table('survived',index='sex',columns='class')
print(a)
    # we can use pd.cut() to add more indices
ages=pd.cut(data['age'],[0,18,80])
a=data.pivot_table('survived',index=['sex',ages],columns='class')
print(a)
    # pivot_table also has fill_value to assign default values
    # dropna: drops columns that have no data, True by default
    # we can plot the data with matplotlib
def datosAdvanced():
data=pd.read_csv('births.csv',sep=',')
a=data.pivot_table('births',index='year',columns='gender',aggfunc='sum')
a.plot()
plt.ylabel('Total de nacimientos por decada')
plt.show()
def explorarcion_datos():
pass
    # to be implemented soon
if __name__=="__main__":
datosAdvanced()
```
#### File: PythonProyectos/Pandas/readcsv.py
```python
import pandas as pd
import matplotlib.pyplot as plt
def read(csv):
#cells=['ID','Nombre','Apellido','Nacimiento','Domicilio','Email','Observacion','DNI']
data=pd.read_csv(csv)
data.head()
print(data)
print(data["nombre"])
if __name__=="__main__":
read(r"D:\User\PythonProyectos\Pandas\Pacientes.csv")
```
#### File: PythonProyectos/Pandas/readexcel.py
```python
import pandas as pd
import sys
import matplotlib.pyplot as plt
def readExcel(url):
data= pd.ExcelFile(url)
print(data.sheet_names)
data_gender= data.parse('Paciente',skiprows=0)#skipfooter=0,names=[)
data_gender.set_index('idPais',inplace=True)
data_gender.plot()
plt.show()
print(data_gender)
print(type(data))
print(type(data_gender))
if __name__=="__main__":
if len(sys.argv)<2:
print("Ingrese una <PATH> relativa")
exit(0)
try:
readExcel(sys.argv[1])
except Exception as e:
print(e)
```
#### File: PythonProyectos/Pytest/face_detector.py
```python
import cv2
import sys
def identifier(path):
cascadePath=r"D:\User\PythonProyectos\Intellicense\haarcascade_frontalface_alt.xml"
detector=cv2.CascadeClassifier(cascadePath)
image=cv2.imread(path)
image_gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    # locate the faces in the image
array_faces=detector.detectMultiScale(
image_gray,
scaleFactor=1.2,
minNeighbors=2,
minSize=(1,1)
)
    # draw the faces on the image at the coordinates found
for (x,y,w,h) in array_faces:
cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)
print("{} rostros encontrados".format(len(array_faces)))
    # show the image
cv2.imshow("{} Faces found".format(len(array_faces)),image)
    if (cv2.waitKey(0) & 0xFF) == ord('q'):
return
if __name__=="__main__":
if len(sys.argv)<2:
print("Debe especifica una imagen como ruta de acceso")
exit(0)
try:
identifier(sys.argv[1])
except Exception as e:
print(e)
```
#### File: PythonProyectos/WordCount/wordcount.py
```python
import sys
def word_count(text):
word= text.split(" ")
return len(word)
def lines_count(text):
lines= text.split("\n")
for l in lines:
if not l :
lines.remove(l)
return len(lines)
if __name__=="__main__":
if len(sys.argv)<2:
print("Ingrese la ruta del archivo",file=sys.stderr)
exit(1)
filename= sys.argv[1]
with open(filename,"r") as f:
text=f.read()
num_word=word_count(text)
num_lines=lines_count(text)
print("Number of Words:",num_word)
print("Number of lines:",num_lines)
``` |
{
"source": "JovanCe/mfp",
"score": 2
} |
#### File: mfp/test/run_benchmark_parallel.py
```python
__author__ = '<NAME> <<EMAIL>>'
__date__ = '26 September 2015'
__copyright__ = 'Copyright (c) 2015 Seven Bridges Genomics'
import multiprocessing
import time
import json
import maxflow
import util
from graph.graph import DIMACSGraphFactory
class Worker(object):
def __init__(self, density, batch_size):
self._density = density
self._batch_size = batch_size
def measure_execution_time(self, func, *args):
t1 = time.clock()
func(*args)
t2 = time.clock()
return t2 - t1
def _run_maxflow(self, maxflow_func, data_file):
flow_network = DIMACSGraphFactory.create(util.get_data_file(data_file))
return self.measure_execution_time(maxflow_func, flow_network)
def _run_by_density(self, maxflow_func):
results = [self._run_maxflow(maxflow_func, '%s_%d.txt' % (self._density, i))
for i in range(self._start, self._start + self._batch_size)]
return results
def _run_batch(self):
results = {
'ek': self._run_by_density(maxflow.edmonds_karp),
'df': self._run_by_density(maxflow.edmonds_karp_dfs),
'ff': self._run_by_density(maxflow.ford_fulkerson),
'pq': self._run_by_density(maxflow.ford_fulkerson_pq),
'cs': self._run_by_density(maxflow.capacity_scaling),
'pr': self._run_by_density(maxflow.generic_push_relabel),
'rf': self._run_by_density(maxflow.relabel_to_front)
}
return results
def __call__(self, ord):
self._ord = ord + 1
self._start = ord * self._batch_size + 1
results = self._run_batch()
        with open(util.get_out_file('worker', '%s_%d.txt' % (self._density, self._ord)), 'w') as f:
json.dump(results, f)
print "WORKER %d DONE" % self._ord
def _execute(density):
num_process = 10
batch_size = 20
pool = multiprocessing.Pool(processes=num_process)
pool.map(Worker(density, batch_size), range(num_process))
pool.close()
pool.join()
results = {
'ff': [],
'pq': [],
'ek': [],
'df': [],
'cs': [],
'pr': [],
'rf': []
}
for i in range(num_process):
res = json.load(open(util.get_out_file('worker', '%s_%d.txt' % (density, i+1))))
for k, v in res.items():
results[k].extend(v)
with open(util.get_out_file('%s.txt' % density), 'w') as f:
json.dump(results, f)
print '%s DONE' % density.upper()
if __name__ == '__main__':
_execute('sparse')
_execute('medium')
_execute('dense')
```
#### File: mfp/test/run_benchmark.py
```python
__author__ = '<NAME> <<EMAIL>>'
__date__ = '08 September 2015'
__copyright__ = 'Copyright (c) 2015 Seven Bridges Genomics'
import time
import json
import maxflow
import util
from graph.graph import DIMACSGraphFactory
def measure_execution_time(func, *args):
t1 = time.clock()
func(*args)
t2 = time.clock()
return t2 - t1
def _run_maxflow(maxflow_func, data_file):
flow_network = DIMACSGraphFactory.create(util.get_data_file(data_file))
return measure_execution_time(maxflow_func, flow_network)
def _run_by_density(density, maxflow_func):
results = [_run_maxflow(maxflow_func, '%s_%d.txt' % (density, i+1)) for i in range(200)]
return results
def _run_batch(density):
results = {
'ek': _run_by_density(density, maxflow.edmonds_karp),
'df': _run_by_density(density, maxflow.edmonds_karp_dfs),
'ff': _run_by_density(density, maxflow.ford_fulkerson),
'pq': _run_by_density(density, maxflow.ford_fulkerson_pq),
'cs': _run_by_density(density, maxflow.capacity_scaling),
'pr': _run_by_density(density, maxflow.generic_push_relabel),
'rf': _run_by_density(density, maxflow.relabel_to_front)
}
return results
if __name__ == '__main__':
sparse_results = _run_batch('sparse')
with open(util.get_out_file('sparse.txt'), 'w') as f:
json.dump(sparse_results, f)
medium_results = _run_batch('medium')
with open(util.get_out_file('medium.txt'), 'w') as f:
json.dump(medium_results, f)
dense_results = _run_batch('dense')
with open(util.get_out_file('dense.txt'), 'w') as f:
json.dump(dense_results, f)
``` |
{
"source": "jovandeginste/jenkins-jobs-mattermost",
"score": 2
} |
#### File: jenkins-jobs-mattermost/jenkins_jobs_mattermost/mattermost.py
```python
import xml.etree.ElementTree as XML
def mattermost_publisher(parser, xml_parent, data):
"""yaml: mattermost
Example::
publishers:
- mattermost:
notify-start: true
notify-success: true
notify-aborted: true
notify-notbuilt: true
notify-unstable: true
notify-failure: true
notify-backtonormal: true
notify-repeatedfailure: true
include-test-summary: true
show-commit-list: true
endpoint: example.com
room: '#jenkins'
icon: 'http://url.to/image.png'
custom-message: message
"""
if data is None:
data = dict()
notifier = XML.SubElement(
xml_parent, 'jenkins.plugins.mattermost.MattermostNotifier')
notifier.set('plugin', '@1.0')
for opt, attr in (('notify-start', 'startNotification'),
('notify-success', 'notifySuccess'),
('notify-aborted', 'notifyAborted'),
('notify-notbuilt', 'notifyNotBuilt'),
('notify-unstable', 'notifyUnstable'),
('notify-failure', 'notifyFailure'),
('notify-backtonormal', 'notifyBackToNormal'),
('notify-repeatedfailure', 'notifyRepeatedFailure'),
('include-test-summary', 'includeTestSummary'),
('show-commit-list', 'showCommitList')):
(XML.SubElement(notifier, attr)
.text) = 'true' if data.get(opt, True) else 'false'
for opt, attr in (('endpoint', 'endpoint'),
('room', 'room'),
('icon', 'icon')):
(XML.SubElement(notifier, attr)
.text) = data.get(opt)
if data.get('include-custom-message'):
(XML.SubElement(notifier, 'includeCustomMessage')
.text) = 'true'
(XML.SubElement(notifier, 'customMessage')
.text) = data.get('custom-message')
else:
(XML.SubElement(notifier, 'includeCustomMessage')
.text) = 'false'
(XML.SubElement(notifier, 'customMessage')
.text) = ''
``` |
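Outside of a full Jenkins Job Builder run, the publisher can be exercised directly: the registry argument is unused by this function, so `None` is enough for a standalone check. The endpoint, room, and icon below are example values.
```python
# Illustrative only; endpoint/room/icon are example values.
import xml.etree.ElementTree as XML
from jenkins_jobs_mattermost.mattermost import mattermost_publisher

publishers = XML.Element('publishers')
mattermost_publisher(None, publishers, {
    'endpoint': 'https://mattermost.example.com/hooks/xyz',
    'room': '#jenkins',
    'icon': 'http://url.to/image.png',
})
print(XML.tostring(publishers).decode())
```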
{
"source": "JovaneJames/CollectionOfExercises",
"score": 4
} |
#### File: JovaneJames/CollectionOfExercises/ex4.py
```python
def ex4():
running = True
shortest_word = ""
longest_word = ""
while running:
try:
user_input = input("Please input a line of text: ").split()
if len(user_input) == 0:
print("Invalid input")
continue
print(f"String input from the user \"{user_input}\"")
longest_word, running, shortest_word = figure_out_which_is_shortest_and_longest(longest_word, shortest_word, user_input)
except IOError as e:
print(e)
print(f"The length of the shortest word '{shortest_word}' is {len(shortest_word)}")
print(f"The length of the shortest word '{longest_word}' is {len(longest_word)}")
def figure_out_which_is_shortest_and_longest(longest_word, shortest_word, user_input):
for word in user_input:
if len(shortest_word) == 0 or len(word.casefold()) < len(shortest_word.casefold()):
shortest_word = word
if len(longest_word) == 0 or len(word) > len(longest_word.casefold()):
longest_word = word
print(word)
running = False
return longest_word, running, shortest_word
```
#### File: JovaneJames/CollectionOfExercises/ex7.py
```python
def ex7():
running = True
while running:
try:
user_input = int(input("Enter an integer: "))
            if user_input == 1:
                print(f"Btw the integer {user_input} is not a prime number, you know that right?")
                continue
            if user_input < 2:
                print("Invalid input")
                continue
if user_input in range(4):
print(f"{user_input} is a prime integer")
check_for_prime(user_input)
running = False
except ValueError as v_error:
print(v_error)
def check_for_prime(user_input):
    for i in range(2, user_input):
        if user_input % i == 0:
            print(f"{user_input} is not a prime number")
            break
    else:
        # for-else: runs only when no divisor was found, so the number is prime
        print(f"{user_input} is a prime number")
```
#### File: JovaneJames/CollectionOfExercises/exercises.py
```python
from ex1 import ex1
from ex2 import ex2
from ex3 import ex3
from ex4 import ex4
from ex5 import ex5
from ex6 import ex6
from ex7 import ex7
from ex8 import ex8
from ex9 import ex9
from ex10 import ex10
def select_exercise():
# list_of_exercises = [None, ex1, ex2, ex3, ex4, ex5, ex6, ex7, ex8, ex9, e10]#e11
# running = True
# while running:
# line = input("Select an exercise (0 or 'q' to quit): ")
# if line == "0" or line == "q":
# running = False
# elif len(line) == 1 and "1" <= line <= "11":
# list_of_exercises[int(line)]()
# else:
# print("Invalid input - try again")
ex10()
if __name__ == '__main__':
select_exercise()
```
#### File: JovaneJames/CollectionOfExercises/validation_handler.py
```python
def check_if_input_is_positive(user_input, token):
    # checks if the user input is a positive integer; if not, prompts the user until it is and then returns the result
if user_input <= 0:
running = True
if token == "bin<PASSWORD>_k" and user_input == 0:
return 0
while running:
print("Please enter a positive integer")
user_input = determine_message_to_be_shown(user_input, token)
if user_input > 0.0:
running = False
return user_input
def determine_message_to_be_shown(user_input, token):
match token:
case "height" | "width":
return float(input(f"Enter the {token} of the triangle: "))
case "fibonacci":
return int(input("How many numbers of the fibonacci sequence should be displayed? "))
case "binomial_n":
return int(input("Enter the value of integer n: "))
case "binomial_k" if user_input < 0:
return int(input("Enter the value of integer k: "))
case "line of text":
return input("Please input a line of text: ")
case "rpw" | "column" :
return int(input(f"Enter how many {token} you want to be displayed: "))
``` |
{
"source": "Jovanez/flask",
"score": 3
} |
#### File: Jovanez/flask/filme.py
```python
from flask import render_template,redirect,url_for, Blueprint, request, json, session
from banco import Filme, Programacao, Cinema, DiaSemana, Sessao, Sala, TipoAudio, TipoTela
from peewee import DoesNotExist
import datetime
filme = Blueprint("filme", __name__,static_folder='static', url_prefix="/filme")
@filme.route("/sessoes/<int:codigoCinema>/<int:codigoDia>/<int:codigoFilme>")
def sessoesJson(codigoCinema, codigoFilme, codigoDia):
listaGeral = Sessao.select(Sessao).where((Sessao.dataHora > datetime.datetime.now()) & (
Sala.cinema == codigoCinema) & (Sessao.filme == codigoFilme)).join(Sala).group_by(Sessao.codigo)
listaVazio = []
for s in listaGeral:
if s.dataHora.strftime("%w") == str(codigoDia):
print ("fiu")
listaVazio.append(s)
return render_template("painelSessoesFilme.html", sessoes=listaVazio)
@filme.route("/diasJson/<int:codigoCinema>")
def diasJson(codigoCinema):
listaDiasSemana = DiaSemana.select().where((DiaSemana.cinema == codigoCinema)).order_by(DiaSemana.numero)
return render_template('painelDiasFilme.html', dias=listaDiasSemana)
@filme.route("/", methods=['POST', 'GET'])
def index():
session['url'] = 'filme/'
listaCinemas = Cinema.select()
filmes = (Filme.select(Filme).where((Programacao.periodoInicio < datetime.datetime.now()) & (
Programacao.periodoFinal > datetime.datetime.now())).join(Programacao).group_by(Filme.codigo))
if request.method == 'GET':
return render_template("product.html", listaCinemas=listaCinemas, listaFilmes=filmes)
else:
codigoCinema = request.form['cinema']
if 'codigoFilme' in request.form:
codigo = request.form['codigoFilme']
if codigo:
try:
filmes = (Filme.select(Filme).where((Programacao.periodoInicio < datetime.datetime.now()) & (
Programacao.periodoFinal > datetime.datetime.now()) & (Programacao.cinema == codigoCinema) & (Filme.codigo == codigo)).join(Programacao).group_by(Filme.codigo))
except DoesNotExist:
pass
if 'nomeFilme' in request.form:
nome = request.form['nomeFilme']
if nome:
try:
filmes = (Filme.select(Filme).where((Programacao.periodoInicio < datetime.datetime.now()) & (
Programacao.periodoFinal > datetime.datetime.now()) & (Programacao.cinema == codigoCinema) & (Filme.nome.contains(nome))).join(Programacao).group_by(Filme.codigo))
except DoesNotExist:
pass
return render_template("product.html", listaCinemas=listaCinemas, listaFilmes=filmes)
@filme.route("/detalhes", methods=['POST', 'GET'])
def detalhes():
if request.method == 'POST':
session['url'] = '/detalhes'
codigo = request.form['codigo']
filme = None
cinemas = None
try:
filme = Filme.select(Filme).where(
(Filme.codigo == codigo) & (
Programacao.filme == Filme.codigo) & (
Programacao.periodoInicio < datetime.datetime.now()) & (
Programacao.periodoFinal > datetime.datetime.now()) & (
Programacao.cinema == Cinema.codigo)
).join(Programacao).join(Cinema).get()
recomendados = Filme.select().where((Filme.codigo != codigo) & (Filme.genero == filme.genero))
except DoesNotExist:
return redirect("/")
try:
cinemas = Cinema.select(Cinema).where((Programacao.filme == codigo)& (Programacao.cinema == Cinema.codigo)).join(Programacao).join(Filme).group_by(Cinema.codigo)
except DoesNotExist:
return redirect("/")
return render_template("product-detail.html", filme=filme, listaCinema=cinemas, recomendados=recomendados)
@filme.route("/teste")
def teste():
filmes = (Filme.select(Filme).where((Programacao.periodoInicio < datetime.datetime.now()) & (Programacao.periodoFinal > datetime.datetime.now())).join(Programacao).group_by(Filme.codigo))
string = ""
for ob in filmes:
string+="Filme: %s \n "%(ob.nome)
return string
def teste2():
filmes = (Programacao.select(Programacao,Filme).where((Programacao.periodoInicio < datetime.datetime.now()) & (Programacao.periodoFinal > datetime.datetime.now())).join(Filme))
string = ""
for ob in filmes:
string+="Filme: %s Data:%s "%(ob.filme.nome,ob.periodoInicio.strftime(' %d, %b %Y'))
return string
``` |
{
"source": "JovaniPink/awesomenbadata-api",
"score": 2
} |
#### File: app/views/landing.py
```python
from flask import Blueprint, redirect, render_template, current_app
from flask import (
request,
url_for,
flash,
send_from_directory,
jsonify,
render_template_string,
)
from flask_login import current_user, login_required
# When using a Flask app factory we must use a blueprint to avoid needing 'app' for '@app.route'
main_blueprint = Blueprint("main", __name__, template_folder="templates")
@main_blueprint.route("/")
def member_page():
if current_user.is_authenticated:
return redirect(url_for("main.home_page"))
return render_template("pages/global.html")
@main_blueprint.route("/home")
@login_required
def home_page():
return render_template("pages/home.html")
``` |
{
"source": "JovaniPink/belly-graphs-dash",
"score": 3
} |
#### File: app/dashbelly/callbacks.py
```python
from datetime import datetime as dt
import pandas_datareader as pdr
from dash.dependencies import Input
from dash.dependencies import Output
def register_callbacks(dashapp):
@dashapp.callback(Output("my-graph", "figure"), [Input("my-dropdown", "value")])
def update_graph(selected_dropdown_value):
df = pdr.get_data_yahoo(
selected_dropdown_value, start=dt(2017, 1, 1), end=dt.now()
)
return {
"data": [{"x": df.index, "y": df.Close}],
"layout": {"margin": {"l": 40, "r": 0, "t": 20, "b": 30}},
}
``` |
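`register_callbacks` only attaches the callback, so a Dash app with matching `my-dropdown` and `my-graph` component ids has to exist elsewhere. A minimal host layout that would satisfy it is sketched below (Dash 2.x API assumed; tickers are placeholders):

```python
# Hypothetical host app for the callback above; component ids must match the callback.
import dash
from dash import dcc, html
from app.dashbelly.callbacks import register_callbacks  # assumed import path

dashapp = dash.Dash(__name__)
dashapp.layout = html.Div([
    dcc.Dropdown(id="my-dropdown",
                 options=[{"label": s, "value": s} for s in ["AAPL", "MSFT"]],
                 value="AAPL"),
    dcc.Graph(id="my-graph"),
])
register_callbacks(dashapp)

if __name__ == "__main__":
    dashapp.run_server(debug=True)
```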
{
"source": "JovaniPink/cocktail-data",
"score": 3
} |
#### File: JovaniPink/cocktail-data/cocktail_data.py
```python
import json
import os
import typing
temp_dict = {}
def map_values(obj, fn):
ret = {}
for key in obj.keys():
ret[key] = fn(obj[key])
return ret
def json_extract(obj, key):
"""Recursively fetch values from nested JSON."""
arr = {}
def extract(obj, arr, key):
"""Recursively search for values of key in JSON tree."""
if isinstance(obj, dict):
for k, v in obj.items():
if isinstance(v, (dict, list)) and k != key:
extract(v, arr, key)
elif k == key:
arr[obj["id"]] = obj[key]
elif isinstance(obj, list):
for item in obj:
extract(item, arr, key)
return arr
values = extract(obj, arr, key)
return values
# def double_check_dict(dict):
# return(key, value in dict.items() if value == val)
def find_keys(dict, val):
return list(key for key, value in dict.items() if value == val and isinstance(value, (dict, list)))
file_to_load = os.path.join("data/cocktails.json")
with open(file_to_load, "r") as cocktail_data:
cocktail_data_loaded = json.load(cocktail_data)
# json.dump(json_extract(cocktail_data_loaded, "ingredients"), open("out.json", "w"))
temp_dict = find_keys(json_extract(cocktail_data_loaded, "ingredients"), "Cranberry Juice")
print(temp_dict)
``` |
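To see what `json_extract` returns, here is a tiny hand-made structure in the shape the script appears to assume for `cocktails.json` (objects carrying an `id` plus the requested key); run it in the same session as the helpers above, since importing the script would also execute the file-loading code at the bottom:

```python
# Illustrative data only; the real cocktails.json layout is assumed, not known.
sample = {
    "cocktails": [
        {"id": 1, "name": "Cosmopolitan", "ingredients": ["Vodka", "Cranberry Juice"]},
        {"id": 2, "name": "Mojito", "ingredients": ["Rum", "Mint", "Lime"]},
    ]
}
print(json_extract(sample, "ingredients"))
# -> {1: ['Vodka', 'Cranberry Juice'], 2: ['Rum', 'Mint', 'Lime']}
```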
{
"source": "JovaniPink/masky",
"score": 3
} |
#### File: masky/app/__init__.py
```python
from datetime import datetime
import json, os, re, base64
import connexion
from flask_marshmallow import Marshmallow
from flask import (
Flask,
render_template,
request,
jsonify,
make_response,
send_from_directory,
)
from flask_wtf.csrf import CSRFProtect
from werkzeug.utils import secure_filename
# import app.predictor
basedir = os.path.abspath(os.path.dirname(__file__))
# Instantiate Flask extensions
ma = Marshmallow()
csrf = CSRFProtect()
# https://flask.palletsprojects.com/en/0.12.x/patterns/appfactories/
def create_app(extra_config_settings={}):
"""Create a Flask application."""
# Create the connexion application instance
app = connexion.FlaskApp(__name__, specification_dir=basedir)
# Read the openapi.yaml file to configure the endpoints
app.add_api("openapi.yaml")
application = app.app
# Load App Config settings
# Load common settings from 'app/settings.py' file
application.config.from_object("app.settings")
# Load local settings from 'app/local_settings.py'
application.config.from_object("app.local_settings")
# Load extra config settings from 'extra_config_settings' param
application.config.update(extra_config_settings)
# Setup Flask-Extensions -- do this _after_ app config has been loaded
# We are doing this because our web application could have different
    # config files depending on the server environment and context.
# Setup Marshmallow
ma.init_app(application)
# Setup CSRF
csrf.init_app(application)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/about")
def about():
return render_template("about.html")
@app.route("/features")
def features():
return render_template("features.html")
@app.route("/<path:filename>")
def locations_json(filename):
return send_from_directory("static", filename)
@app.route("/photo_capture", methods=["POST"])
@csrf.exempt
def process_capture():
req = request.get_json()
header, encoded = req["photo_cap"].split(",", 1)
binary_data = base64.b64decode(encoded)
image_name = "capture.jpeg"
with open(
os.path.join(application.config["IMAGE_UPLOADS"], image_name), "wb"
) as f:
f.write(binary_data)
# facial recognition operations
response = {"msg": "success", "size": 20}
return make_response(jsonify(response=response), 200)
return application
``` |
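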
{
"source": "JovaniPink/orlando-job-market-data",
"score": 4
} |
#### File: orlando-job-market-data/config/set_matching_data.py
```python
import json
class MatchingData:
@staticmethod
def make_matching_data():
# Prompt the user for matching keywords
print(
"\nTERMS MATCHING: Let's make your search more specific!"
"\tNow you will have the chance to introduce keywords you would like the job titles and description"
"to contain"
)
user_input = True
title_selected_keywords = []
while user_input:
term = input(
"\nEnter a term you want in the title"
"\n\tIf you don't want to add more title words, hit enter\n"
).lower()
if len(term) < 1:
user_input = False
break
title_selected_keywords.append(term)
user_input = True
title_discarded_keywords = []
while user_input:
term = input(
"\n\nEnter a term DON'T want in the job title"
"\n\tIf you don't want to add more title words, hit enter\n"
).lower()
if len(term) < 1:
user_input = False
break
title_discarded_keywords.append(term)
user_input = True
description_selected_keywords = []
while user_input:
term = input(
"\n\nEnter a term you want in the job description\n"
"\tIf you don't want to add more description words, hit enter\n"
).lower()
if len(term) < 1:
user_input = False
break
description_selected_keywords.append(term)
user_input = True
description_discarded_keywords = []
while user_input:
term = input(
"\n\nEnter a term you DON'T want in the job description\n"
"\tIf you don't want to add more words, hit enter\n"
).lower()
if len(term) < 1:
user_input = False
break
description_discarded_keywords.append(term)
# Make the keyword matching dictionary to later append the matching data
# and save it to a json file
matching_data = {
"titleMatching": {"select": [], "discard": []},
"descriptionMatching": {"select": [], "discard": []},
}
# Add selected terms for title
for i in range(len(title_selected_keywords)):
matching_data["titleMatching"]["select"].append(
{"term": title_selected_keywords[i]}
)
# Add discarded terms for title
for i in range(len(title_discarded_keywords)):
matching_data["titleMatching"]["discard"].append(
{"term": title_discarded_keywords[i]}
)
# Add selected terms for description
for i in range(len(description_selected_keywords)):
matching_data["descriptionMatching"]["select"].append(
{"term": description_selected_keywords[i]}
)
# Add discarded terms for description
for i in range(len(description_discarded_keywords)):
matching_data["descriptionMatching"]["discard"].append(
{"term": description_discarded_keywords[i]}
)
# Dump matching data into json file
with open("config/matching_terms.json", "w", encoding="utf-8") as mt:
json.dump(matching_data, mt, ensure_ascii=False, indent=4)
```
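A hedged sketch of how this class is meant to be driven: call the static method, answer the prompts, and read the JSON it writes (the import assumes `config` is importable as a package, which may not match the project layout):

```python
# Hypothetical driver for MatchingData (run from the repository root).
import json
from config.set_matching_data import MatchingData  # assumed import path

MatchingData.make_matching_data()  # interactive prompts fill the four keyword lists
with open("config/matching_terms.json", encoding="utf-8") as mt:
    terms = json.load(mt)
print(terms["titleMatching"]["select"])
# e.g. [{'term': 'python'}, {'term': 'data engineer'}] if those terms were entered
```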
#### File: orlando-job-market-data/indeed_jobs_crawler/data_matcher.py
```python
class MatchJob:
def __init__(
self,
titles,
descriptions,
locations,
companies,
salaries,
ratings,
urls,
days,
discarded_title_terms,
# discarded_desc_terms,
selected_title_terms,
# selected_description_terms,
):
self.keep_titles = []
self.keep_description = []
self.keep_locations = []
self.keep_companies = []
self.keep_salaries = []
self.keep_ratings = []
self.keep_urls = []
self.keep_days = []
self.titles = titles
self.descriptions = descriptions
self.locations = locations
self.companies = companies
self.salaries = salaries
self.ratings = ratings
self.urls = urls
self.days = days
self.discarded_title_items = discarded_title_terms
# self.discarded_desc_terms = discarded_desc_terms
self.selected_title_terms = selected_title_terms
# self.selected_description_terms = selected_description_terms
def keep_job_data(self, i):
self.keep_titles.append(self.titles[i])
self.keep_description.append(self.descriptions[i])
self.keep_locations.append(self.locations[i])
self.keep_companies.append(self.companies[i])
self.keep_salaries.append(self.salaries[i])
self.keep_ratings.append(self.ratings[i])
self.keep_urls.append(self.urls[i])
self.keep_days.append(self.days[i])
def check_title_has_selected_terms(self, title):
if self.selected_title_terms:
if any(term in title.lower() for term in self.selected_title_terms):
return True
else:
return False
# def check_description_has_selected_terms(self, description):
# if self.selected_description_terms:
# if any(
# term in description.lower() for term in self.selected_description_terms
# ):
# return True
# else:
# return False
def check_title_has_discarded_terms(self, title):
if self.discarded_title_items:
if any(term in title.lower() for term in self.discarded_title_items):
return True
else:
return False
# def check_description_has_discarded_terms(self, description):
# if self.discarded_desc_terms:
# if any(term in description.lower() for term in self.discarded_desc_terms):
# return True
# else:
# return False
# def matching(self):
# for t, d in zip(self.titles, self.descriptions):
# if self.check_title_has_selected_terms(
# t
# ) or self.check_description_has_selected_terms(d):
# if not self.check_title_has_discarded_terms(
# t
# ) or not self.check_description_has_discarded_terms(d):
# idx = self.titles.index(t)
# self.keep_job_data(idx)
def get_targeted_jobs_data(self):
if (
self.selected_title_terms
# or self.selected_description_terms
or self.discarded_title_items
# or self.discarded_desc_terms
):
# self.matching()
return (
self.keep_titles,
self.keep_description,
self.keep_locations,
self.keep_companies,
self.keep_salaries,
self.keep_ratings,
self.keep_urls,
self.keep_days,
)
else:
return (
self.titles,
self.descriptions,
self.locations,
self.companies,
self.salaries,
self.ratings,
self.urls,
self.days,
)
``` |
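A small sketch of how MatchJob is driven. Note that with `matching()` commented out above, passing keyword lists currently yields empty `keep_*` lists, so this example passes no keywords and the scraped data flows through unchanged (all values are invented for illustration):

```python
# Illustrative input only; field values are made up for the example.
job = MatchJob(
    titles=["Python Developer", "Sales Manager"],
    descriptions=["Build ETL pipelines", "Own the sales funnel"],
    locations=["Orlando, FL", "Tampa, FL"],
    companies=["Acme", "Globex"],
    salaries=["$95,000", "$70,000"],
    ratings=[4.1, 3.5],
    urls=["https://example.com/job/1", "https://example.com/job/2"],
    days=["1 day ago", "3 days ago"],
    discarded_title_terms=[],
    selected_title_terms=[],
)
titles, *rest = job.get_targeted_jobs_data()
print(titles)  # ['Python Developer', 'Sales Manager'] -- passed through untouched
```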
{
"source": "JovaniPink/surfs-up",
"score": 3
} |
#### File: JovaniPink/surfs-up/app_refactored.py
```python
__version__ = "0.1.0"
import os
from datetime import date, datetime, timedelta
import connexion
from flask.templating import render_template
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
# @connex_app.route("/api/v1.0/precipitation")
def precipitation():
db.create_all()
precipitation = Measurement.query.all()
print(type(precipitation))
precipitation_schema = MeasurementSchema(many=True)
print(type(precipitation_schema))
prep_sch_json = precipitation_schema.dump(precipitation)
print(type(prep_sch_json))
return prep_sch_json
# @connex_app.route("/api/v1.0/stations")
def stations():
return
# @connex_app.route("/api/v1.0/tobs")
def temp_monthly():
return
# @connex_app.route("/api/v1.0/temp/<start>")
# @connex_app.route("/api/v1.0/temp/<start>/<end>")
def stats():
return
# Create the connexion application instance
connex_app = connexion.FlaskApp(__name__)
# Read the openapi.yaml file to configure the endpoints
connex_app.add_api("openapi.yaml")
# Get the underlying Flask app instance
app = connex_app.app
# basedir = os.path.abspath(os.path.dirname(__file__))
# # Build the Sqlite ULR for SQLAlchemy
# sqlite_url = "sqlite:////" + os.path.join(basedir, "hawaii.db")
# Configure the SQLAlchemy part of the app instance
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///notebook/hawaii.db"
app.config["SQLALCHEMY_ECHO"] = True
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
# Flask-SQLAlchemy makes using the database outside of a Flask context difficult.
# I ran into this issue when I wanted to still use my SQLAlchemy scripts in FastAPI.
# Create the SQLAlchemy db instance
db = SQLAlchemy(app)
class Measurement(db.Model):
__tablename__ = "measurement"
id = db.Column(db.Integer, primary_key=True)
station = db.Column(db.String)
date = db.Column(db.String)
prcp = db.Column(db.Float)
tobs = db.Column(db.Float)
class Station(db.Model):
__tablename__ = "station"
id = db.Column(db.Integer, primary_key=True)
station = db.Column(db.String)
name = db.Column(db.String)
latitude = db.Column(db.Float)
longitude = db.Column(db.Float)
elevation = db.Column(db.Float)
# Initialize Marshmallow
ma = Marshmallow(app)
class MeasurementSchema(ma.SQLAlchemySchema):
class Meta:
model = Measurement
sqla_session = db.session
id = ma.auto_field()
station = ma.auto_field()
date = ma.auto_field()
prcp = ma.auto_field()
tobs = ma.auto_field()
class StationSchema(ma.SQLAlchemySchema):
class Meta:
model = Station
sqla_session = db.session
id = ma.auto_field()
station = ma.auto_field()
name = ma.auto_field()
latitude = ma.auto_field()
longitude = ma.auto_field()
elevation = ma.auto_field()
db.init_app(app)
# Create a URL route in our application for "/"
@connex_app.route("/")
def index():
"""
This function just responds to the browser URL
localhost:5000/
:return: the rendered template "index.html"
"""
return render_template("index.html")
if __name__ == "__main__":
connex_app.run(debug=True)
``` |
{
"source": "JovanMarkovic99/blockade-board-game",
"score": 4
} |
#### File: JovanMarkovic99/blockade-board-game/board.py
```python
import heapq
from copy import deepcopy, copy
from math import inf
class Board:
def __init__(self, rows, columns, player_1_pawns, player_2_pawns):
self.rows = rows
self.columns = columns
self.num_placed_walls = 0
self.player_1_pawns = deepcopy(player_1_pawns)
self.player_2_pawns = deepcopy(player_2_pawns)
self.player_1_start = (copy(player_1_pawns[0]), copy(player_1_pawns[1]))
self.player_2_start = (copy(player_2_pawns[0]), copy(player_2_pawns[1]))
self.board = [[BoardSquare() for _ in range(columns)] for _ in range(rows)]
self.board[player_1_pawns[0][0]][player_1_pawns[0][1]].set_start('X')
self.board[player_1_pawns[1][0]][player_1_pawns[1][1]].set_start('X')
self.board[player_2_pawns[0][0]][player_2_pawns[0][1]].set_start('O')
self.board[player_2_pawns[1][0]][player_2_pawns[1][1]].set_start('O')
def print_board(self):
for i in range(2 * self.rows + 3):
# When i and j are divisible by 2 index_i and index_j are board coordinates
index_i = i // 2 - 1
for j in range(2 * self.columns + 3):
index_j = j // 2 - 1
# Top-bottom border
if i == 0 or i == 2 * self.rows + 2:
if j == 0 or j == 2 * self.columns + 2 or j % 2 == 1:
print(" ", end="")
else:
print(self.matrix_index_to_board_index(index_j), end="")
elif i == 1 or i == 2 * self.rows + 1:
if j == 0 or j == 2 * self.columns + 2 or j % 2 == 1:
print(" ", end="")
else:
print("=", end="")
# Square rows
elif i % 2 == 0:
# Left-right border
if j == 0 or j == 2 * self.columns + 2:
print(self.matrix_index_to_board_index(index_i), end="")
elif j == 1 or j == 2 * self.columns + 1:
print("‖", end="")
# Squares
elif j % 2 == 0:
print(self.board[index_i][index_j].center, end="")
# Left-right walls
else:
print("‖" if self.board[index_i][index_j].right else "|", end="")
# Top-bottom wall rows
# Top-bottom walls
elif j != 0 and j != 2 * self.columns + 2 and j % 2 == 0:
print("=" if self.board[index_i][index_j].bottom else "—", end="")
else:
print(" ", end="")
print()
def game_end(self):
return self.player_1_pawns[0] in self.player_2_start or self.player_1_pawns[1] in self.player_2_start or \
self.player_2_pawns[0] in self.player_1_start or self.player_2_pawns[1] in self.player_1_start
def valid_pawn_move(self, player, pawn_index, row, column, print_failure=True):
# Check if pawn indices are in range
if row >= self.rows or column >= self.columns:
self.conditional_print("Pawn indices are out of bounds!", print_failure)
return False
prev_pos = self.player_1_pawns[pawn_index] if player == 'X' else self.player_2_pawns[pawn_index]
old_square = self.board[prev_pos[0]][prev_pos[1]]
new_square = self.board[row][column]
if abs(prev_pos[0] - row) + abs(prev_pos[1] - column) == 0 or \
abs(prev_pos[0] - row) + abs(prev_pos[1] - column) > 2:
self.conditional_print("You cannot stay in place or move more than two squares from you current position!",
print_failure)
return False
if (new_square.center == 'X' or new_square.center == 'O') and \
(new_square.starting is None or new_square.starting == player):
self.conditional_print("You cannot jump to a square with a pawn!", print_failure)
return False
# Top
if row < prev_pos[0]:
# Top-Left
if column < prev_pos[1]:
if old_square.top_left() or new_square.bottom_right() or \
(old_square.left and new_square.right) or (old_square.top and new_square.bottom):
self.conditional_print("You cannot jump over a wall!", print_failure)
return False
# Top-Right
elif column > prev_pos[1]:
if old_square.top_right() or new_square.bottom_left() or \
(old_square.right and new_square.left) or (old_square.top and new_square.bottom):
self.conditional_print("You cannot jump over a wall!", print_failure)
return False
else:
# Top-Middle
if prev_pos[0] - row == 1:
if new_square.bottom:
self.conditional_print("You cannot jump over a wall!", print_failure)
return False
elif new_square.starting is not None and new_square.starting != player:
pass
elif new_square.top:
self.conditional_print("You cannot jump just one space forward!", print_failure)
return False
elif row == 0 or (self.board[row - 1][column].center != 'X' and
self.board[row - 1][column].center != 'O'):
self.conditional_print("You cannot jump just one space forward!", print_failure)
return False
# Top-Middle-Long
else:
if new_square.bottom or old_square.top:
self.conditional_print("You cannot jump over a wall!", print_failure)
return False
# Bottom
elif row > prev_pos[0]:
# Bottom-Left
if column < prev_pos[1]:
if old_square.bottom_left() or new_square.top_right() or \
(old_square.bottom and new_square.top) or (old_square.left and new_square.right):
self.conditional_print("You cannot jump over a wall!", print_failure)
return False
# Bottom-Right
elif column > prev_pos[1]:
if old_square.bottom_right() or new_square.top_left() or \
(old_square.bottom and new_square.top) or (old_square.right and new_square.left):
self.conditional_print("You cannot jump over a wall!", print_failure)
return False
else:
# Bottom-Middle
if row - prev_pos[0] == 1:
if new_square.top:
self.conditional_print("You cannot jump over a wall!", print_failure)
return False
elif new_square.starting is not None and new_square.starting != player:
pass
elif new_square.bottom:
self.conditional_print("You cannot jump just one space forward!", print_failure)
return False
elif row == self.rows - 1 or (self.board[row + 1][column].center != 'X' and
self.board[row + 1][column].center != 'O'):
self.conditional_print("You cannot jump just one space forward!", print_failure)
return False
# Bottom-Middle-Long
else:
if new_square.top or old_square.bottom:
self.conditional_print("You cannot jump over a wall!", print_failure)
return False
elif column > prev_pos[1]:
# Middle-Right
if column - prev_pos[1] == 1:
if new_square.left:
self.conditional_print("You cannot jump over a wall!", print_failure)
return False
elif new_square.starting is not None and new_square.starting != player:
pass
elif new_square.right:
self.conditional_print("You cannot jump just one space forward!", print_failure)
return False
elif column == self.columns - 1 or (self.board[row][column + 1].center != 'X' and
self.board[row][column + 1].center != 'O'):
self.conditional_print("You cannot jump just one space forward!", print_failure)
return False
# Middle-Right-Long
else:
if new_square.left or old_square.right:
self.conditional_print("You cannot jump over a wall!", print_failure)
return False
else:
# Middle-Left
if prev_pos[1] - column == 1:
if new_square.right:
self.conditional_print("You cannot jump over a wall!", print_failure)
return False
elif new_square.starting is not None and new_square.starting != player:
pass
elif new_square.left:
self.conditional_print("You cannot jump just one space forward!", print_failure)
return False
elif column == 0 or (self.board[row][column - 1].center != 'X' and
self.board[row][column - 1].center != 'O'):
self.conditional_print("You cannot jump just one space forward!", print_failure)
return False
# Middle-Left-Long
else:
if new_square.right or old_square.left:
self.conditional_print("You cannot jump over a wall!", print_failure)
return False
return True
def move_pawn(self, player, pawn_index, row, column):
player_pawns = self.player_1_pawns if player == 'X' else self.player_2_pawns
old_row, old_column = player_pawns[pawn_index][0], player_pawns[pawn_index][1]
# Update pawn position
player_pawns[pawn_index][0], player_pawns[pawn_index][1] = row, column
# Update board
self.board[old_row][old_column].center = \
' ' if self.board[old_row][old_column].starting is None else '·'
self.board[row][column].center = player
# Return the undoing move
return player, pawn_index, old_row, old_column
def valid_wall_placement(self, wall_type, row, column, print_failure=True):
# Check if wall indices are in range
if row >= self.rows - 1 or column >= self.columns - 1:
self.conditional_print("Wall indices out of bound!", print_failure)
return False
if (wall_type == 'Z' and (self.board[row][column].right or self.board[row + 1][column].right)) or \
(wall_type == 'P' and (self.board[row][column].bottom or self.board[row][column + 1].bottom)):
self.conditional_print("A wall already exists on those coordinates!", print_failure)
return False
return True
def place_wall(self, wall_type, row, column, lift=False):
if wall_type == 'Z':
self.board[row][column].right = not lift
self.board[row][column + 1].left = not lift
self.board[row + 1][column].right = not lift
self.board[row + 1][column + 1].left = not lift
else:
self.board[row][column].bottom = not lift
self.board[row][column + 1].bottom = not lift
self.board[row + 1][column].top = not lift
self.board[row + 1][column + 1].top = not lift
self.num_placed_walls += not lift
def check_paths_after_move(self, move, print_failure=True):
# Make the move
undo_move = self.move_pawn(*(move[0]))
self.place_wall(*(move[1]))
if not self.check_path(self.player_1_pawns[0], self.player_2_start[0]) or \
not self.check_path(self.player_1_pawns[0], self.player_2_start[1]) or \
not self.check_path(self.player_1_pawns[1], self.player_2_start[0]) or \
not self.check_path(self.player_1_pawns[1], self.player_2_start[1]) or \
not self.check_path(self.player_2_pawns[0], self.player_1_start[0]) or \
not self.check_path(self.player_2_pawns[0], self.player_1_start[1]) or \
not self.check_path(self.player_2_pawns[1], self.player_1_start[0]) or \
not self.check_path(self.player_2_pawns[1], self.player_1_start[1]):
# Undo the move
self.place_wall(*(move[1]), lift=True)
self.move_pawn(*undo_move)
self.conditional_print("You cannot block one of the pawns' path to the goal!", print_failure)
return False
# Undo the move
self.place_wall(*(move[1]), lift=True)
self.move_pawn(*undo_move)
return True
# A* algorithm to check if there is a pawn path from the source to the destination
def check_path(self, source, destination):
        # Set for keeping track of visited nodes
seen_set = {(source[0], source[1])}
prio_queue = [(self.non_diagonal_distance(source, destination), *source)]
while len(prio_queue):
# noinspection PyTupleAssignmentBalance
_, row, column = heapq.heappop(prio_queue)
if row == destination[0] and column == destination[1]:
return True
for new_pos in filter(lambda jump: jump not in seen_set, self.iter_non_blocking_jumps(row, column)):
seen_set.add(new_pos)
heapq.heappush(prio_queue, (self.non_diagonal_distance(new_pos, destination), *new_pos))
return False
    # Returns all one-square jumps from the square at (row, column), taking only the walls into account.
    # Similar to the legal-move check above, except it only looks one square away and ignores pawn positions
    # on the squares. Used for path-checking
def iter_non_blocking_jumps(self, row, column):
# Top side
if row > 0:
# Top
if not self.board[row][column].top:
yield row - 1, column
# Top-Left
if column > 0 and not (
self.board[row][column].top_left() or
self.board[row - 1][column - 1].bottom_right() or
(self.board[row][column].top and self.board[row][column - 1].top) or
(self.board[row][column].left and self.board[row - 1][column].left)
):
yield row - 1, column - 1
# Top-Right
if column < self.columns - 1 and not (
self.board[row][column].top_right() or
self.board[row - 1][column + 1].bottom_left() or
(self.board[row][column].top and self.board[row][column + 1].top) or
(self.board[row][column].right and self.board[row - 1][column].right)
):
yield row - 1, column + 1
# Bottom side
if row < self.rows - 1:
# Bottom
if not self.board[row][column].bottom:
yield row + 1, column
# Bottom-Left
if column > 0 and not (
self.board[row][column].bottom_left() or
self.board[row + 1][column - 1].top_right() or
(self.board[row][column].bottom and self.board[row][column - 1].bottom) or
(self.board[row][column].left and self.board[row + 1][column].left)
):
yield row + 1, column - 1
# Bottom-Right
if column < self.columns - 1 and not (
self.board[row][column].bottom_right() or
self.board[row + 1][column + 1].top_left() or
(self.board[row][column].bottom and self.board[row][column + 1].bottom) or
(self.board[row][column].right and self.board[row + 1][column].right)
):
yield row + 1, column + 1
# Left
if column > 0 and not self.board[row][column].left:
yield row, column - 1
# Right
if column < self.columns - 1 and not self.board[row][column].right:
yield row, column + 1
@staticmethod
def non_diagonal_distance(source, destination):
return abs(source[0] - destination[0]) + abs(source[1] - destination[1])
def static_evaluation(self):
evaluation = 0
for pawn in self.player_1_pawns:
pawn_distance_1 = self.non_diagonal_distance(pawn, self.player_2_start[0])
pawn_distance_2 = self.non_diagonal_distance(pawn, self.player_2_start[1])
if pawn_distance_1 == 0 or pawn_distance_2 == 0:
return inf
evaluation += 1 / pawn_distance_1 + 1 / pawn_distance_2
for pawn in self.player_2_pawns:
pawn_distance_1 = self.non_diagonal_distance(pawn, self.player_1_start[0])
pawn_distance_2 = self.non_diagonal_distance(pawn, self.player_1_start[1])
if pawn_distance_1 == 0 or pawn_distance_2 == 0:
return -inf
evaluation -= 1 / pawn_distance_1 + 1 / pawn_distance_2
return evaluation
@staticmethod
def matrix_index_to_board_index(index):
return chr(ord('0') + index + 1) if index < 9 else chr(ord('A') - 9 + index)
@staticmethod
def board_index_to_matrix_index(char):
return ord(char) - ord('0') - 1 if ord('0') <= ord(char) <= ord('9') else ord(char) - ord('A') + 9
# Helper method for printing a message if a condition is true
@staticmethod
def conditional_print(message, condition):
if condition:
print(message)
class BoardSquare:
def __init__(self, center=" ", top=False, left=False, right=False, bottom=False):
self.center = center
self.top = top
self.left = left
self.right = right
self.bottom = bottom
# Variable for remembering the starting position of the first or second player
self.starting = None
def set_start(self, player):
self.center = player
self.starting = player
def top_left(self):
return self.top and self.left
def top_right(self):
return self.top and self.right
def bottom_left(self):
return self.bottom and self.left
def bottom_right(self):
return self.bottom and self.right
``` |
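A quick smoke test of the Board class; the board size and pawn coordinates below are chosen only for illustration (Blockade itself uses a larger board), and the module name is assumed from the file path:

```python
# Minimal sanity check for Board (sizes/positions are illustrative assumptions).
from board import Board

board = Board(7, 7, [[0, 2], [0, 4]], [[6, 2], [6, 4]])
board.print_board()
print(board.valid_pawn_move('X', 0, 2, 2))  # True: two squares forward on an empty board
print(board.check_path([0, 2], [6, 2]))     # True: nothing blocks the path to the goal
print(board.static_evaluation())            # 0.0 for this symmetric starting position
```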
{
"source": "Jovanny02/GradingGUI",
"score": 2
} |
#### File: Jovanny02/GradingGUI/main.py
```python
from tkinter import *
from tkinter import ttk
from tkinter.font import Font
from tkinter.scrolledtext import ScrolledText
from tkinter.ttk import Combobox, Style
import subprocess
from subprocess import PIPE, Popen, STDOUT
from OpenFiles import *
from functools import partial
from helpers import *
__author__ = "<NAME>"
class Application(Tk):
def __init__(self):
super().__init__()
self.minsize(1000, 600)
# init attributes
self.leftFrame = None
self.rightFrame = None
self.uploadFileFrame = None
self.generateNewRunFrame = None
self.subProcess = None
self.terminalScrolledText = None
self.exitFlag = False
self.runThread = None
self.runAllThread = None
self.currentProject = None
self.isRunning = False
self.timer = 0
self.currentStudent = None
self.isGenerating = False
self.style = None
self.selectedTestbenches = []
# Create a style
self.style = Style(self)
# Import the tcl file
self.tk.call('source', 'azure-dark.tcl')
self.tk.call('source', 'azure.tcl')
# Set the theme with the theme_use method
self.style.theme_use('azure')
self.createLayout()
self.createUploadFiles()
self.createNewProjectFrame()
self.createScrollWindow()
self.createRunProjectFrame()
# Load Runs
loadProjects(self)
style = ttk.Style()
style.configure("Red.TLabel", foreground="red")
style2 = ttk.Style()
style2.configure("Green.TLabel", foreground="green")
self.updateTimer()
def createLayout(self):
# create two columns
self.grid_columnconfigure(0, weight=1)
self.grid_columnconfigure(1, weight=5)
self.grid_rowconfigure(0, weight=1)
# create left column
self.leftFrame = ttk.Frame(self)
self.leftFrame.grid(row=0, column=0, sticky="NSWE")
# create left column subgrid
self.leftFrame.grid_columnconfigure(0, weight=1)
self.leftFrame.grid_rowconfigure(0, weight=1)
self.leftFrame.grid_rowconfigure(1, weight=3)
# create right column
self.rightFrame = ttk.Frame(self)
self.rightFrame.grid(row=0, column=1, sticky="NSWE")
# create right column subgrid
self.rightFrame.grid_columnconfigure(0, weight=1)
self.rightFrame.grid_rowconfigure(0, weight=1)
self.rightFrame.grid_rowconfigure(1, weight=6)
def createUploadFiles(self):
self.uploadFileFrame = ttk.Frame(self.leftFrame, borderwidth=1,
relief='solid') # highlightthickness=1, highlightbackground="black")
self.uploadFileFrame.grid(row=0, column=0, sticky="NSEW", padx=5, pady=5)
# configure columns
self.uploadFileFrame.grid_columnconfigure(0, weight=1)
self.uploadFileFrame.grid_columnconfigure(1, weight=1, minsize=100)
self.uploadFileFrame.grid_columnconfigure(2, weight=1, minsize=100)
self.uploadFileFrame.grid_columnconfigure(3, weight=4)
        # Title label (ttk.Label)
uploadFileTitle = ttk.Label(self.uploadFileFrame, text="Copy Files to GUI Directory", font=("TkDefaultFont", 18))
uploadFileTitle.grid(row=0, column=0, columnspan=3, sticky="NSEW", padx=5, pady=(5, 0))
# Title sep
sep = ttk.Separator(self.uploadFileFrame, orient='horizontal')
sep.grid(row=1, column=0, columnspan=3, padx=5, pady=(0, 5), sticky='NEW')
# TCL upload
self.tclUploadVar = StringVar(self.uploadFileFrame)
# tclLabel = ttk.Label(self.uploadFileFrame, text="TCL Files: ")
# tclLabel.grid(row=2, column=0, sticky=W)
self.tclStatusLabel = ttk.Label(self.uploadFileFrame, textvariable=self.tclUploadVar)
self.tclStatusLabel.grid(row=2, column=1, columnspan=3, sticky=W)
tclButton = ttk.Button(self.uploadFileFrame, text="Copy TCL Files", style='AccentButton',
command=partial(uploadTCLFile, self))
tclButton.grid(row=2, column=0, sticky=NSEW, padx=5, pady=5)
# tclButton.pack(side=TOP, fill=BOTH, expand=FALSE, padx=(0, 5), pady=5)
# Testbench upload
self.tbUploadVar = StringVar(self.uploadFileFrame)
# testBenchLabel = ttk.Label(self.uploadFileFrame, text="VHDL Test Benches: ").grid(row=3, column=0, sticky=W)
self.tbUploadLabel = ttk.Label(self.uploadFileFrame, textvariable=self.tbUploadVar)
self.tbUploadLabel.grid(row=3, column=1, columnspan=3, sticky=W)
testBenchButton = ttk.Button(self.uploadFileFrame, style='AccentButton', text="Copy Test Benches",
command=partial(uploadVHDFile, self))
testBenchButton.grid(row=3, column=0, sticky=NSEW, padx=5, pady=5)
# testBenchButton.pack(side=TOP, fill=BOTH, expand=FALSE, padx=(0, 5), pady=5)
# Student List upload
self.textUploadVar = StringVar(self.uploadFileFrame)
# studentListLabel = ttk.Label(self.uploadFileFrame, text="Student Lists: ").grid(row=4, column=0, sticky=W)
self.studentListLabel = ttk.Label(self.uploadFileFrame, textvariable=self.textUploadVar)
self.studentListLabel.grid(row=4, column=1, columnspan=3, sticky=W)
studentListButton = ttk.Button(self.uploadFileFrame, text="Copy Student Lists", style='AccentButton',
command=partial(uploadTextFile, self))
studentListButton.grid(row=4, column=0, sticky=NSEW, padx=5, pady=5)
#studentListButton.pack(side=TOP, fill=BOTH, expand=FALSE, padx=(0, 5), pady=5)
def createNewProjectFrame(self):
self.generateNewRunFrame = ttk.Frame(self.leftFrame, borderwidth=1,
relief='solid') # highlightthickness=1, highlightbackground="black")
self.generateNewRunFrame.grid(row=1, column=0, sticky="NSEW", padx=5, pady=5)
        # Title label (ttk.Label)
uploadFileTitle = ttk.Label(self.generateNewRunFrame, text="Create Grading Project", font=("TkDefaultFont", 18))
uploadFileTitle.grid(row=0, column=0, columnspan=2, padx=5, pady=(5, 0), sticky=W)
# Title sep
sep = ttk.Separator(self.generateNewRunFrame, orient='horizontal')
sep.grid(row=1, column=0, columnspan=2, padx=5, pady=(0, 5), sticky='NEW')
# choose TCL
self.tclVar = StringVar(self.generateNewRunFrame)
self.chooseTclLabel = ttk.Label(self.generateNewRunFrame, text="")
self.chooseTclLabel.grid(row=2, column=1, sticky=W)
tclButton = ttk.Button(self.generateNewRunFrame, style='AccentButton', text="Select TCL Script",
command=partial(openTCLFile, self))
tclButton.grid(row=2, column=0, sticky=EW, padx=5, pady=5)
# choose student list
self.studentListVar = StringVar(self.generateNewRunFrame)
self.chooseStudentsLabel = ttk.Label(self.generateNewRunFrame, text="")
self.chooseStudentsLabel.grid(row=3, column=1, sticky=W)
tclButton = ttk.Button(self.generateNewRunFrame, text="Select Student List", style='AccentButton',
command=partial(openStudentListFile, self))
tclButton.grid(row=3, column=0, sticky=EW, padx=5, pady=5)
# ZIP file upload
self.zipVar = StringVar(self.generateNewRunFrame)
self.zipLabel = ttk.Label(self.generateNewRunFrame, text="")
self.zipLabel.grid(row=4, column=1, sticky=W)
zipButton = ttk.Button(self.generateNewRunFrame, style='AccentButton', text="Choose Zip File",
command=partial(uploadZipFile, self))
zipButton.grid(row=4, column=0, sticky=EW, padx=5, pady=5)
self.deleteZipVal = IntVar()
self.zipCheckBox = ttk.Checkbutton(self.generateNewRunFrame, variable=self.deleteZipVal, style='Switch',
text="delete zip")
self.zipCheckBox.grid(row=4, column=2, sticky="NSEW", padx=5, pady=5)
# testbench section
tbButton = ttk.Button(self.generateNewRunFrame, style='AccentButton', text="Choose Testbenches",
command=partial(chooseVHDFiles, self))
tbButton.grid(row=5, column=0, sticky=NW, padx=5, pady=5)
self.tbSelectLabel = ttk.Label(self.generateNewRunFrame, text="")
self.tbSelectLabel.grid(row=5, column=1, sticky=NSEW)
# choose generated name
generationLabel = ttk.Label(self.generateNewRunFrame, text="Grading Project Name: ").grid(row=7, column=0,
sticky=W)
self.generationEntry = ttk.Entry(self.generateNewRunFrame)
self.generationEntry.grid(row=7, column=1, sticky=W, padx=5, pady=5)
# choose generated name
generateButton = ttk.Button(self.generateNewRunFrame, text="Create", style='AccentButton',
command=partial(generateProject, self))
generateButton.grid(row=7, column=2, sticky="NSEW", padx=5, pady=5)
# error label
self.errorLabel = Label(self.generateNewRunFrame, text="", fg="red")
self.errorLabel.grid(row=8, column=0, columnspan=3, sticky=W)
def createRunProjectFrame(self):
runFrame = ttk.Frame(self.rightFrame, borderwidth=1,
relief='solid') # highlightthickness=1, highlightbackground="black")
runFrame.grid(row=0, column=0, columnspan=2, sticky="NSEW", padx=5, pady=5)
runTitle = ttk.Label(runFrame, text="Load Grading Project", font=("TkDefaultFont", 18))
runTitle.grid(row=0, column=0, columnspan=2, padx=5, pady=(5, 0), sticky=W)
# Title sep
sep = ttk.Separator(runFrame, orient='horizontal')
sep.grid(row=1, column=0, columnspan=2, padx=5, pady=(0, 5), sticky='NEW')
# select run comboBox and button
self.projectComboBox = Combobox(runFrame, state='readonly')
self.projectComboBox.grid(row=2, column=0, sticky="NSEW", padx=5, pady=5)
self.projectComboBox.set('Choose Grading Project')
loadButton = ttk.Button(runFrame, text="Load Project", style='AccentButton',
command=partial(loadStudents, self))
loadButton.grid(row=2, column=1, sticky="NSEW", padx=5, pady=5)
deleteButton = ttk.Button(runFrame, text="Delete Project", style='AccentButton',
command=partial(deleteProject, self))
deleteButton.grid(row=2, column=2, sticky="NSEW", padx=5, pady=5)
self.themeVar = IntVar()
self.themeCheckBox = ttk.Checkbutton(runFrame, variable=self.themeVar, style='Switch',
text="Dark Mode", command=self.toggleTheme)
self.themeCheckBox.grid(row=2, column=3, sticky="W", padx=5, pady=5)
# select student comboBox
self.studentComboBox = Combobox(runFrame, state='readonly')
self.studentComboBox.grid(row=3, column=0, sticky="EW", padx=5, pady=5)
self.studentComboBox.set('Choose Student')
buttonFrame = ttk.Frame(runFrame)
buttonFrame.grid(row=3, column=1, columnspan=5, sticky="NSEW", padx=5, pady=5)
loadButton = ttk.Button(buttonFrame, style='AccentButton', text="Load Result",
command=partial(loadStudentResult, self))
loadButton.pack(side=LEFT, fill=Y, expand=FALSE, padx=(0, 5), pady=5)
previousButton = ttk.Button(buttonFrame, style='AccentButton', text="Load Previous Result",
command=partial(changeComboBox, self.studentComboBox, -1, self))
previousButton.pack(side=LEFT, fill=Y, expand=FALSE, padx=(5, 5), pady=5)
nextButton = ttk.Button(buttonFrame, style='AccentButton', text="Load Next Result",
command=partial(changeComboBox,self.studentComboBox, 1, self))
nextButton.pack(side=LEFT, fill=Y, expand=FALSE, padx=(5, 5), pady=5)
return
def createScrollWindow(self):
# create scrollWindow
terminalWindowFrame = ttk.Frame(self.rightFrame, borderwidth=1,
relief='solid')
terminalWindowFrame.grid(row=1, column=0, sticky="NSEW", padx=5, pady=5)
self.terminalScrolledText = ScrolledText(terminalWindowFrame, highlightthickness=1, highlightbackground="black",
font=("TkDefaultFont", 9))
self.terminalScrolledText.pack(side=TOP, fill=BOTH, expand=TRUE)
self.terminalScrolledText.pack_propagate(False)
self.windowStatusVar = StringVar(terminalWindowFrame)
self.windowStatusLabel = ttk.Label(terminalWindowFrame, textvariable=self.windowStatusVar, font=("TkDefaultFont", 9))
self.windowStatusLabel.pack(side=BOTTOM)
# Font change for terminal window
file = os.getcwd() + '\\images\\add.png'
try:
self.addIcon = PhotoImage(file=file)
self.increase = ttk.Button(self.terminalScrolledText, style='AccentButton', image=self.addIcon,
command=partial(changeTerminalFont, self.windowStatusLabel, self.terminalScrolledText, 1))
self.increase.place(relx=1.0, rely=0.0, x=-2, y=2, anchor="ne")
except:
self.increase = ttk.Button(self.terminalScrolledText, style='AccentButton', text='+',
command=partial(changeTerminalFont, self.windowStatusLabel, self.terminalScrolledText, 1))
self.increase.place(relx=1.0, rely=0.0, x=-2, y=2, anchor="ne", height=40, width=40)
file = os.getcwd() + '\\images\\subtract.png'
try:
self.subIcon = PhotoImage(file=file)
self.increase = ttk.Button(self.terminalScrolledText, style='AccentButton', image=self.subIcon,
command=partial(changeTerminalFont, self.windowStatusLabel, self.terminalScrolledText, -1))
self.increase.place(relx=1.0, rely=0.0, x=-45, y=2, anchor="ne")
except:
self.increase = ttk.Button(self.terminalScrolledText, style='AccentButton', text='-',
command=partial(changeTerminalFont, self.windowStatusLabel, self.terminalScrolledText, -1))
self.increase.place(relx=1.0, rely=0.0, x=-45, y=2, anchor="ne", height=40, width=40)
# Terminal controls
terminalControlFrame = ttk.Frame(self.rightFrame, borderwidth=1,
relief='solid') # highlightthickness=1, highlightbackground="black")
terminalControlFrame.grid(row=2, column=0, sticky="NSEW", padx=5, pady=5)
runButton = ttk.Button(terminalControlFrame, style='AccentButton', text="Run",
command=partial(runStudent, self))
runButton.pack(side=LEFT, fill=BOTH, padx=5, pady=5)
runNextButton = ttk.Button(terminalControlFrame, style='AccentButton', text="Run Next",
command=partial(runNextStudent, self))
runNextButton.pack(side=LEFT, fill=BOTH, padx=5, pady=5)
runAllButton = ttk.Button(terminalControlFrame, style='AccentButton', text="Run All",
command=partial(runAllStudents, self))
runAllButton.pack(side=LEFT, fill=BOTH, padx=5, pady=5)
clearWindowButton = ttk.Button(terminalControlFrame, style='AccentButton', text="Clear Window",
command=partial(clearWindowText, self.terminalScrolledText))
clearWindowButton.pack(side=LEFT, fill=BOTH, padx=5, pady=5)
quitWindowButton = ttk.Button(terminalControlFrame, style='AccentButton', text="Stop",
command=partial(stopRunning, self))
quitWindowButton.pack(side=LEFT, fill=BOTH, padx=5, pady=5)
self.guiCheckBoxVal = IntVar()
self.guiCheckBox = ttk.Checkbutton(terminalControlFrame, variable=self.guiCheckBoxVal, style='Switch',
text="gui")
self.guiCheckBox.pack(side=LEFT, fill=BOTH, padx=5, pady=5)
self.parseOutputVar = IntVar()
self.parseOutputCheckBox = ttk.Checkbutton(terminalControlFrame, variable=self.parseOutputVar, style='Switch',
text="Result Only")
self.parseOutputCheckBox.pack(side=LEFT, fill=BOTH, padx=5, pady=5)
def clearSubprocess(self):
self.subProcess = None
def assignSubprocess(self, cmd):
self.subProcess = None
self.subProcess = Popen(cmd, stdout=PIPE, stdin=PIPE, stderr=STDOUT, text=True, universal_newlines=True,
creationflags=subprocess.CREATE_NO_WINDOW)
return self.subProcess
def setCurrStudent(self, student):
self.currentStudent = student
def getTimer(self):
return self.timer
def setExitFlag(self, flag):
self.exitFlag = flag
def setIsGenerating(self, val):
self.isGenerating = val
def getExitFlag(self):
return self.exitFlag
def clearTimer(self):
self.timer = 0
def toggleTheme(self):
style = self.style.theme_use()
if self.style.theme_use() == 'azure':
self.style.theme_use('azure-dark')
# set window background
self.terminalScrolledText["bg"] = "#333333"
self.terminalScrolledText["fg"] = "#ffffff"
# set label background
self.errorLabel["bg"] = "#333333"
# only toggle text color if it is black or white
if self.errorLabel["fg"] == "#000000":
self.errorLabel["fg"] = "#ffffff"
return
# set ttk element backgrounds
self.style.theme_use('azure')
# set other backgrounds that cant be set using style
# set window background
self.terminalScrolledText["bg"] = "#ffffff"
self.terminalScrolledText["fg"] = "#000000"
# set label background
self.errorLabel["bg"] = "#ffffff"
if self.errorLabel["fg"] == "#ffffff":
self.errorLabel["fg"] = "#000000"
def updateTimer(self):
if not self.isRunning and not self.isGenerating:
self.after(1000, self.updateTimer)
return
if self.isRunning:
self.windowStatusVar.set(
f'''Running {self.currentStudent}. Time Elapsed: {getTimerText(self.timer, operator.floordiv)}:{getTimerText(self.timer, operator.mod)}''')
# update time
self.timer += 1
self.after(1000, self.updateTimer)
def setIsRunning(self, val):
self.isRunning = val
def handleClosing():
app.exitFlag = True
if app.subProcess is not None:
app.subProcess.terminate()
app.destroy()
# Create directories if they don't exist
if not os.path.exists(os.getcwd() + os.path.join("\\lab_tcl")):
os.makedirs(os.getcwd() + os.path.join("\\lab_tcl"))
if not os.path.exists(os.getcwd() + os.path.join("\\testbenches")):
os.makedirs(os.getcwd() + os.path.join("\\testbenches"))
if not os.path.exists(os.getcwd() + os.path.join("\\studentlists")):
os.makedirs(os.getcwd() + os.path.join("\\studentlists"))
if not os.path.exists(os.getcwd() + os.path.join("\\grading_projects")):
os.makedirs(os.getcwd() + os.path.join("\\grading_projects"))
app = Application()
app.title("Digital Design Grading GUI")
app.iconbitmap(os.getcwd() + os.path.join("\\images\\integrated-circuit.ico"))
app.protocol("WM_DELETE_WINDOW", handleClosing)
app.mainloop()
``` |
{
"source": "jovannypcg/python_scheduler",
"score": 3
} |
#### File: jovannypcg/python_scheduler/scraper.py
```python
import requests
from bs4 import BeautifulSoup
from celery import Celery
HOST = 'amqp://guest@localhost'
QUEUE = 'celery_pages'
app = Celery(QUEUE, broker=HOST)
@app.task
def scrape(url):
print "-> Starting: [%s]" % url
r = requests.get(url)
soup = BeautifulSoup(r.text)
print "-> Extracted: %s" % soup.html.head.title
print "-> Done: [%s]" % url
```
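The decorated task above runs inside a Celery worker; any process that can reach the broker enqueues work by calling `.delay()`. A minimal producer might look like this (URLs are placeholders, and the worker is assumed to be started with `celery -A scraper worker`):

```python
# Hypothetical producer for the 'celery_pages' queue.
from scraper import scrape  # module name assumed from the file path above

for url in ['https://example.com', 'https://www.python.org']:
    scrape.delay(url)  # returns immediately; a running worker executes the task
```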
#### File: jovannypcg/python_scheduler/with_schedule_library.py
```python
import pika # RabbitMQ client library
import schedule # Schedules processes
import time # To use the sleep method
# List of urls to send via RabbitMQ
URLS = ['http://ebay.to/1G163Lh',
'http://www.google.com.mx',
'http://localhost:8080']
# The host in which RabbitMQ is running
HOST = 'localhost'
# The name of the queue
QUEUE = 'urls'
print('Connecting to RabbitMQ...\n')
# Starting the connection
# Creation of the channel based on the connection
# The queue is declared
connection = pika.BlockingConnection(pika.ConnectionParameters(host=HOST))
channel = connection.channel()
channel.queue_declare(queue=QUEUE)
""" Sends the list of urls to RabbitMQ """
def produce():
for url in URLS:
        print('* Pushed: [%s]' % url)
        channel.basic_publish(exchange='',
                              routing_key=QUEUE,
                              body=url)
# Every 2 seconds the method 'produce' is executed
schedule.every(2).seconds.do(produce)
# Runs the scheduler as a daemon
while True:
schedule.run_pending()
time.sleep(1)
# The connection is closed
connection.close()
``` |
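The script above only publishes; a matching consumer that drains the 'urls' queue could look like the sketch below (written against the pika 1.x callback signature, which is an assumption about the installed version):

```python
# Minimal consumer for the 'urls' queue (assumes pika >= 1.0).
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='urls')

def on_message(ch, method, properties, body):
    print('* Received: [%s]' % body.decode())

channel.basic_consume(queue='urls', on_message_callback=on_message, auto_ack=True)
channel.start_consuming()
```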
{
"source": "Jovansam/ConsumptionSaving",
"score": 4
} |
#### File: ConsumptionSaving/consav/grids.py
```python
import numpy as np
from numba import njit
def nonlinspace(x_min,x_max,n,phi):
""" like np.linspace. but with unequal spacing
Args:
x_min (double): minimum value
x_max (double): maximum value
n (int): number of points
        phi (double): phi = 1 -> equal spacing, phi up -> more points closer to minimum
Returns:
y (list): grid with unequal spacing
"""
assert x_max > x_min
assert n >= 2
assert phi >= 1
# 1. recursion
y = np.empty(n)
y[0] = x_min
for i in range(1, n):
y[i] = y[i-1] + (x_max-y[i-1]) / (n-i)**phi
    # 2. assert increasing
assert np.all(np.diff(y) > 0)
return y
@njit
def nonlinspace_jit(x_min,x_max,n,phi):
""" like nonlinspace, but can be used in numba """
y = np.zeros(n)
y[0] = x_min
for i in range(1,n):
y[i] = y[i-1] + (x_max-y[i-1]) / (n-i)**phi
return y
def equilogspace(x_min,x_max,n):
""" like np.linspace. but (close to) equidistant in logs
Args:
x_min (double): maximum value
x_max (double): minimum value
n (int): number of points
Returns:
y (list): grid with unequal spacing
"""
pivot = np.abs(x_min) + 0.25
y = np.geomspace(x_min + pivot, x_max + pivot, n) - pivot
y[0] = x_min # make sure *exactly* equal to x_min
return y
```
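The difference between the grid constructors is easiest to see on a small example: raising phi (or using the log pivot) packs points near the lower end, where policy functions in consumption-saving models have the most curvature. The `consav.grids` import path is assumed from the file layout above.

```python
# Quick comparison of the grid constructors on [0, 10] with 5 points.
import numpy as np
from consav import grids  # assumed import path

print(np.linspace(0.0, 10.0, 5))             # [ 0.   2.5  5.   7.5 10. ]
print(grids.nonlinspace(0.0, 10.0, 5, 1.5))  # ~[0. 1.25 2.93 5.43 10.] -- denser near 0
print(grids.equilogspace(0.0, 10.0, 5))      # ~[0. 0.38 1.35 3.80 10.] -- close to equidistant in logs
```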
#### File: ConsumptionSaving/consav/quadrature.py
```python
import math
import numpy as np
def gauss_hermite(n):
""" gauss-hermite nodes
Args:
n (int): number of points
Returns:
x (numpy.ndarray): nodes of length n
w (numpy.ndarray): weights of length n
"""
# a. calculations
i = np.arange(1,n)
a = np.sqrt(i/2)
CM = np.diag(a,1) + np.diag(a,-1)
L,V = np.linalg.eig(CM)
I = L.argsort()
V = V[:,I].T
# b. nodes and weights
x = L[I]
w = np.sqrt(math.pi)*V[:,0]**2
return x,w
def normal_gauss_hermite(sigma,n=7,mu=None):
""" normal gauss-hermite nodes
Args:
sigma (double): standard deviation
n (int): number of points
        mu (double,optional): mean (if None, then mean zero)
Returns:
x (numpy.ndarray): nodes of length n
w (numpy.ndarray): weights of length n
"""
if sigma == 0.0 or n == 1:
w = np.ones(n)/n
if mu is None:
            x = np.zeros(n)
        else:
            x = np.zeros(n)+mu
return x,w
# a. GaussHermite
x,w = gauss_hermite(n)
x *= np.sqrt(2)*sigma
w /= np.sqrt(math.pi)
# b. adjust mean
if mu is None:
x = x
else:
x = x + mu
return x,w
def log_normal_gauss_hermite(sigma,n=7,mu=None):
""" log-normal gauss-hermite nodes
Args:
sigma (double): standard deviation
n (int): number of points
        mu (double,optional): mean (if None, then mean one)
Returns:
x (numpy.ndarray): nodes of length n
w (numpy.ndarray): weights of length n
"""
if sigma == 0.0 or n == 1:
w = np.ones(n)/n
if mu is None:
x = np.exp(np.zeros(n))
else:
x = np.exp(np.zeros(n)+ np.log(mu))
return x,w
# a. GaussHermite
x,w = gauss_hermite(n)
x *= np.sqrt(2)*sigma
w /= np.sqrt(math.pi)
# b. adjust mean
if mu is None:
x = np.exp(x-0.5*sigma**2)
else:
x = np.exp(x+np.log(mu)-0.5*sigma**2)
return x,w
def create_PT_shocks(sigma_psi,Npsi,sigma_xi,Nxi,pi=0,mu=None):
""" log-normal gauss-hermite nodes for permanent transitory model
Args:
sigma_psi (double): standard deviation of permanent shock
Npsi (int): number of points for permanent shock
sigma_xi (double): standard deviation of transitory shock
Nxi (int): number of points for transitory shock
pi (double): probability of low income shock
mu (double): value of low income shock
Returns:
psi (numpy.ndarray): nodes for permanent shock of length Npsi*Nxi+1
psi_w (numpy.ndarray): weights for permanent shock of length Npsi*Nxi+1
xi (numpy.ndarray): nodes for transitory shock of length Npsi*Nxi+1
xi_w (numpy.ndarray): weights for transitory shock of length Npsi*Nxi+1
Nshocks (int): number of nodes = Npsi*Nxi+1
"""
# a. gauss hermite
psi, psi_w = log_normal_gauss_hermite(sigma_psi,Npsi)
xi, xi_w = log_normal_gauss_hermite(sigma_xi,Nxi)
    # b. add low income shock
if pi > 0:
# i. weights
xi_w *= (1.0-pi)
xi_w = np.insert(xi_w,0,pi)
# ii. values
xi = (xi-mu*pi)/(1.0-pi)
xi = np.insert(xi,0,mu)
# c. tensor product
psi,xi = np.meshgrid(psi,xi,indexing='ij')
psi_w,xi_w = np.meshgrid(psi_w,xi_w,indexing='ij')
return psi.ravel(),psi_w.ravel(),xi.ravel(),xi_w.ravel(),psi.size
``` |
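A small check that the log-normal nodes behave as documented: the weighted nodes integrate to one (the shock is mean-one by construction) and the weighted log-nodes integrate to -0.5*sigma**2. The `consav.quadrature` import path is assumed from the file layout above.

```python
# Minimal verification of the log-normal Gauss-Hermite nodes.
import numpy as np
from consav import quadrature  # assumed import path

psi, psi_w = quadrature.log_normal_gauss_hermite(sigma=0.2, n=7)
print(np.sum(psi_w * psi))          # ~1.0
print(np.sum(psi_w * np.log(psi)))  # ~-0.02 = -0.5*0.2**2
```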
{
"source": "jovanshernandez/basic-flask-template",
"score": 2
} |
#### File: jovanshernandez/basic-flask-template/app.py
```python
from flask import Flask, render_template
app = Flask(__name__)
app_data = {
"name": "BBI RND Template for a Flask Web App",
"description": "A basic Flask app using bootstrap for layout",
"author": "BBI DevOps",
"html_title": "A template for a Flask Web App",
"project_name": "RND Web App Demo",
"keywords": "flask, webapp, template, basic"
}
@app.route('/')
def index():
return render_template('index.html', app_data=app_data)
@app.route('/b1')
def b1():
return render_template('b1.html', app_data=app_data)
@app.route('/b2')
def b2():
return render_template('b2.html', app_data=app_data)
@app.route('/b3')
def b3():
return render_template('b3.html', app_data=app_data)
# ------- PRODUCTION CONFIG -------
#if __name__ == '__main__':
# app.run()
# ------- DEVELOPMENT CONFIG -------
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "JovanWang/AdaptiveStreamSpTTMFramework",
"score": 3
} |
#### File: dynamic/pytool/generalmatrix.py
```python
import math
import random
import sys
import numpy
# Generate random matrices with different sparsity levels and sizes
rows = 100
cols = 100
degree = 0.8
def generalmat(rows, cols, degree):
nonzeros_num = int(rows*cols*degree)
index_array = random.sample(range(rows*cols), nonzeros_num)
index_array.sort()
# write file
output = 'exp_gemat/m%d_%d_%.4f.tns' %(rows,cols,degree)
f = open(output, 'w')
f.write('%d %d %d' % (rows,cols,nonzeros_num))
f.write('\n')
for i in range(nonzeros_num):
value = random.random()
f.write('%d %d %f \n' % (int(index_array[i]/cols)+1, index_array[i]%cols+1, value))
mat_num = 1000
for i in range(mat_num):
degree_list = [random.uniform(0.001,0.01), random.uniform(0.01,0.1), random.uniform(0.1,0.3)]
size_list = [random.randint(50,150),random.randint(150,300),random.randint(300,500),random.randint(500,800),random.randint(800,1000),random.randint(1000,1500),random.randint(1500,2500),random.randint(2500,5000)]
rows = size_list[i%len(size_list)]
cols = random.choice(size_list)
degree = random.choice(degree_list)
generalmat(rows, cols, degree)
nonzeros_num = int(rows*cols*degree)
print("%d(%d %d %d) general success!" % (i,rows,cols,nonzeros_num))
```
#### File: partiExp/tools/generate_tensor.py
```python
import math
import random
import sys
if sys.version_info < (3,):
range = xrange
def randround(x):
int_part = math.floor(x)
frac_part = x - int_part
return int(math.ceil(x) if random.random() < frac_part else int_part)
def human_size(nbytes):
if nbytes < 1024000:
if nbytes < 1000:
return '%d bytes' % nbytes
else:
return '%.1f KiB' % (nbytes / 1024.0)
else:
if nbytes < 1048576000:
return '%.1f MiB' % (nbytes / 1048576.0)
else:
return '%.1f GiB' % (nbytes / 1073741824.0)
def main(argv):
if len(argv) < 3:
print('Usage: %s output.tns [nonzero_rate%%]mode_dimension ...' % argv[0])
print()
# print('Example: %s output.tns 256 50%%1024 2%%4096' % argv[0])
print('Example: %s output.tns 50%%1024 2%%4096' % argv[0])
print()
print('Each non-zero element will be a gaussian random number (mu=0, sigma=1).')
print()
return 1
output = argv[1]
rates = []
dims = []
for i in argv[2:]:
if '%' in i:
rate, dim = i.split('%', 1)
rates.append(float(rate) * 0.01)
dims.append(int(dim))
else:
rates.append(1)
dims.append(int(i))
ndims = len(dims)
nnz = 1
for i in range(ndims):
nnz *= rates[i] * dims[i]
print('%d non-zero elements estimated.' % round(nnz))
written = 0
percent = 0
f = open(output, 'w')
f.write('%d\n' % ndims)
f.write('\t'.join(map(str, dims)))
f.write('\n')
inds = [None] * ndims
ptrs = [0] * ndims
for i in range(ndims):
if rates[i] == 1:
inds[i] = range(dims[i])
else:
inds[i] = random.sample(range(dims[i]), randround(rates[i] * dims[i]))
inds[i].sort()
while ptrs[0] != len(inds[0]):
for i in range(ndims):
f.write('%d\t' % (inds[i][ptrs[i]]+1))
f.write('% .16f\n' % random.gauss(0, 1))
ptrs[ndims-1] += 1
written += 1
if nnz != 0:
new_percent = int(written * 100.0 / nnz)
if new_percent < 100 and new_percent != percent:
percent = new_percent
print('%3d%% completed, %d generated, %s written.' % (percent, written, human_size(f.tell())), end='\r', flush=True)
for i in range(ndims-1, 0, -1):
if ptrs[i] == len(inds[i]):
if rates[i] == 1:
inds[i] = range(dims[i])
else:
inds[i] = random.sample(range(dims[i]), randround(rates[i] * dims[i]))
inds[i].sort()
ptrs[i] = 0
ptrs[i-1] += 1
print('100%% completed, %d generated, %s written.' % (written, human_size(f.tell())))
f.close()
print('Successfully written into %s.' % output)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
```
#### File: partiExp/tools/transset2tensor.py
```python
import numpy as np
import pandas as pd
from plyfile import PlyData,PlyElement
# Read a point cloud
def read_ply(filename):
""" read XYZ point cloud from filename PLY file """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
pc_array = np.array([[x, y, z] for x,y,z in pc])
return pc_array
# 读取点云数据集
def read_dir_ply(dirname):
pc_list = []
for i in range(0,336+24,24):
filename = dirname + "happyStandRight_%d.ply" % i
""" read XYZ point cloud from filename PLY dirs """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
pc_list.extend([[x, y, z] for x,y,z in pc])
pc_array = np.array(pc_list)
return pc_array
# Read the point cloud PLY files
# pc_array = read_ply("../tensors/oridataset/happy_stand/happyStandRight_0.ply")
pc_array = read_dir_ply("../tensors/oridataset/happy_stand/")
print(pc_array.shape)
df = pd.DataFrame(pc_array,columns=['x', 'y','z'])
df['x'] = df['x'] - df['x'].min()
df['y'] = df['y'] - df['y'].min()
df['z'] = df['z'] - df['z'].min()
# Used to set the dimension sizes and precision (679); keep indices 1-based so the code below does not fail
df = df*50000+1
df = df.astype(int)
print(df.describe())
print(df.dtypes)
# print(df.min())
nnz = df["x"].count()
dim_num = df.shape[1]
dims_list = df.max().values
arr = df.values-1
# print(df.values-1)
with open('../tensors/ply0336_%d_%d.tns'%(dims_list[-1],nnz), 'w') as f:
f.write('%d\n' % (dim_num))
for i in range(dim_num):
f.write('%d ' % (dims_list[i]))
f.write('\n')
for i in range(nnz):
for j in range(dim_num):
f.write('%d ' % arr[i, j])
f.write('%f\n' % (1.0))
# Read the CSV
# df = pd.read_csv("../tensors/oridataset/speeddata_Aug.csv", delimiter=',')
# Start converting the data into tensor form
# nnz = df["speed"].count()
# dim_num = df.shape[1]-1
# dims_list = df.max().values
# arr = df.values-1
# # print(df.values-1)
# with open('../tensors/speedAug_%d_%d.tns'%(dims_list[-2],nnz), 'w') as f:
# f.write('%d\n' % (dim_num))
# for i in range(dim_num):
# f.write('%d ' % (dims_list[i]))
# f.write('\n')
# for i in range(nnz):
# for j in range(dim_num):
# f.write('%d ' % arr[i, j])
# f.write('%f\n' % (arr[i, dim_num]))
```
#### File: AdaptiveStreamSpTTMFramework/pygExp/graphclasstestgpu.py
```python
import os.path as osp
import torch
import torch.nn.functional as F
# from torch.nn import Linear
from torch_geometric.data import DataLoader
from torch_geometric.datasets import Matformattestgpu
# from torch_geometric.data import Matformattestgpu
from torch_geometric import transforms as T
import numpy as np
import torch
from torch.nn import Sequential as Seq, Linear as Lin, ReLU
from torch_geometric.nn import GraphConv, TopKPooling, SAGEConv
from torch_geometric.nn import global_add_pool
from torch_geometric.nn import TopKPooling
from torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp
from torch_scatter import scatter_mean
import time
embed_dim = 10
batch_size = 1
num_epochs = 10000
lr=0.00001
path = osp.join(osp.dirname(osp.realpath(__file__)), '.', 'data', 'Matformattestgpu')
train_dataset = Matformattestgpu(path,train_type=0)
# val_dataset = Matformattestgpu(path,train_type=1)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size)
# val_loader = DataLoader(dataset=val_dataset, batch_size=batch_size)
# dataset_val = Matformattestgpu(path,train_type=1)
# data = dataset[0]
# print(data.x)
# print(dataset.num_classes)
# Read and keep the extra-info file: xelist=[]
xelist = np.loadtxt('./data/Matformattestgpu/raw/matformattest_exinfo.txt')
def getexinfo(graph_no_list):
# print("graph_no_list:",graph_no_list)
result = None
for graph_no in graph_no_list:
result_temp = xelist[np.where(xelist[:,0] == graph_no)]
if result is None:
result = result_temp[:,1:]
else:
# result = result + result_temp
result = np.concatenate((result,result_temp[:,1:]),axis=0)
# result.append(result_temp[:,1:].astype(np.float32))
# print("result:",result)
result = torch.from_numpy(np.array(result).astype(np.float32))
return result.to(device)
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
        # Graph convolution layers
self.conv1 = GraphConv(train_dataset.num_features-2, 256)
# self.pool1 = TopKPooling(256, ratio=0.8)
self.conv2 = GraphConv(256, 256)
# self.pool2 = TopKPooling(256, ratio=0.8)
# self.conv3 = GraphConv(128, 128)
# self.pool3 = TopKPooling(128, ratio=0.8)
        # Two conv passes concatenated, plus the extra info
self.lin1 = torch.nn.Linear(256*2+3, 128)
# self.lin1 = torch.nn.Linear(128*2, 64)
        # Use only the whole-graph attributes for an MLP-NN
# self.lin1 = torch.nn.Linear(10, 64)
self.lin2 = torch.nn.Linear(128, 32)
self.lin3 = torch.nn.Linear(32, train_dataset.num_classes)
    # Add 8 extra-info features
def forward(self, data, exinfo):
        # Two convolution layers
x, edge_index, batch = data.x[:,2:4], data.edge_index, data.batch
x1 = F.relu(self.conv1(x, edge_index))
x1 = F.dropout(x1, p=0.2, training=self.training)
x2 = F.relu(self.conv2(x1, edge_index))
x2 = F.dropout(x2, p=0.2, training=self.training)
x = torch.cat([x1, x2], dim=-1)
x = gmp(x, batch)
xe = exinfo
# print("xe:",xe)
# print("x:",x)
x = torch.cat([x, xe], dim=-1)
# x = torch.cat([x], dim=-1)
        # MLP only (no graph conv)
# x = exinfo
x = F.relu(self.lin1(x))
x = F.dropout(x, p=0.2, training=self.training)
x = F.relu(self.lin2(x))
x = F.log_softmax(self.lin3(x), dim=-1)
return x
@torch.no_grad()
def graphtest(loader):
model.eval()
total_correct = total_examples = 0
total_Traincorrect = total_Trainexamples = 0
# time_start = time.time()
for data in loader:
        # Extra info
exinfo = getexinfo(list(set(data.x[:,0].numpy())))
# exinfo = []
data = data.to(device)
out = model(data, exinfo)
pred = out.max(1)[1]
total_correct += int((pred==data.y).sum())
total_examples += int((pred==pred).sum())
# time_end=time.time()
# print('totally cost',time_end-time_start,'s')
    # Check accuracy on the training set
# for data_train in train_loader:
    # # Extra info
# exinfo_train = getexinfo(list(set(data_train.x[:,0].numpy())))
# data_train = data_train.to(device)
# out_train = model(data_train, exinfo_train)
# pred_train = out_train.max(1)[1]
# total_Traincorrect += int((pred_train==data_train.y).sum())
# total_Trainexamples += int((pred_train==pred_train).sum())
return total_correct, total_examples
# return total_correct, total_examples
# Save all test results
@torch.no_grad()
def saveall(loader):
sava_list = []
model.eval()
for data in loader:
no_list = list(set(data.x[:,0].numpy()))
        # Extra info
exinfo = getexinfo(no_list)
# exinfo = []
data = data.to(device)
out = model(data, exinfo)
pred = out.max(1)[1]
        # Pair each graph index with its prediction
batch_list = list(zip(no_list, pred))
sava_list.extend(batch_list)
return sava_list
# # total_correct = total_examples = 0
# for i,data in enumerate(loader):
# # print(data)
# data = data.to(device)
# out = model(data)
# # print(out)
# _ , out_indices = torch.sort(out,dim=0)
# # _ , y_indices = torch.sort(data.y[:,0],dim=0)
# # total_correct += int((out_indices==y_indices).sum())
# order_list = list(out_indices.cpu().numpy().flatten())
# # sava_list = [0] * len(order_list)
# # for item,val in enumerate(order_list):
# # sava_list[val] = item
# # print(i)
# # print(test_dt[i])
# # print(names_list[test_dt[i]])
# # sava_list = [i]
# filename = int(data.x.cpu().numpy()[0,0])
# np.savetxt("./resultdata/{}.mtx".format(filename), order_list, fmt='%d', delimiter='\t', newline='\n')
# # sava_list.extend(list(out_indices.cpu().numpy().flatten()))
# # print(sava_list)
# # np.savetxt("s3rmt3m3.txt", sava_list, fmt='%d', delimiter='\t', newline='\n')
# return sava_list
device = torch.device('cpu')
# device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# model = Net().to(device)
model = torch.load('./models/gpuexpbestmodel.torch', map_location=lambda storage, loc: storage)
# optimizer = torch.optim.Adam(model.parameters(), lr=lr)
# Check the accuracy
total_correct, total_examples = graphtest(train_loader)
print(
# f'Epoch: {epoch:03d}, Loss: {loss:.4f}, '
# print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Train: {train_acc:.4f}, '
f'total_correct: {total_correct:.1f},'
f'total_examples: {total_examples:.1f},'
f'accuracy: {total_correct/total_examples:.4f},'
# f'Train_accuracy: {total_Traincorrect/total_Trainexamples:.4f}'
)
# Write the results to a file
time_start = time.time()
# graphtest(test_loader)
sava_list = saveall(train_loader)
time_end=time.time()
# Sort sava_list before saving
sava_list.sort(key=lambda i:(i[0]))
np.savetxt("./result_out/test_gpu_nolabel.txt", sava_list, fmt='%d', delimiter='\t', newline='\n')
# print('totally cost',time_end-time_start)
print('totally time',time_end-time_start)
# crit = torch.nn.BCELoss()
# train_loader = DataLoader(train_dataset, batch_size=batch_size)
# for epoch in range(1):
# # loss = graphtrain()
# # # train_acc = graphtest(dataset_val)
# # # val_acc = graphtest(dataset_val)
# # # # time_start = time.time()
# # # # test_acc = graphtest(test_loader)
# # # # time_end=time.time()
# # # # print('totally cost',time_end-time_start)
# # # # test_acc = saveall(test_loader)
# # # # val_acc = graphtest(val_loader)
# # # # test_acc = graphtest(test_loader)
# # train_acc = graphtest(train_loader)
# best_correct = 0
# total_correct, total_examples, total_Traincorrect, total_Trainexamples = graphtest(val_loader)
# # if epoch % 10 == 0:
# if total_correct > best_correct:
# # total_correct, total_examples = graphtest(val_loader)
# print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, '
# # print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Train: {train_acc:.4f}, '
# f'total_correct: {total_correct:.1f},'
# f'total_examples: {total_examples:.1f},'
# f'accuracy: {total_correct/total_examples:.4f},'
# f'Train_accuracy: {total_Traincorrect/total_Trainexamples:.4f}')
# torch.save(model, './models/bestmodel_ep{}.torch'.format(epoch))
``` |
{
"source": "JovanXin/data_collector",
"score": 3
} |
#### File: JovanXin/data_collector/app.py
```python
from flask import Flask, url_for, render_template, flash, redirect
from flask_sqlalchemy import SQLAlchemy
import os
from search import Stalk
from models import db
from models import Number
from forms import DataForm
from sqlalchemy import exc
app = Flask(__name__)
user_data_dict = {}
db.init_app(app)
app.config["SECRET_KEY"] = "SECRET KEY"
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///data.db"
with app.app_context():
db.create_all()
@app.route("/", methods=["GET", "POST"])
def home():
form = DataForm()
print("OH YES")
if form.validate_on_submit():
print("OH AGAIN")
number_from = form.number_from.data
number_to = form.number_to.data
email = form.email.data
with app.app_context():
number = Number(email=email, number_from=number_from, number_to=number_to)
db.session.add(number)
db.session.commit()
flash("Successfully sent message, wait a few minutes to recieve")
user_data_dict[email] = Stalk(email)
user_data_dict[email].search(email)
user_data_dict[email].write(user_data_dict[email].results)
return render_template("index.jinja2", form=form)
@app.route("/data")
def data():
with app.app_context():
numbers = Number.query.all()
return render_template("data.jinja2", data=numbers)
@app.route("/data/<string:email>")
def user_data(email):
with app.app_context():
numbers = Number.query.filter_by(email=email).all()
return render_template("data.jinja2", data=numbers)
@app.route("/data/<string:email>/search_results")
def search_results(email):
links = []
with open(f"{str(email)}.txt", "r") as user_data:
user_data = eval(user_data.read())
return render_template("user_data.jinja2", data=user_data.values())
if __name__ == '__main__':
app.run(debug=True)
```
#### File: JovanXin/data_collector/models.py
```python
from datetime import datetime
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Number(db.Model):
_id = db.Column(db.Integer, primary_key=True, nullable=False)
email = db.Column(db.String(50), nullable=False)
    date = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)  # pass the callable so the timestamp is evaluated per row
number_from = db.Column(db.String(200), nullable=False)
number_to = db.Column(db.String(200), nullable=False)
def __repr__(self):
return f"Number(_id={self._id}, user_id={self.user_id}"
``` |
{
"source": "Jovany-Rong/minerva",
"score": 2
} |
#### File: minerva/views/main.py
```python
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from forms.Ui_mainWindow import Ui_MainWindow
from views.core import *
from views.maps import *
import cv2
import os
class Minerva(QMainWindow, Ui_MainWindow):
def __init__(self):
super().__init__()
self.setupUi(self)
self.setWindowIcon(QIcon("lib/src/icon.png"))
self.actionAbout_Minerva.triggered.connect(self.about_minerva)
self.btnCopy.clicked.connect(self.copy_text)
self.btnSave.clicked.connect(self.save_text_to_file)
self.btnFromCb.clicked.connect(self.convert_from_clipboard)
self.btnFromFile.clicked.connect(self.convert_from_file)
def convert_from_clipboard(self):
img = load_img_from_clipboard()
if not img:
QMessageBox.information(self, "Info", "No picture in the clipboard.")
return
self.convert_to_text(img)
self.show_image()
def convert_from_file(self):
fileName, fileType = QFileDialog.getOpenFileName(self, "Select a picture", os.getcwd(),
"Picture Files (*.png *.jpeg *.jpg *.webp *.bmp)")
if fileName == "":
return
img = load_img_from_file(fileName)
if not img:
QMessageBox.information(self, "Info", "It's not a picture.")
return
self.convert_to_text(img)
self.show_image()
def convert_to_text(self, img):
lang = LANGUAGES[self.comboBoxLang.currentIndex()]
text = img_to_txt(img, lang)
self.textBrowser.clear()
self.textBrowser.append(text)
def show_image(self):
img = cv2.imread("temp.png")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
x = img.shape[1]
y = img.shape[0]
frame = QImage(img, x, y, x*3, QImage.Format_RGB888)
pix = QPixmap.fromImage(frame)
self.graphicsView.item = QGraphicsPixmapItem(pix)
self.graphicsView.scene = QGraphicsScene()
self.graphicsView.scene.addItem(self.graphicsView.item)
self.graphicsView.setScene(self.graphicsView.scene)
def copy_text(self):
text = self.textBrowser.toPlainText()
clipboard = QApplication.clipboard()
clipboard.setText(text)
def about_minerva(self):
text = """Minerva %s
Minerva is an OCR tool that extracts text from pictures. It is fully functional even without a network connection.
Author: <NAME>
""" % (VERSION)
QMessageBox.about(self, "About", text)
def save_text_to_file(self):
text = self.textBrowser.toPlainText()
fileName, fileType = QFileDialog.getSaveFileName(self, "Save", os.getcwd(),
"All Files (*);;Text Files (*.txt)")
if fileName != "":
with open(fileName, "w+", encoding="utf-8") as f:
f.write(text)
QMessageBox.information(self, "Info", "Text saved to file: %s." % fileName)
``` |
{
"source": "jovany-wang/pygloo",
"score": 2
} |
#### File: pygloo/tests/test_gather.py
```python
import pygloo
import numpy as np
import os
import ray
import time
import shutil
import torch
@ray.remote(num_cpus=1)
def test_gather(rank, world_size, fileStore_path):
'''
rank # Rank of this process within list of participating processes
world_size # Number of participating processes
'''
if rank==0:
if os.path.exists(fileStore_path):
shutil.rmtree(fileStore_path)
os.makedirs(fileStore_path)
else: time.sleep(0.5)
context = pygloo.rendezvous.Context(rank, world_size)
attr = pygloo.transport.tcp.attr("localhost")
# Perform rendezvous for TCP pairs
dev = pygloo.transport.tcp.CreateDevice(attr)
fileStore = pygloo.rendezvous.FileStore(fileStore_path)
store = pygloo.rendezvous.PrefixStore(str(world_size), fileStore)
context.connectFullMesh(store, dev)
sendbuf = np.array([rank, rank+1], dtype=np.float32)
sendptr = sendbuf.ctypes.data
recvbuf = np.zeros((1, world_size*2), dtype=np.float32)
recvptr = recvbuf.ctypes.data
# sendbuf = torch.Tensor([i+1 for i in range(sum([j+1 for j in range(world_size)]))]).float()
# sendptr = sendbuf.data_ptr()
# recvbuf = torch.zeros(rank+1).float()
# recvptr = recvbuf.data_ptr()
data_size = sendbuf.size if isinstance(sendbuf, np.ndarray) else sendbuf.numpy().size
datatype = pygloo.glooDataType_t.glooFloat32
pygloo.gather(context, sendptr, recvptr, data_size, datatype, root = 0)
print(f"rank {rank} sends {sendbuf}, receives {recvbuf}")
## example output
# (pid=23172) rank 2 sends [2. 3.], receives [[0. 0. 0. 0. 0. 0.]]
# (pid=23171) rank 1 sends [1. 2.], receives [[0. 0. 0. 0. 0. 0.]]
# (pid=23173) rank 0 sends [0. 1.], receives [[0. 1. 1. 2. 2. 3.]]
if __name__ == "__main__":
ray.init(num_cpus=6)
world_size = 3
fileStore_path = f"{ray.worker._global_node.get_session_dir_path()}" + "/collective/gloo/rendezvous"
fns = [test_gather.remote(i, world_size, fileStore_path) for i in range(world_size)]
ray.get(fns)
```
#### File: pygloo/tests/test_send_recv.py
```python
import pygloo
import numpy as np
import os
import ray
import time
import shutil
import torch
@ray.remote(num_cpus=1)
def test_send_recv(rank, world_size, fileStore_path):
'''
rank # Rank of this process within list of participating processes
world_size # Number of participating processes
'''
if rank==0:
if os.path.exists(fileStore_path):
shutil.rmtree(fileStore_path)
os.makedirs(fileStore_path)
else: time.sleep(0.5)
context = pygloo.rendezvous.Context(rank, world_size)
attr = pygloo.transport.tcp.attr("localhost")
# Perform rendezvous for TCP pairs
dev = pygloo.transport.tcp.CreateDevice(attr)
fileStore = pygloo.rendezvous.FileStore(fileStore_path)
store = pygloo.rendezvous.PrefixStore(str(world_size), fileStore)
context.connectFullMesh(store, dev)
if rank == 0:
sendbuf = np.array([[1,2,3],[1,2,3]], dtype=np.float32)
sendptr = sendbuf.ctypes.data
# sendbuf = torch.Tensor([[1,2,3],[1,2,3]]).float()
# sendptr = sendbuf.data_ptr()
data_size = sendbuf.size if isinstance(sendbuf, np.ndarray) else sendbuf.numpy().size
datatype = pygloo.glooDataType_t.glooFloat32
peer = 1
pygloo.send(context, sendptr, data_size, datatype, peer)
print(f"rank {rank} sends {sendbuf}")
elif rank == 1:
recvbuf = np.zeros((2,3), dtype=np.float32)
recvptr = recvbuf.ctypes.data
# recvbuf = torch.zeros(2,3).float()
# recvptr = recvbuf.data_ptr()
data_size = recvbuf.size if isinstance(recvbuf, np.ndarray) else recvbuf.numpy().size
datatype = pygloo.glooDataType_t.glooFloat32
peer = 0
pygloo.recv(context, recvptr, data_size, datatype, peer)
print(f"rank {rank} receives {recvbuf}")
else:
raise Exception("Only support 2 process to test send function and recv function")
## example output
if __name__ == "__main__":
ray.init(num_cpus=6)
world_size = 2
fileStore_path = f"{ray.worker._global_node.get_session_dir_path()}" + "/collective/gloo/rendezvous"
fns = [test_send_recv.remote(i, world_size, fileStore_path) for i in range(world_size)]
ray.get(fns)
``` |
{
"source": "jovany-wang/ray",
"score": 2
} |
#### File: utils/exploration/slate_soft_q.py
```python
from typing import Union
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.utils.annotations import override
from ray.rllib.utils.exploration.exploration import TensorType
from ray.rllib.utils.exploration.soft_q import SoftQ
from ray.rllib.utils.framework import try_import_tf, try_import_torch
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
class SlateSoftQ(SoftQ):
@override(SoftQ)
def get_exploration_action(
self,
action_distribution: ActionDistribution,
timestep: Union[int, TensorType],
explore: bool = True,
):
assert (
self.framework == "torch"
), "ERROR: SlateSoftQ only supports torch so far!"
cls = type(action_distribution)
# Re-create the action distribution with the correct temperature
# applied.
action_distribution = cls(
action_distribution.inputs, self.model, temperature=self.temperature
)
# per_slate_q_values = dist.inputs
all_slates = self.model.slates
batch_size = action_distribution.inputs.size()[0]
action_logp = torch.zeros(batch_size, dtype=torch.float)
self.last_timestep = timestep
# Explore.
if explore:
# Return stochastic sample over (q-value) logits.
explore_indices = action_distribution.sample()
explore_action = all_slates[explore_indices]
return explore_action, action_logp
# Return the deterministic "sample" (argmax) over (q-value) logits.
else:
exploit_indices = action_distribution.deterministic_sample()
exploit_action = all_slates[exploit_indices]
return exploit_action, action_logp
``` |
{
"source": "jovanzac/Captain",
"score": 3
} |
#### File: Captain/Scripts/splash_pg.py
```python
import tkinter as tk
from PIL import Image, ImageTk
import tkinter.ttk as ttk
from ttkthemes import ThemedTk
class SplashScreen :
def __init__(self,parent:object,nxt_func:object) :
"""Initialize main background image and set widow size."""
self.parent = parent
self.parent.overrideredirect(True)
self.nxt_func = nxt_func
self.splash()
self.window()
def splash(self) :
self.image1 = Image.open('project_pics\\splash.png')
def window(self) :
width, height = self.image1.size
setwinwidth = (self.parent.winfo_screenwidth()-width)//2
setwinheight = (self.parent.winfo_screenheight()-height)//2
self.parent.geometry(f"{width}x{height}+{setwinwidth}+{setwinheight}")
def proceed(self) :
self.nxt_func()
def display(self) :
self.parent.deiconify()
screen_width = self.parent.winfo_screenwidth()
screen_height = self.parent.winfo_screenheight()
splash_canvas = tk.Canvas(self.parent,width=screen_width,height=screen_height)
splash_canvas.pack()
self.tkimage = ImageTk.PhotoImage(self.image1)
splash_canvas.create_image(0,0,image=self.tkimage,anchor="nw")
splash_canvas.create_text(200,34,text="[LOADING]...PLEASE WAIT",font=("small fonts",20),fill="white")
progressbar = ttk.Progressbar(orient=tk.HORIZONTAL, length=900, mode='determinate',style="Horizontal.TProgressbar")
progressbar.place(x=450,y=25)
progressbar.start()
if self.nxt_func != None :
self.parent.after(5050,self.proceed)
if __name__ == '__main__' :
root = ThemedTk("black")
root["bg"] = "gray21"
style = ttk.Style()
style.configure("Horizontal.TProgressbar", foreground='gray21')
app = SplashScreen(root,None)
app.display()
root.after(5050,root.destroy)
root.mainloop()
``` |
{
"source": "jovanzers/quart",
"score": 2
} |
#### File: quart/flask_patch/globals.py
```python
from __future__ import annotations
from typing import Any, AnyStr
from werkzeug.datastructures import MultiDict
from werkzeug.local import LocalProxy
from quart.globals import (
_app_ctx_stack,
_ctx_lookup,
_request_ctx_stack,
current_app,
g,
request as quart_request,
session,
)
from ._synchronise import sync_with_context
class FlaskRequestProxy(LocalProxy):
@property
def data(self) -> bytes:
return sync_with_context(self._get_current_object().data)
@property
def form(self) -> MultiDict:
return sync_with_context(self._get_current_object().form)
@property
def files(self) -> MultiDict:
return sync_with_context(self._get_current_object().files)
@property
def json(self) -> Any:
return sync_with_context(self._get_current_object().json)
def get_json(self, *args: Any, **kwargs: Any) -> Any:
return sync_with_context(self._get_current_object().get_json(*args, **kwargs))
def get_data(self, *args: Any, **kwargs: Any) -> AnyStr:
return sync_with_context(self._get_current_object().get_data(*args, **kwargs))
request = FlaskRequestProxy(lambda: quart_request)
def _lookup_app_object(name: str) -> Any:
return _ctx_lookup([_app_ctx_stack], name)
def _lookup_req_object(name: str) -> Any:
return _ctx_lookup([_request_ctx_stack], name)
__all__ = (
"_app_ctx_stack",
"_lookup_app_object",
"_lookup_req_object",
"_request_ctx_stack",
"current_app",
"g",
"request",
"session",
)
```
#### File: src/quart/globals.py
```python
from __future__ import annotations
from contextvars import ContextVar
from functools import partial
from typing import Any, List, TYPE_CHECKING
from werkzeug.local import ContextVar as WerkzeugContextVar, LocalProxy, LocalStack
if TYPE_CHECKING:
from .app import Quart
from .ctx import _AppCtxGlobals
from .sessions import SessionMixin
from .wrappers import Request, Websocket
if WerkzeugContextVar != ContextVar:
raise RuntimeError("Error with Werkzeug's locals, please open a Quart issue for help")
def _ctx_lookup(ctx_stacks: List[LocalStack], name: str) -> Any:
top = None
for ctx_stack in ctx_stacks:
top = ctx_stack.top
if top is not None:
break
if top is None:
raise RuntimeError(f"Attempt to access {name} outside of a relevant context")
return getattr(top, name)
_app_ctx_stack = LocalStack()
_request_ctx_stack = LocalStack()
_websocket_ctx_stack = LocalStack()
current_app: Quart = LocalProxy(partial(_ctx_lookup, [_app_ctx_stack], "app")) # type: ignore
g: _AppCtxGlobals = LocalProxy(partial(_ctx_lookup, [_app_ctx_stack], "g")) # type: ignore
request: Request = LocalProxy(partial(_ctx_lookup, [_request_ctx_stack], "request")) # type: ignore
session: SessionMixin = LocalProxy(partial(_ctx_lookup, [_request_ctx_stack, _websocket_ctx_stack], "session")) # type: ignore # noqa: E501
websocket: Websocket = LocalProxy(partial(_ctx_lookup, [_websocket_ctx_stack], "websocket")) # type: ignore # noqa: E501
```
#### File: quart/wrappers/base.py
```python
from __future__ import annotations
from typing import Any, Dict, List, Optional, TYPE_CHECKING
from hypercorn.typing import WWWScope
from werkzeug.datastructures import Headers
from werkzeug.sansio.request import Request as SansIORequest
from ..globals import current_app
if TYPE_CHECKING:
from ..routing import QuartRule # noqa
class BaseRequestWebsocket(SansIORequest):
"""This class is the basis for Requests and websockets..
Attributes:
routing_exception: If an exception is raised during the route
matching it will be stored here.
url_rule: The rule that this request has been matched too.
view_args: The keyword arguments for the view from the route
matching.
"""
routing_exception: Optional[Exception] = None
url_rule: Optional["QuartRule"] = None
view_args: Optional[Dict[str, Any]] = None
def __init__(
self,
method: str,
scheme: str,
path: str,
query_string: bytes,
headers: Headers,
root_path: str,
http_version: str,
scope: WWWScope,
) -> None:
"""Create a request or websocket base object.
Arguments:
method: The HTTP verb.
scheme: The scheme used for the request.
path: The full unquoted path of the request.
query_string: The raw bytes for the query string part.
headers: The request headers.
root_path: The root path that should be prepended to all
routes.
http_version: The HTTP version of the request.
scope: Underlying ASGI scope dictionary.
Attributes:
args: The query string arguments.
scheme: The URL scheme, http or https.
"""
super().__init__(
method,
scheme,
scope.get("server"),
root_path,
path,
query_string,
headers,
headers.get("Remote-Addr"),
)
self.http_version = http_version
self.scope = scope
@property
def max_content_length(self) -> Optional[int]:
"""Read-only view of the ``MAX_CONTENT_LENGTH`` config key."""
if current_app:
return current_app.config["MAX_CONTENT_LENGTH"]
else:
return None
@property
def endpoint(self) -> Optional[str]:
"""Returns the corresponding endpoint matched for this request.
This can be None if the request has not been matched with a
rule.
"""
if self.url_rule is not None:
return self.url_rule.endpoint
else:
return None
@property
def blueprint(self) -> Optional[str]:
"""Returns the blueprint the matched endpoint belongs to.
This can be None if the request has not been matched or the
endpoint is not in a blueprint.
"""
if self.endpoint is not None and "." in self.endpoint:
return self.endpoint.rsplit(".", 1)[0]
else:
return None
@property
def blueprints(self) -> List[str]:
"""Return the names of the current blueprints.
The returned list is ordered from the current blueprint,
upwards through parent blueprints.
"""
# Avoid circular import
from ..helpers import _split_blueprint_path
if self.blueprint is not None:
return _split_blueprint_path(self.blueprint)
else:
return []
@property
def script_root(self) -> str:
return self.root_path
@property
def url_root(self) -> str:
return self.root_url
```
#### File: quart/tests/test_utils.py
```python
from __future__ import annotations
from functools import partial
from werkzeug.datastructures import Headers
from quart.utils import decode_headers, encode_headers, is_coroutine_function
def test_is_coroutine_function() -> None:
async def async_func() -> None:
pass
assert is_coroutine_function(async_func)
assert is_coroutine_function(partial(async_func))
def test_encode_headers() -> None:
assert encode_headers(Headers({"Foo": "Bar"})) == [(b"foo", b"Bar")]
def test_decode_headers() -> None:
assert decode_headers([(b"foo", b"Bar")]) == Headers({"Foo": "Bar"})
``` |
{
"source": "Jovasa/bjontegaard",
"score": 2
} |
#### File: bjontegaard/bd/bd_cubic.py
```python
__author__ = "<EMAIL> (<NAME>),"
__author__ += "<EMAIL> (<NAME>)"
__author__ += "<EMAIL> (<NAME>)"
# AH: source of this file: https://github.com/google/compare-codecs/blob/master/lib/visual_metrics.py
# Then slightly modified.
import math # noqa: E402
import numpy # noqa: E402
import matplotlib.pyplot as plt # noqa: E402
def bd_PSNR(rate1, psnr1, rate2, psnr2):
"""
BJONTEGAARD Bjontegaard metric calculation
    Bjontegaard's metric computes the average PSNR gain between two
    rate-distortion curves [1].
rate1,psnr1 - RD points for curve 1
rate2,psnr2 - RD points for curve 2
returns the calculated Bjontegaard metric 'dsnr'
code adapted from code written by : (c) 2010 <NAME>
http://www.mathworks.com/matlabcentral/fileexchange/27798-bjontegaard-metric/content/bjontegaard.m
"""
# pylint: disable=too-many-locals
# numpy seems to do tricks with its exports.
# pylint: disable=no-member
# map() is recommended against.
# pylint: disable=bad-builtin
# log_rate1 = map(math.log, rate1)
# log_rate2 = map(math.log, rate2)
log_rate1 = numpy.log(rate1)
log_rate2 = numpy.log(rate2)
    # Best cubic poly fit for graph represented by log_ratex, psnr_x.
poly1 = numpy.polyfit(log_rate1, psnr1, 3)
poly2 = numpy.polyfit(log_rate2, psnr2, 3)
# Integration interval.
min_int = max([min(log_rate1), min(log_rate2)])
max_int = min([max(log_rate1), max(log_rate2)])
# Integrate poly1, and poly2.
p_int1 = numpy.polyint(poly1)
p_int2 = numpy.polyint(poly2)
# Calculate the integrated value over the interval we care about.
int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)
int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)
# Calculate the average improvement.
if max_int != min_int:
avg_diff = (int2 - int1) / (max_int - min_int)
else:
avg_diff = 0.0
return avg_diff
def bd_rate(rate1, psnr1, rate2, psnr2, *, ax=None, convert_to_percentage=True):
"""
BJONTEGAARD Bjontegaard metric calculation
    Bjontegaard's metric computes the average % bitrate saving between two
    rate-distortion curves [1].
rate1,psnr1 - RD points for curve 1
rate2,psnr2 - RD points for curve 2
adapted from code from: (c) 2010 <NAME>
"""
# numpy plays games with its exported functions.
# pylint: disable=no-member
# pylint: disable=too-many-locals
# pylint: disable=bad-builtin
# log_rate1 = map(math.log, rate1)
# log_rate2 = map(math.log, rate2)
log_rate1 = numpy.log(rate1)
log_rate2 = numpy.log(rate2)
    # Best cubic poly fit for graph represented by log_ratex, psnr_x.
poly1 = numpy.polyfit(psnr1, log_rate1, 3)
poly2 = numpy.polyfit(psnr2, log_rate2, 3)
# Integration interval.
min_int = max([min(psnr1), min(psnr2)])
max_int = min([max(psnr1), max(psnr2)])
# find integral
p_int1 = numpy.polyint(poly1)
p_int2 = numpy.polyint(poly2)
# Calculate the integrated value over the interval we care about.
int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)
int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)
# Calculate the average improvement.
avg_exp_diff = (int2 - int1) / (max_int - min_int)
    # With badly formed data the exponent can grow too large.
# clamp it.
if avg_exp_diff > 200:
avg_exp_diff = 200
# Convert to a percentage.
if convert_to_percentage:
avg_diff = (math.exp(avg_exp_diff) - 1) * 100
else:
avg_diff = (math.exp(avg_exp_diff) - 1)
if ax:
ax.set_title('Cubic interpolation (non-piece-wise)')
# plot rateA and distA
pA = ax.plot(log_rate1, psnr1, '-o', label='encoder1')
dists = numpy.linspace(psnr1.min(), psnr1.max(), num=10, endpoint=True)
ax.plot(numpy.poly1d(poly1)(dists), dists, '--', color=pA[-1].get_color())
# plot rateB and distB
        pB = ax.plot(log_rate2, psnr2, '-o', label='encoder2')  # plot on the provided axes, matching pA
dists = numpy.linspace(psnr2.min(), psnr2.max(), num=10, endpoint=True)
ax.plot(numpy.poly1d(poly2)(dists), dists, '--', color=pB[-1].get_color())
ax.set_xlabel('log(rate)')
ax.set_ylabel('PSNR')
ax.grid()
ax.legend()
return avg_diff
``` |
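A brief usage sketch for the two functions above. The rate/PSNR points below are invented for illustration, and the import path is an assumption based on the file location (`bd/bd_cubic.py`):
```python
import numpy as np
from bd.bd_cubic import bd_rate, bd_PSNR  # assumed import path

# Hypothetical rate (kbps) and PSNR (dB) points for two encoders
rate1 = np.array([100.0, 200.0, 400.0, 800.0])
psnr1 = np.array([30.0, 33.0, 36.0, 39.0])
rate2 = np.array([90.0, 180.0, 360.0, 720.0])
psnr2 = np.array([30.5, 33.5, 36.5, 39.5])

# Average bitrate saving of curve 2 relative to curve 1, in percent
print(bd_rate(rate1, psnr1, rate2, psnr2))
# Average PSNR gain of curve 2 over curve 1, in dB
print(bd_PSNR(rate1, psnr1, rate2, psnr2))
```
With these sample points, a negative `bd_rate` result indicates a bitrate saving for the second curve.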
{
"source": "Jovascript/DrawPy",
"score": 3
} |
#### File: hardware/hardware_internals/rpgpio.py
```python
from .rpgpio_private import *
import time
from collections import deque
import sys
import logging
logger = logging.getLogger(__name__)
class GPIO(object):
MODE_OUTPUT = 1
MODE_INPUT_NOPULL = 2
MODE_INPUT_PULLUP = 3
MODE_INPUT_PULLDOWN = 4
def __init__(self):
""" Create object which can control GPIO.
This class writes directly to CPU registers and doesn't use any
libs or kernel modules.
"""
self._mem = PhysicalMemory(PERI_BASE + GPIO_REGISTER_BASE)
def _pull_up_dn(self, pin, mode):
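        # BCM283x pull-up/down sequence: write the desired pull mode to the
        # GPPUD register, clock it into the target pin through the matching
        # GPPUDCLK register, then clear both registers so the setting latches.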
p = self._mem.read_int(GPIO_PULLUPDN_OFFSET)
p &= ~3
if mode == self.MODE_INPUT_PULLUP:
p |= 2
elif mode == self.MODE_INPUT_PULLDOWN:
p |= 1
self._mem.write_int(GPIO_PULLUPDN_OFFSET, p)
address = 4 * int(pin / 32) + GPIO_PULLUPDNCLK_OFFSET
self._mem.write_int(address, 1 << (pin % 32))
p = self._mem.read_int(GPIO_PULLUPDN_OFFSET)
p &= ~3
self._mem.write_int(GPIO_PULLUPDN_OFFSET, p)
self._mem.write_int(address, 0)
def init(self, pin, mode):
""" Initialize or re-initialize GPIO pin.
:param pin: pin number.
:param mode: one of MODE_* variables in this class.
"""
address = 4 * int(pin / 10) + GPIO_FSEL_OFFSET
v = self._mem.read_int(address)
v &= ~(7 << ((pin % 10) * 3)) # input value
if mode == self.MODE_OUTPUT:
            v |= (1 << ((pin % 10) * 3)) # output value, based on input
self._mem.write_int(address, v)
else:
self._mem.write_int(address, v)
self._pull_up_dn(pin, mode)
def set(self, pin):
""" Set pin to HIGH state.
:param pin: pin number.
"""
address = 4 * int(pin / 32) + GPIO_SET_OFFSET
self._mem.write_int(address, 1 << (pin % 32))
def clear(self, pin):
""" Set pin to LOW state.
:param pin: pin number.
"""
address = 4 * int(pin / 32) + GPIO_CLEAR_OFFSET
self._mem.write_int(address, 1 << (pin % 32))
def read(self, pin):
""" Read pin current value.
:param pin: pin number.
:return: integer value 0 or 1.
"""
address = 4 * int(pin / 32) + GPIO_INPUT_OFFSET
v = self._mem.read_int(address)
v &= 1 << (pin % 32)
if v == 0:
return 0
return 1
# When DMAGPIO is active on two channels simultaneously, the delay time shifts
# slightly, because all DMA channels query the same PWM (which is used as the
# clock for delays). So do not create two or more instances of DMAGPIO.
class DMAGPIO(DMAProto):
_DMA_CONTROL_BLOCK_SIZE = 32
_DMA_CHANNEL = 4
_BLOCK_NUMBER = 983040
_PREFERRED_BATCH = _BLOCK_NUMBER / 10
def __init__(self):
""" Create object which control GPIO pins via DMA(Direct Memory
Access).
This object allows to add arbitrary sequence of pulses to any GPIO
outputs and run this sequence in background without using CPU since
DMA is a separated hardware module.
Note: keep this object out of garbage collector until it stops,
otherwise memory will be unlocked and it could be overwritten by
operating system.
"""
super(DMAGPIO, self).__init__(self._BLOCK_NUMBER*self._DMA_CONTROL_BLOCK_SIZE, self._DMA_CHANNEL)
self.__current_address = 0
        # get helper registers; this class uses the PWM module to create
        # precise delays
self._pwm = PhysicalMemory(PERI_BASE + PWM_BASE)
self._clock = PhysicalMemory(PERI_BASE + CM_BASE)
# pre calculated variables for control blocks
self._delay_info = (DMA_TI_NO_WIDE_BURSTS | DMA_SRC_IGNORE
| DMA_TI_PER_MAP(DMA_TI_PER_MAP_PWM)
| DMA_TI_DEST_DREQ)
self._delay_destination = PHYSICAL_PWM_BUS + PWM_FIFO
self._delay_stride = 0
self._pulse_info = (DMA_TI_NO_WIDE_BURSTS | DMA_TI_TDMODE
| DMA_TI_WAIT_RESP)
self._pulse_destination = PHYSICAL_GPIO_BUS + GPIO_SET_OFFSET
# YLENGTH is transfers count and XLENGTH size of each transfer
self._pulse_length = (DMA_TI_TXFR_LEN_YLENGTH(2)
| DMA_TI_TXFR_LEN_XLENGTH(4))
self._pulse_stride = (DMA_TI_STRIDE_D_STRIDE(12)
| DMA_TI_STRIDE_S_STRIDE(4))
self.logger = logging.getLogger(__name__ + ".DMAGPIO")
self.blocks_queue = deque()
def add_pulses(self, blocks):
for b in blocks:
self.blocks_queue.append(b)
def update(self):
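        # Refill strategy: the allocated control-block memory is treated as a
        # ring buffer. Pending pulses from blocks_queue are written behind the
        # block the DMA engine is currently executing, and the previously last
        # block is re-pointed at the newly written ones, so the stream keeps
        # running without overwriting blocks that are still in flight.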
if len(self.blocks_queue) > 0:
            # Read this before the is_active() check, in case the DMA stops in between
running_address = self.current_control_block()
if not self.is_active():
                # Can start from the beginning
self.clear()
self._insert_blocks(min(self._BLOCK_NUMBER, len(self.blocks_queue)))
self.logger.debug("Started a DMA session")
self.run()
else:
current_end = self.__current_address
if current_end >= self._phys_memory.get_size():
# Insert at beginning [++++^---------]
if 1 < running_address < self._phys_memory.get_size():
self.logger.debug("Filling starter gap blocks")
self.__current_address = 0
self._insert_blocks(min(len(self.blocks_queue), running_address - 1))
self._redirect_block(current_end-self._DMA_CONTROL_BLOCK_SIZE, 0)
else:
self.logger.debug("Cannot fill starter gap cos of strange running_address")
elif current_end > running_address:
# Insert next block [ ^----++++]
self.logger.debug("Filling Tight Gap Blocks")
self._insert_blocks(min(len(self.blocks_queue), self._phys_memory.get_size()-current_end))
self._redirect_block(current_end-self._DMA_CONTROL_BLOCK_SIZE, current_end)
elif current_end < running_address:
# Insert next block [----++++++^---]
self.logger.debug("Filling Healthy Gap Blocks")
self._insert_blocks(min(len(self.blocks_queue), running_address-current_end))
self._redirect_block(current_end-self._DMA_CONTROL_BLOCK_SIZE, current_end)
elif current_end == running_address:
self.logger.debug("It caught me, or I caught it?")
else:
self.logger.debug("Nothing to do")
else:
self.logger.debug("Nothing to do")
def _redirect_block(self, block_address, target_block_address):
if block_address >= self._phys_memory.get_size() or target_block_address >= self._phys_memory.get_size():
raise IndexError("Referenced blocks are outside allocated memory")
self._phys_memory.write_int(block_address + 20, self._phys_memory.get_bus_address()+target_block_address)
def _insert_blocks(self, number):
if self.__current_address + (number * self._DMA_CONTROL_BLOCK_SIZE) > self._phys_memory.get_size():
raise MemoryError("Out of allocated memory.")
elif number > len(self.blocks_queue) :
raise IndexError("Cannot insert this many blocks.")
# The address of the first CB to add
current_address = self.__current_address
# Loop thru pulses
for _ in range(number):
p = self.blocks_queue.popleft()
# Address of next cb
next1 = current_address + self._DMA_CONTROL_BLOCK_SIZE + self._phys_memory.get_bus_address()
source = next1 - 8 # last 8 bytes are padding, use it to store data
if len(p) > 1:
data = (
self._pulse_info, source, self._pulse_destination,
self._pulse_length, self._pulse_stride, next1,
p[0], p[1]
)
else:
# Is a delay
length = p[0] << 4 # * 16
data = (
self._delay_info, source, self._delay_destination, length,
self._delay_stride, next1, 0, 0
)
self._phys_memory.write(current_address, "8I", data)
current_address += self._DMA_CONTROL_BLOCK_SIZE
oldaddr = self.__current_address
self.__current_address = current_address
# Finalise the block
self._phys_memory.write_int(self.__current_address + 20
- self._DMA_CONTROL_BLOCK_SIZE, 0)
self.logger.info("DMA Inserted and Finalised {}MB".format(round((self.__current_address - oldaddr)/1048576.0, 2)))
def run_stream(self):
""" Run DMA module in stream mode, i.e. does'n finalize last block
and do not check if there is anything to do.
"""
        # configure the PWM hardware module which will clock the DMA
self._pwm.write_int(PWM_CTL, 0)
# disable
self._clock.write_int(CM_PWM_CNTL, CM_PASSWORD | CM_SRC_PLLD)
while (self._clock.read_int(CM_PWM_CNTL) & CM_CNTL_BUSY) != 0:
time.sleep(0.00001) # 10 us, wait until BUSY bit is clear
# configure, 100 MHz
self._clock.write_int(CM_PWM_DIV, CM_PASSWORD | CM_DIV_VALUE(5))
self._clock.write_int(CM_PWM_CNTL,
CM_PASSWORD | CM_SRC_PLLD | CM_CNTL_ENABLE)
self._pwm.write_int(PWM_RNG1, 100)
self._pwm.write_int(PWM_DMAC, PWM_DMAC_ENAB | PWM_DMAC_PANIC(15)
| PWM_DMAC_DREQ(15))
self._pwm.write_int(PWM_CTL, PWM_CTL_CLRF)
# enable
self._pwm.write_int(PWM_CTL, PWM_CTL_USEF1 | PWM_CTL_PWEN1)
super(DMAGPIO, self)._run_dma()
def run(self):
""" Run DMA module and start sending specified pulses.
:param loop: If true, run pulse sequence in infinite loop. Otherwise
"""
if self.__current_address == 0:
raise RuntimeError("Nothing was added.")
self.run_stream()
def stop(self):
""" Stop any DMA activities.
"""
self._pwm.write_int(PWM_CTL, 0)
super(DMAGPIO, self)._stop_dma()
def clear(self):
""" Remove any specified pulses. Doesn't affect currently running
sequence.
"""
self.__current_address = 0
def current_address(self):
""" Get current buffer offset.
:return: current buffer offset in bytes.
"""
return self.__current_address
def control_block_size(self):
""" Get control block size.
:return: control block size in bytes.
"""
return self._DMA_CONTROL_BLOCK_SIZE
class DMAPWM(DMAProto):
_DMA_CONTROL_BLOCK_SIZE = 32
_DMA_DATA_OFFSET = 24
_TOTAL_NUMBER_OF_BLOCKS = 58880
_DMA_CHANNEL = 14
def __init__(self):
""" Initialise PWM. PWM has 8 bit resolution and fixed frequency
(~11.5 KHz and may flow). Though duty cycle is quite precise and
it uses the minimum amount of system resources (just one lite DMA
channel without any anything else).
That's why such PWM is best to use with collector motors, heaters
and other non sensitive hardware.
Implementation is super simple and uses lite DMA channel.
Overall frequency depends on number of blocks.
To adjust frequency, just write more byte per operation, use Wait
Cycles in info field of control blocks.
"""
super(DMAPWM, self).__init__(self._TOTAL_NUMBER_OF_BLOCKS
* self._DMA_CONTROL_BLOCK_SIZE,
self._DMA_CHANNEL)
self._clear_pins = dict()
# first control block always set pins
self.__add_control_block(0, GPIO_SET_OFFSET)
# fill control blocks
for i in range(1, self._TOTAL_NUMBER_OF_BLOCKS):
self.__add_control_block(i * self._DMA_CONTROL_BLOCK_SIZE,
GPIO_CLEAR_OFFSET)
# loop
self._phys_memory.write_int((self._TOTAL_NUMBER_OF_BLOCKS - 1)
* self._DMA_CONTROL_BLOCK_SIZE + 20,
self._phys_memory.get_bus_address())
self._gpio = PhysicalMemory(PERI_BASE + GPIO_REGISTER_BASE)
def __add_control_block(self, address, offset):
ba = self._phys_memory.get_bus_address() + address
data = (
DMA_TI_NO_WIDE_BURSTS | DMA_TI_WAIT_RESP
| DMA_TI_DEST_INC | DMA_TI_SRC_INC, # info
ba + self._DMA_DATA_OFFSET, # source, use padding for storing data
PHYSICAL_GPIO_BUS + offset, # destination
4, # length
0, # stride
ba + self._DMA_CONTROL_BLOCK_SIZE, # next control block
            0, # padding, used as data storage
0 # padding
)
self._phys_memory.write(address, "8I", data)
def add_pin(self, pin, duty_cycle):
""" Add pin to PMW with specified duty cycle.
:param pin: pin number to add.
:param duty_cycle: duty cycle 0..100 which represent percents.
"""
assert 0 <= duty_cycle <= 100
self.remove_pin(pin)
block_number = int(duty_cycle * self._TOTAL_NUMBER_OF_BLOCKS
/ 100.0)
if block_number == 0:
self._gpio.write_int(GPIO_CLEAR_OFFSET, 1 << pin)
elif block_number == self._TOTAL_NUMBER_OF_BLOCKS:
self._gpio.write_int(GPIO_SET_OFFSET, 1 << pin)
self._clear_pins[pin] = self._DMA_DATA_OFFSET
else:
value = self._phys_memory.read_int(self._DMA_DATA_OFFSET)
value |= 1 << pin
self._phys_memory.write_int(self._DMA_DATA_OFFSET, value)
clear_address = (block_number * self._DMA_CONTROL_BLOCK_SIZE
+ self._DMA_DATA_OFFSET)
value = self._phys_memory.read_int(clear_address)
value |= 1 << pin
self._phys_memory.write_int(clear_address, value)
self._clear_pins[pin] = clear_address
if not self.is_active():
super(DMAPWM, self)._run_dma()
def remove_pin(self, pin):
""" Remove pin from PWM
:param pin: pin number to remove.
"""
assert 0 <= pin < 32
if pin in self._clear_pins.keys():
address = self._clear_pins[pin]
value = self._phys_memory.read_int(address)
value &= ~(1 << pin)
self._phys_memory.write_int(address, value)
value = self._phys_memory.read_int(self._DMA_DATA_OFFSET)
value &= ~(1 << pin)
self._phys_memory.write_int(self._DMA_DATA_OFFSET, value)
del self._clear_pins[pin]
self._gpio.write_int(GPIO_CLEAR_OFFSET, 1 << pin)
if len(self._clear_pins) == 0 and self.is_active():
super(DMAPWM, self)._stop_dma()
def remove_all(self):
""" Remove all pins from PWM and stop it.
"""
pins_list = self._clear_pins.keys()
for pin in pins_list:
self.remove_pin(pin)
assert len(self._clear_pins) == 0
class DMAWatchdog(DMAProto):
_DMA_CONTROL_BLOCK_SIZE = 32
_DMA_CHANNEL = 13
_DMA_BLOCKS = 2047
def __init__(self):
""" Initialize hardware watchdog timer.
"""
super(DMAWatchdog, self).__init__(self._DMA_CONTROL_BLOCK_SIZE
* (self._DMA_BLOCKS + 1),
self._DMA_CHANNEL)
def start(self):
""" Arm watchdog for ~15 seconds. If watchdog wasn't fed in 15 seconds,
GPIO pins 0-29 will be switched to input to prevent any output from
them.
"""
data = ()
ba = self._phys_memory.get_bus_address()
        # the first blocks are just a delay
for _ in range(0, self._DMA_BLOCKS):
data += (
DMA_TI_NO_WIDE_BURSTS | DMA_DEST_IGNORE | DMA_TI_WAIT_RESP
| DMA_TI_WAITS(31),
ba + 24, ba + 28, 65535,
0, ba + self._DMA_CONTROL_BLOCK_SIZE, 0, 0
)
ba += self._DMA_CONTROL_BLOCK_SIZE
        # The last block writes zeros (switching pins to the input state) into
        # the GPIO FSEL registers. In normal operation it should never be
        # reached before the watchdog timeout expires.
data += (
DMA_TI_NO_WIDE_BURSTS | DMA_TI_WAIT_RESP | DMA_TI_DEST_INC,
ba + 24, PHYSICAL_GPIO_BUS + GPIO_FSEL_OFFSET, 12,
0, 0, 0, 0
)
self._phys_memory.write(0, str(len(data)) + "I", data)
super(DMAWatchdog, self)._run_dma()
def stop(self):
""" Disarm watchdog.
"""
super(DMAWatchdog, self)._stop_dma()
def feed(self):
""" Feed watchdog, restart waiting loop from the very beginning.
"""
self._dma.write_int(self._DMA_CHANNEL_ADDRESS + DMA_NEXTCONBK,
self._phys_memory.get_bus_address())
# for testing purpose
def main():
pin = 21
g = GPIO()
g.init(pin, GPIO.MODE_INPUT_NOPULL)
print("nopull " + str(g.read(pin)))
g.init(pin, GPIO.MODE_INPUT_PULLDOWN)
print("pulldown " + str(g.read(pin)))
g.init(pin, GPIO.MODE_INPUT_PULLUP)
print("pullup " + str(g.read(pin)))
time.sleep(1)
g.init(pin, GPIO.MODE_OUTPUT)
g.set(pin)
print("set " + str(g.read(pin)))
time.sleep(1)
g.clear(pin)
print("clear " + str(g.read(pin)))
time.sleep(1)
cma = CMAPhysicalMemory(1*1024*1024)
print(str(cma.get_size() / 1024 / 1024) + "MB of memory allocated at "
+ hex(cma.get_phys_address()))
a = cma.read_int(0)
print("was " + hex(a))
cma.write_int(0, 0x12345678)
a = cma.read_int(0)
assert a == 0x12345678, "Memory isn't written or read correctly"
print("now " + hex(a))
del cma
dg = DMAGPIO()
# TODO: Re-add example
print("dmagpio is started")
try:
print("press enter to stop...")
sys.stdin.readline()
except KeyboardInterrupt:
pass
dg.stop()
g.clear(pin)
print("dma stopped")
pwm = DMAPWM()
pwm.add_pin(pin, 20)
print("pwm is started")
try:
print("press enter to stop...")
sys.stdin.readline()
except KeyboardInterrupt:
pass
pwm.remove_pin(pin)
print("pwm stopped")
if __name__ == "__main__":
main()
```
#### File: drawpi/hardware/plotter.py
```python
from drawpi import config
from drawpi.point import Point
from drawpi.utils import frequency_to_delay, mm_to_steps
from drawpi.hardware.steppers import XYSteppers
import logging
import time
import pigpio
from drawpi.hardware.hardware_internals.rpgpio import GPIO, DMAGPIO
logger = logging.getLogger(__name__)
class Plotter:
'''Manages the plotter, and its capabilities'''
def __init__(self):
# Store location in STEPS
self.location = Point(0, 0)
# Store current direction
self.direction = (True, True)
self.gpio = GPIO()
self.dma = DMAGPIO()
self.pi = pigpio.pi()
# Setup pins as outputs/inputs
self.setup_pins()
self.pulses = []
logger.info("Plotter is ready")
def _get_pulse(self, pins, delay=0):
setmask = 0
resetmask = 0
for pin, state in pins.items():
if state:
setmask |= 1 << pin
else:
resetmask |= 1 << pin
return (setmask, resetmask, delay)
def add_pulse(self, pulse):
self.pulses.append((pulse[0], pulse[1]))
if len(pulse) > 2:
self.pulses.append((pulse[2],))
if len(self.pulses) > config.PREFERRED_PULSE_BATCH:
logger.debug("Committing Pulses")
# Enable steppers
self.gpio.clear(config.ENABLE_STEPPER)
self.dma.add_pulses(self.pulses)
self.pulses = []
self.dma.update()
def flush_pulses(self):
logger.debug("Flushing Pulses")
self.dma.add_pulses(self.pulses)
self.pulses = []
self.dma.update()
def wait_till_idle(self):
self.flush_pulses()
logger.debug("Waiting till Idle")
while self.dma.is_active():
self.dma.update()
time.sleep(0.1)
def stop(self):
# Disable the steppers first
self.gpio.set(config.ENABLE_STEPPER)
self.dma.stop()
def shutdown(self):
'''Shutdown tidily'''
self.stop()
self.pi.set_servo_pulsewidth(config.PEN_SERVO, config.PEN_UP_PULSE)
self.pi.stop()
def _set_direction(self, dirX, dirY):
set = 0
reset = 0
if dirX != config.X_INVERTED:
set |= 1 << config.X_DIR
else:
reset |= 1 << config.X_DIR
if dirY != config.Y_INVERTED:
set |= 1 << config.Y_DIR
else:
reset |= 1 << config.Y_DIR
self.add_pulse((set, reset, 0))
def _pulse_steppers(self, stepx, stepy, delay):
bitmask = 0
if stepx:
bitmask |= 1 << config.X_STEP
if stepy:
bitmask |= 1 << config.Y_STEP
self.add_pulse((bitmask, 0, int(delay / 2)))
self.add_pulse((0, bitmask, int((delay + 1) / 2)))
def _get_steps_to(self, point):
# Get the steps to a point
diff=point - self.location
return diff.x, diff.y
def goto(self, point):
logger.info("GOTO " + str(point))
# Get no. steps to endpoint
x, y=self._get_steps_to(point)
dirx=diry=True
# If steps are negative, change direction and make steps positive
if (x < 0):
dirx=False
x=abs(x)
if (y < 0):
diry=False
y=abs(y)
# Add the pulse for setting the direction
self._set_direction(dirx, diry)
# Rate is a frequency, get us between pulses
delay_per_pulseset=frequency_to_delay(mm_to_steps(config.GOTO_RATE))
# -DEBUG-
pulse_count=0
# Add pulses until correct no. of steps is achieved.
while (x > 0) or (y > 0):
# Decrement
if x > 0:
x -= 1
if y > 0:
y -= 1
# Add the pulse for the steppers
self._pulse_steppers(x > 0, y > 0, delay_per_pulseset)
pulse_count += 1
logger.debug("GOTO generated {} pulses".format(pulse_count))
# Update location
self.location=point
def penup(self):
'''Set servo to move pen up'''
self.wait_till_idle()
self.pi.set_servo_pulsewidth(config.PEN_SERVO, config.PEN_UP_PULSE)
time.sleep(config.PEN_MOVE_DELAY)
def pendown(self):
'''Set servo to move pen down'''
self.wait_till_idle()
self.pi.set_servo_pulsewidth(config.PEN_SERVO, config.PEN_DOWN_PULSE)
time.sleep(config.PEN_MOVE_DELAY)
def zero_me(self):
'''Zero the plotter(move it to home)'''
# The delay in us
delay=frequency_to_delay(mm_to_steps(config.ZERO_RATE))
# 1 Second of waves are generated at a time(just in case)
safety_pregen_no=mm_to_steps(config.ZERO_RATE)
# For each axis
for is_x, triggerp, extent, endinverted in [
[True,
config.X_MIN, config.X_EXTENT, config.X_END_INVERTED],
[False, config.Y_MIN, config.Y_EXTENT, config.Y_END_INVERTED]
]:
self.wait_till_idle()
theoretical_maximum_steps = mm_to_steps(extent)
steps_moved = 0
while steps_moved < theoretical_maximum_steps:
# This limits just in case of malfunction (inversion for endstops -> high when pressed)
endstop = (self.gpio.read(triggerp) == 1) == endinverted
if endstop:
self.stop()
logger.info("ZERO AXIS SUCCESS")
break
if not self.dma.is_active():
self._set_direction(False, False)
for _ in range(safety_pregen_no):
steps_moved += 1
self._pulse_steppers(is_x, not is_x, delay)
# We are now zeroed.
self.location=Point(0, 0)
def draw_line(self, start, finish, rate):
logger.info("LINE from {} to {}".format(str(start), str(finish)))
# ensure at start point
if self.location != start:
self.goto(start)
# Get no. steps to endpoint
x, y=self._get_steps_to(finish)
dirx=diry=True
# If steps are negative, change direction and make steps positive
if (x < 0):
dirx=False
x=abs(x)
if (y < 0):
diry=False
y=abs(y)
# Add the pulse for setting the direction
self._set_direction(dirx, diry)
# generate pulses
pulse_count=self._generate_line_pulses((x, y), mm_to_steps(rate))
logger.debug("LINE generated {} pulses".format(pulse_count))
self.location=finish
def _generate_line_pulses(self, steps, rate):
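        # Integer error-accumulator (Bresenham-style) interleaving: fxy starts
        # at dx - dy, drops by dy on every x step and grows by dx on every y
        # step, which spreads the x and y pulses evenly along the line.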
x=y=0
dx, dy=steps
fxy=dx - dy
delay=frequency_to_delay(rate)
# -DEBUG-
pulse_count=0
while((x != dx) or (y != dy)):
pulse_count += 1
# The endpoint has not been reached
# Works along the line in zigzag, going towards 'ideal' until achieved
# and then switching to other axis
if fxy > 0:
self._pulse_steppers(True, False, delay)
x += 1
fxy -= dy
else:
self._pulse_steppers(False, True, delay)
y += 1
fxy += dx
return pulse_count
def setup_pins(self):
# Stepper driver pins are outputs
self.gpio.init(config.X_DIR, GPIO.MODE_OUTPUT)
self.gpio.init(config.Y_DIR, GPIO.MODE_OUTPUT)
self.gpio.init(config.X_STEP, GPIO.MODE_OUTPUT)
self.gpio.init(config.Y_STEP, GPIO.MODE_OUTPUT)
# Set servo to default pen up position.
self.gpio.init(config.PEN_SERVO, GPIO.MODE_OUTPUT)
self.pi.set_servo_pulsewidth(config.PEN_SERVO, config.PEN_UP_PULSE)
# Disable steppers for now
self.gpio.init(config.ENABLE_STEPPER, GPIO.MODE_OUTPUT)
self.gpio.set(config.ENABLE_STEPPER)
```
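The stepping loop in `_generate_line_pulses` above can be illustrated without any Raspberry Pi hardware. The sketch below reproduces only the interleaving decision (the function name `interleave_steps` is invented for this example) and returns the order in which x and y pulses would be emitted:
```python
def interleave_steps(dx, dy):
    """Reproduce the step interleaving used by Plotter._generate_line_pulses."""
    x = y = 0
    fxy = dx - dy
    steps = []
    while (x != dx) or (y != dy):
        if fxy > 0:
            steps.append('x')  # pulse the X stepper
            x += 1
            fxy -= dy
        else:
            steps.append('y')  # pulse the Y stepper
            y += 1
            fxy += dx
    return steps

print(interleave_steps(5, 2))  # ['x', 'x', 'y', 'x', 'x', 'y', 'x']
```
With dx=5 and dy=2 the x pulses outnumber the y pulses in the ratio of the step counts, which is the behaviour the plotter relies on to draw straight lines.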
#### File: Jovascript/DrawPy/webserver.py
```python
from flask import Flask, render_template, send_file
from flask_socketio import SocketIO, emit
from drawpi.svgreader import slice, parse
from threading import Thread
import logging
from drawpi.runner import main
app = Flask(__name__)
app.config['SECRET_KEY'] = 'TopSecretMagic'
socketio = SocketIO(app)
thread = None
class RunningThread(Thread):
def __init__(self, commands, *args, **kwargs):
super().__init__(*args, **kwargs)
self.commands = commands
def run(self):
try:
main(self.commands)
socketio.emit("StatusUpdate", {"error": False, "text":"Draw Completed Successfully"})
except Exception as e:
logging.error(str(e))
socketio.emit("StatusUpdate", {"error": True, "text":"The application errored: "+ str(e)})
socketio.emit("NewStatus", "ready")
class LogSenderHandler(logging.Handler):
def emit(self, record):
log = self.format(record)
socketio.emit("CommandOutput", log)
return True
@app.route("/")
def index():
return send_file("site/index.html")
@socketio.on("SVGLoad")
def load_svg(text):
emit("SVGProcessed", slice(parse(text)))
@socketio.on("RunCommands")
def run_commands(commands):
global thread
if thread is None or not thread.is_alive():
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.DEBUG,
handlers=[LogSenderHandler()]
)
thread = RunningThread(commands)
thread.start()
socketio.emit("NewStatus", "running")
emit("StatusUpdate", {"error": False, "text":"Draw has begun"})
else:
emit("StatusUpdate", {"error": True, "text":"Cannot Start while Running"})
def run():
socketio.run(app)
if __name__ == "__main__":
run()
``` |
{
"source": "Jovascript/laggard-parser",
"score": 3
} |
#### File: laggard-parser/laggard/abstracts.py
```python
from typing import List, Callable
from laggard import Buffer
from laggard.exceptions import ParseException
from laggard import helpers
class Parser:
def __init__(self, source: str):
self.source = source
self.buffer = self._get_buffer(source)
self.stack: List[str] = []
self.error_stack: List[ParseException] = []
self._mark_name: str = None
def _get_buffer(self, source:str) -> Buffer:
return Buffer(source)
def parse(self):
"""
Begin the parse.
If the parse is unsuccessful, it will throw ParseException.
Ensures that the end of the string provided is reached.
Returns:
The result of the start rule.
"""
result = self.parse_start()
if not self.buffer.is_eof():
raise ParseException("Did not consume whole file.")
return result
def parse_start(self):
raise NotImplementedError
def expect(self, literal:str):
"""
Attempts to parse the given literal. Will skip until the first char, and then no more.
Args:
literal: The literal to match
Returns:
The literal matched
"""
return helpers.expect(self.buffer, literal)
def expectOneOf(self, charset: List[str], skip: bool = True):
"""
Attempts to parse a character from charset.
Args:
charset: The characters to accept
skip: Whether it should skip the specified characters in buffer.
Returns:
The char matched
"""
return helpers.expectOneOf(self.buffer, charset, skip)
def expectManyOutOf(self, charset: List[str]):
"""
Attempts to greedily parse characters from charset. It will parse at least one, or error.
It will skip the specified chars until the first matching character, and then it will cease skipping.
Args:
charset: The characters to accept
Returns:
A string of the characters it managed to parse.
"""
return helpers.expectManyOutOf(self.buffer, charset)
def parseMultipleOf(self, parser: Callable, accept_none: bool = False):
"""
Attempts to parse many of the provided parser rule.
Args:
parser: The callable which parses the rule
accept_none: Whether it raises if it cannot parse even 1 of the rule
Returns:
A list of the parses it managed to do
"""
return helpers.parseMultipleOf(self.buffer, parser, accept_none)
def parseUntil(self, charset: List[str]):
"""
Parses characters until it finds a character in charset
Args:
charset: The characters to look out for
Returns:
A string of all characters parsed
"""
return helpers.parseUntil(self.buffer, charset)
def __call__(self, name: str):
"""
        Allows the context manager section to be marked with the name of the rule, for easier debugging.
Args:
name: The marker name
Returns:
Self
"""
self._mark_name = name
return self
def __enter__(self):
self.stack.append(self._mark_name)
self.buffer.mark()
self._mark_name = None
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
            self.error_stack.append(exc_val)
self.buffer.abandon()
else:
self.buffer.commit()
```
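`Parser` above is abstract: a concrete grammar supplies `parse_start`. Below is a minimal sketch of such a subclass, assuming the `laggard.helpers` functions behave as the docstrings above describe; the class, rule name and grammar are invented for illustration:
```python
from laggard.abstracts import Parser

class KeyValueParser(Parser):
    """Parses a single 'name:digits' pair, e.g. 'count:42'."""

    def parse_start(self):
        with self("pair"):
            key = self.expectManyOutOf(list("abcdefghijklmnopqrstuvwxyz"))
            self.expect(":")
            value = self.expectManyOutOf(list("0123456789"))
            return key, int(value)

print(KeyValueParser("count:42").parse())  # expected: ('count', 42)
```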
#### File: laggard-parser/laggard/buffer.py
```python
from collections import namedtuple
from typing import List, Union
from laggard.exceptions import ParseException
from laggard.infoholders import TextPosition
StackEntry = namedtuple("StackEntry", ["pos", "name"])
class Buffer:
"""
Represents the incoming stream of characters for the parser, provided by a source string.
An instance of :class:`Buffer` is a `context manager <http://book.pythontips.com/en/latest/context_managers.html#context-managers>`_.
Examples:
To parse the string "hello"::
buf = Buffer("hello")
# Execute a parser with buf now
In a parsing function, to make the buffer revert if a :class:`~laggard.exceptions.ParseException` occurs, simply use buffer as a context manager::
with buffer:
if is_bad_text():
raise ParseException
else:
return "good text :)"
If the function returns successfully, the buffer will be moved forward(:meth:`commit`), otherwise it will :meth:`abandon`.
"""
def __init__(self, source: str, skip: List = []):
"""
Args:
source: A string, which is used as the text to be parsed.
skip: A list of the characters which the buffer will skip; they will not appear in the results of :meth:`fetch`.
"""
self.source = source
self.stack: List[int] = []
self.current_index = 0
self.skip = skip
def _get_position_from_index(self, index):
# Line number should start at 1
line_number = self.source.count("\n", 0, index) + 1
column_number = index - self.source.rfind("\n", 0, index)
return TextPosition(line_number, column_number)
@property
def last_pos(self) -> TextPosition:
"""The last position the stack was located."""
if len(self.stack):
return self._get_position_from_index(self.stack[-1])
else:
return TextPosition(0, 0)
@property
def current_pos(self) -> TextPosition:
"""The current position of the buffer, as a tuple (line number, column number)"""
return self._get_position_from_index(self.current_index)
def fetch_char(self, skip=True) -> str:
try:
while True:
x = self.source[self.current_index]
self.current_index += 1
if x not in self.skip or (not skip):
return x
except IndexError:
return "[EOF]"
def fetch(self, count: int = 1, skip: Union[str, bool] = True) -> str:
"""
Fetches the next section from the buffer
Args:
count: Length of required string
skip: Whether the Buffer's skip property should be respected.
Returns:
A string of specified length
"""
retval = ""
# Skip until the first char
if skip == "initial":
retval += self.fetch_char(True)
count -= 1
skip = False
for i in range(count):
retval += self.fetch_char(skip)
return retval
def peek(self, count: int = 1, skip: bool = True) -> str:
self.mark()
try:
return self.fetch(count, skip)
finally:
self.abandon()
def mark(self):
"""
Adds the current location to the stack.
"""
        self.stack.append(self.current_index)
def abandon(self):
"""
Reverts to last recorded position on the stack
"""
self.current_index = self.stack.pop()
def commit(self):
"""
Progresses on the buffer.
"""
self.stack.pop()
def cry(self, message: str):
"""Raises a parse error with the location detailed.
Args:
message: The simple parse message, not a full sentence.
"""
raise ParseException("Failed to parse: {} at {}".format(message, self.last_pos))
def __enter__(self):
self.mark()
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type:
self.abandon()
else:
self.commit()
    def is_eof(self):
        return self.current_index + 1 >= len(self.source)
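if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module):
    # a successful `with` block commits the mark, so the next fetch resumes
    # where the block left off; characters in `skip` are passed over.
    buf = Buffer("hello world", skip=[" "])
    with buf:
        assert buf.fetch(5) == "hello"
    print(buf.fetch(5))  # -> "world"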
``` |
{
"source": "Jovascript/pseudo-fun",
"score": 2
} |
#### File: pseudofun/pseudocode/python_gen.py
```python
from tatsu.codegen import ModelRenderer
from tatsu.codegen import CodeGenerator
from tatsu.synth import synthesize
from tatsu.semantics import Node
import sys
from tatsu.model import ModelBuilderSemantics
symbol_conversions = {
'AND': 'and',
'OR': 'or',
'<>': '!=',
'=': '=='
}
class PseudoPythonSemantics(ModelBuilderSemantics):
def SYMBOL(self, ast):
if ast in symbol_conversions:
ast = symbol_conversions[ast]
return ast
THIS_MODULE = sys.modules[__name__]
class PythonCodeGenerator(CodeGenerator):
def __init__(self):
super(PythonCodeGenerator, self).__init__(modules=[THIS_MODULE])
class Script(ModelRenderer):
template = '''{lines::\n:}'''
class Block(ModelRenderer):
template = '''{lines:1:\n:}'''
class ForStatement(ModelRenderer):
template = '''\
for {identifier} in range(int({start}), int({end})+1):
{body}
'''
class WhileStatement(ModelRenderer):
template = '''\
while {condition}:
{body}
'''
class RepeatStatement(ModelRenderer):
template = '''\
while True:
{body}
if {condition}:
break
'''
class ConditionalStatement(ModelRenderer):
template = '''\
if {condition}:
{body}
'''
class AssignmentStatement(ModelRenderer):
template = '''\
{identifier} = {expression}'''
class Identifier(ModelRenderer):
template = '''\
{name}'''
class SumExpression(ModelRenderer):
template = '''\
{left} {operator} {right}\
'''
class BracketedExpression(ModelRenderer):
template = '''\
({expression})\
'''
class FunctionCall(ModelRenderer):
template = '''\
{name}{arguments}'''
class ArgumentList(ModelRenderer):
template = '''({arguments::,:})'''
class FunctionDef(ModelRenderer):
template = '''\
def {name}{arguments}:
{body}
'''
class ReturnStatement(ModelRenderer):
template = '''return {expression}'''
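# Illustrative sketch of the intended output (the grammar and model objects
# live elsewhere in the project, so the exact pseudocode syntax below is an
# assumption). A loop such as
#
#     FOR i = 1 TO 3
#         x = x + i
#     NEXT i
#
# would be rendered by the ForStatement and AssignmentStatement templates
# above roughly as:
#
#     for i in range(int(1), int(3)+1):
#         x = x + i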
``` |
{
"source": "jovechina/ABBYY-Cloud-OCR-PoC",
"score": 2
} |
#### File: ABBYY-Cloud-OCR-PoC/abbyy_cloud/cloud_api.py
```python
import shutil
import requests
import json
from utils.env_variables import Endpoint, Auth
from logging import getLogger
logger = getLogger(__name__)
class ABBYYCloudAPI:
"""
Call abbyy cloud ocr api with project id and pwd
"""
image = None
setting = None
app_id = Auth.abbyy_appid
app_pwd = <PASSWORD>
cloud_api = Endpoint.abbyy_api
def __init__(self, image, setting):
self.image = image
self.setting = setting
def text_detection(self):
url_params = {
"language": self.setting.Language,
"exportFormat": self.setting.OutputFormat
}
request_url = self.get_request_url("processImage")
response = requests.post(request_url, data=self.image, params=url_params,
auth=(self.app_id, self.app_pwd))
task = self.decode_response(response.text)
return task
def get_task_status(self, task):
if task.Id.find('00000000-0') != -1:
# GUID_NULL is being passed. This may be caused by a logical error in the calling code
logger.error("Null task id passed")
return None
url_params = {"taskId": task.Id}
status_url = self.get_request_url("getTaskStatus")
response = requests.get(status_url, params=url_params,
auth=(self.app_id, self.app_pwd), )
task = self.decode_response(response.text)
return task
def decode_response(self, json_response):
""" Decode json response of the server. Return Task object """
# logger.debug("json_response:{}".format(json_response))
response_value = json.loads(json_response)
task = Task()
task.Id = response_value["taskId"]
task.Status = response_value["status"]
if task.Status == "Completed":
task.DownloadUrl = response_value["resultUrls"][0]
return task
def download_result(self, task, output_path):
get_result_url = task.DownloadUrl
if get_result_url is None:
print("No download URL found")
return
file_response = requests.get(get_result_url, stream=True)
with open(output_path, 'wb') as output_file:
shutil.copyfileobj(file_response.raw, output_file)
def get_request_url(self, url):
return self.cloud_api.strip('/') + '/' + url.strip('/')
class Task:
Status = "Unknown"
Id = None
DownloadUrl = None
def is_active(self):
if self.Status == "InProgress" or self.Status == "Queued":
return True
else:
return False
class ProcessingSettings:
Language = "Japanese, English"
OutputFormat = "docx"
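if __name__ == "__main__":
    # Usage sketch (illustrative): the image path, polling interval and output
    # file are assumptions; credentials come from utils.env_variables as above.
    import time
    with open("sample.jpg", "rb") as image_file:
        api = ABBYYCloudAPI(image_file.read(), ProcessingSettings())
    task = api.text_detection()
    while task is not None and task.is_active():
        time.sleep(2)
        task = api.get_task_status(task)
    if task is not None and task.Status == "Completed":
        api.download_result(task, "result.docx")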
``` |
{
"source": "jovechina/google-ocr-demo",
"score": 3
} |
#### File: google-ocr-demo/response_handler/res_data_conversion.py
```python
from logging import getLogger
import re
from pykakasi import kakasi
import jaconv
logger = getLogger(__name__)
class DataConversion:
def __init__(self, row_list, description):
self.kks = kakasi()
self.data = row_list
self.description = description
self.result = {}
def data_conversion(self):
self.fetch_by_field_name()
def fetch_by_field_name(self):
"""
fetch value data from pre-defined label
assume one contain one label
remove "break" of not last label in case more than one labels in same row
"""
breakflag = 0
for rn in range(len(self.data)):
# post code
if "〒" in self.data[rn]:
post_code = self.data[rn]
post_code = post_code.partition("〒")[2]
breakflag += 1
self.result_update("post_code", post_code)
# ITEM2
# ITEM3
# ITEM4
# when all item completed, exit the loop
if breakflag == 1:
break
def result_update(self, str_key, str_value):
result = self.result
logger.debug("result to be update, key: {}, value: {}".format(str_key, str_value))
if str_value:
if not result.__contains__(str_key):
result.update({str_key: str_value})
elif str_value != result[str_key]:
if str_value:
result.update({str_key: str_value})
self.result = result
```
#### File: jovechina/google-ocr-demo/test_google_ocr.py
```python
import unittest
import yaml
from google_vision.rest_api import GoogleVisionRestAPI
from google_vision.vision_client import GoogleVisionClient
from response_handler.res_data_conversion import DataConversion
from response_handler.vision_res_data_wrapper import VisionResDataWrapper
from utils.file_utils import FileUtils
from logging import getLogger, config
logger = getLogger(__name__)
class RestAPITestCase(unittest.TestCase):
def test_fail_case_1(self):
file_name = "<file_path>.jpg"
data_convert = self.__data_convert(file_name)
self.assertEqual("<value>", data_convert.result["<item>"])
def __data_convert(self, file_name):
# google client api
# content = FileUtils.load_image(file_name=file_name)
# vision_client = GoogleVisionClient(content)
# res_data = vision_client.text_detection()
# rest api
rest_api = GoogleVisionRestAPI(FileUtils.load_image_b64(file_name=file_name))
res_data = rest_api.document_text_detection()
wrapper = VisionResDataWrapper(res_data)
data_convert = DataConversion(wrapper.txt_2_column_list_by_row, wrapper.description)
data_convert.data_conversion()
return data_convert
if __name__ == '__main__':
config.dictConfig(yaml.load(open("logging.yaml").read(), Loader=yaml.SafeLoader))
unittest.main()
``` |
{
"source": "JoveH-H/path-following-control",
"score": 3
} |
#### File: path-following-control/controller/incremental_pid.py
```python
import numpy as np
import copy
class CU:
'''
    Incremental PID controller
'''
def __init__(self, kp, ki, kd):
'''
        Initialize the incremental PID controller.
        Args:
            kp: proportional gain
            ki: integral gain
            kd: derivative gain
'''
self.kp = kp
self.ki = ki
self.kd = kd
self.ep = 0.0
self.ei = 0.0
self.ed = 0.0
def update_e(self, e):
'''
        Update the error terms.
        Args:
            e: difference between the measured value and the setpoint
'''
self.ed = e - self.ep
self.ei = e + self.ep
self.ep = copy.deepcopy(e)
def get_ut(self):
'''
        Compute the change in the command value.
        Returns:
            ut: the command increment, clamped to ±π/5
'''
ut = self.kp * self.ep + self.ki * self.ei + self.kd * self.ed
if ut > np.pi / 5:
ut = np.pi / 5
elif ut < -np.pi / 5:
ut = -np.pi / 5
return ut
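# Usage sketch: feed the measured error each control tick and accumulate the
# returned increment into the actuator command. The gains and error values
# below are purely illustrative.
#
#     pid = CU(kp=1.0, ki=0.1, kd=0.05)
#     command = 0.0
#     for e in [0.4, 0.3, 0.1]:
#         pid.update_e(e)
#         command += pid.get_ut()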
if __name__ == "__main__":
input("位置式PID控制器,任意键退出")
```
#### File: path-following-control/controller/pure_pursuit.py
```python
import numpy as np
import math
class CU:
'''
    Pure pursuit controller
'''
def __init__(self, ld):
'''
        Initialize the pure pursuit controller.
        Args:
            ld: look-ahead distance
'''
self.ld = ld
def get_deltat(self, model, alpha):
'''
        Compute the front wheel steering angle.
        Args:
            model: the model being controlled
            alpha: desired heading angle
        Returns:
            delta: steering angle, clamped to ±π/5
'''
delta = math.atan2(2.0 * model.l * np.sin(alpha - model.theta) / self.ld, 1)
if delta > np.pi / 5:
delta = np.pi / 5
elif delta < -np.pi / 5:
delta = -np.pi / 5
return delta
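# Usage sketch: `model` is assumed to expose a wheelbase `l` and heading
# `theta`, matching get_deltat above; `alpha` is the bearing to the
# look-ahead point.
#
#     controller = CU(ld=2.0)
#     delta = controller.get_deltat(model, alpha)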
if __name__ == "__main__":
input("纯跟踪控制器,任意键退出")
``` |
{
"source": "joveh/qsim",
"score": 2
} |
#### File: qsim/qsimcirq/qsimh_simulator.py
```python
from typing import Union, Sequence
from cirq import study, ops, protocols, circuits, value, SimulatesAmplitudes
from qsimcirq import qsim
import qsimcirq.qsim_circuit as qsimc
class QSimhSimulator(SimulatesAmplitudes):
def __init__(self, qsimh_options: dict = {}):
self.qsimh_options = {'t': 1, 'f': 2, 'v': 0}
self.qsimh_options.update(qsimh_options)
def compute_amplitudes_sweep(
self,
program: circuits.Circuit,
bitstrings: Sequence[int],
params: study.Sweepable,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
) -> Sequence[Sequence[complex]]:
if not isinstance(program, qsimc.QSimCircuit):
program = qsimc.QSimCircuit(program, device=program.device)
n_qubits = len(program.all_qubits())
# qsim numbers qubits in reverse order from cirq
bitstrings = [format(bitstring, 'b').zfill(n_qubits)[::-1]
for bitstring in bitstrings]
options = {'i': '\n'.join(bitstrings)}
options.update(self.qsimh_options)
param_resolvers = study.to_resolvers(params)
trials_results = []
for prs in param_resolvers:
solved_circuit = protocols.resolve_parameters(program, prs)
options['c'] = solved_circuit.translate_cirq_to_qsim(qubit_order)
options.update(self.qsimh_options)
amplitudes = qsim.qsimh_simulate(options)
trials_results.append(amplitudes)
return trials_results
```
#### File: qsim/qsimcirq_tests/qsimcirq_test.py
```python
import numpy as np
import sympy
import unittest
import cirq
import qsimcirq
class MainTest(unittest.TestCase):
def test_cirq_too_big_gate(self):
# Pick qubits.
a, b, c, d, e, f, g = [
cirq.GridQubit(0, 0),
cirq.GridQubit(0, 1),
cirq.GridQubit(0, 2),
cirq.GridQubit(1, 0),
cirq.GridQubit(1, 1),
cirq.GridQubit(1, 2),
cirq.GridQubit(2, 0),
]
# Create a circuit with a gate larger than 6 qubits.
cirq_circuit = cirq.Circuit(cirq.IdentityGate(7).on(a, b, c, d, e, f, g))
qsimSim = qsimcirq.QSimSimulator()
with self.assertRaises(NotImplementedError):
qsimSim.compute_amplitudes(cirq_circuit, bitstrings=[0b0, 0b1])
def test_cirq_qsim_simulate(self):
# Pick qubits.
a, b, c, d = [
cirq.GridQubit(0, 0),
cirq.GridQubit(0, 1),
cirq.GridQubit(1, 1),
cirq.GridQubit(1, 0)
]
# Create a circuit
cirq_circuit = cirq.Circuit(
cirq.X(a)**0.5, # Square root of X.
cirq.Y(b)**0.5, # Square root of Y.
cirq.Z(c), # Z.
cirq.CZ(a, d) # ControlZ.
)
qsimSim = qsimcirq.QSimSimulator()
result = qsimSim.compute_amplitudes(
cirq_circuit, bitstrings=[0b0100, 0b1011])
assert np.allclose(result, [0.5j, 0j])
def test_cirq_qsim_simulate_fullstate(self):
# Pick qubits.
a, b, c, d = [
cirq.GridQubit(0, 0),
cirq.GridQubit(0, 1),
cirq.GridQubit(1, 1),
cirq.GridQubit(1, 0)
]
# Create a circuit.
cirq_circuit = cirq.Circuit(
cirq.Moment([
cirq.X(a)**0.5, # Square root of X.
cirq.H(b), # Hadamard.
cirq.X(c), # X.
cirq.H(d), # Hadamard.
]),
cirq.Moment([
cirq.X(a)**0.5, # Square root of X.
cirq.CX(b, c), # ControlX.
cirq.S(d), # S (square root of Z).
]),
cirq.Moment([
cirq.I(a),
cirq.ISWAP(b, c),
])
)
qsimSim = qsimcirq.QSimSimulator()
result = qsimSim.simulate(cirq_circuit, qubit_order=[a, b, c, d])
assert result.state_vector().shape == (16,)
cirqSim = cirq.Simulator()
cirq_result = cirqSim.simulate(cirq_circuit, qubit_order=[a, b, c, d])
# When using rotation gates such as S, qsim may add a global phase relative
# to other simulators. This is fine, as the result is equivalent.
assert cirq.linalg.allclose_up_to_global_phase(
result.state_vector(), cirq_result.state_vector())
def test_cirq_qsim_run(self):
# Pick qubits.
a, b, c, d = [
cirq.GridQubit(0, 0),
cirq.GridQubit(0, 1),
cirq.GridQubit(1, 1),
cirq.GridQubit(1, 0)
]
# Create a circuit
cirq_circuit = cirq.Circuit(
cirq.X(a)**0.5, # Square root of X.
cirq.Y(b)**0.5, # Square root of Y.
cirq.Z(c), # Z.
cirq.CZ(a, d), # ControlZ.
# measure qubits
cirq.measure(a, key='ma'),
cirq.measure(b, key='mb'),
cirq.measure(c, key='mc'),
cirq.measure(d, key='md'),
)
qsimSim = qsimcirq.QSimSimulator()
assert isinstance(qsimSim, cirq.SimulatesSamples)
result = qsimSim.run(cirq_circuit, repetitions=5)
for key, value in result.measurements.items():
assert(value.shape == (5, 1))
def test_qsim_run_vs_cirq_run(self):
# Simple circuit, want to check mapping of qubit(s) to their measurements
a, b, c, d = [
cirq.GridQubit(0, 0),
cirq.GridQubit(0, 1),
cirq.GridQubit(1, 0),
cirq.GridQubit(1, 1),
]
circuit = cirq.Circuit(
cirq.X(b),
cirq.CX(b, d),
cirq.measure(a, b, c, key='mabc'),
cirq.measure(d, key='md'),
)
# run in cirq
simulator = cirq.Simulator()
cirq_result = simulator.run(circuit, repetitions=20)
# run in qsim
qsim_simulator = qsimcirq.QSimSimulator()
qsim_result = qsim_simulator.run(circuit, repetitions=20)
# are they the same?
assert(qsim_result == cirq_result)
def test_intermediate_measure(self):
# Demonstrate that intermediate measurement is possible.
a, b = [
cirq.GridQubit(0, 0),
cirq.GridQubit(0, 1),
]
circuit = cirq.Circuit(
cirq.X(a), cirq.CX(a, b), cirq.measure(a, b, key='m1'),
cirq.CZ(a, b), cirq.measure(a, b, key='m2'),
cirq.X(a), cirq.CX(a, b), cirq.measure(a, b, key='m3'),
# Trailing gates with no measurement do not affect results.
cirq.H(a), cirq.H(b),
)
simulator = cirq.Simulator()
cirq_result = simulator.run(circuit, repetitions=20)
qsim_simulator = qsimcirq.QSimSimulator()
qsim_result = qsim_simulator.run(circuit, repetitions=20)
assert(qsim_result == cirq_result)
def test_sampling_nondeterminism(self):
# Ensure that reusing a QSimSimulator doesn't reuse the original seed.
q = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(cirq.H(q), cirq.measure(q, key='m'))
qsim_simulator = qsimcirq.QSimSimulator()
qsim_result = qsim_simulator.run(circuit, repetitions=100)
result_counts = qsim_result.histogram(key='m')
assert(result_counts[0] > 1)
assert(result_counts[1] > 1)
def test_matrix1_gate(self):
q = cirq.LineQubit(0)
m = np.array([[1, 1j], [1j, 1]]) * np.sqrt(0.5)
cirq_circuit = cirq.Circuit(cirq.MatrixGate(m).on(q))
qsimSim = qsimcirq.QSimSimulator()
result = qsimSim.simulate(cirq_circuit)
assert result.state_vector().shape == (2,)
cirqSim = cirq.Simulator()
cirq_result = cirqSim.simulate(cirq_circuit)
assert cirq.linalg.allclose_up_to_global_phase(
result.state_vector(), cirq_result.state_vector())
def test_matrix2_gate(self):
qubits = cirq.LineQubit.range(2)
m = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
cirq_circuit = cirq.Circuit(cirq.MatrixGate(m).on(*qubits))
qsimSim = qsimcirq.QSimSimulator()
result = qsimSim.simulate(cirq_circuit, qubit_order=qubits)
assert result.state_vector().shape == (4,)
cirqSim = cirq.Simulator()
cirq_result = cirqSim.simulate(cirq_circuit, qubit_order=qubits)
assert cirq.linalg.allclose_up_to_global_phase(
result.state_vector(), cirq_result.state_vector())
def test_big_matrix_gates(self):
qubits = cirq.LineQubit.range(3)
# Toffoli gate as a matrix.
m = np.array([
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 0],
])
cirq_circuit = cirq.Circuit(
cirq.H(qubits[0]), cirq.H(qubits[1]),
cirq.MatrixGate(m).on(*qubits),
)
qsimSim = qsimcirq.QSimSimulator()
result = qsimSim.simulate(cirq_circuit, qubit_order=qubits)
assert result.state_vector().shape == (8,)
cirqSim = cirq.Simulator()
cirq_result = cirqSim.simulate(cirq_circuit, qubit_order=qubits)
assert cirq.linalg.allclose_up_to_global_phase(
result.state_vector(), cirq_result.state_vector())
def test_decompose_to_matrix_gates(self):
class UnknownThreeQubitGate(cirq.ops.Gate):
"""This gate is not recognized by qsim, and cannot be decomposed.
qsim should attempt to convert it to a MatrixGate to resolve the issue.
"""
def __init__(self):
pass
def _num_qubits_(self):
return 3
def _qid_shape_(self):
return (2, 2, 2)
def _unitary_(self):
# Toffoli gate as a matrix.
return np.array([
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 0],
])
qubits = cirq.LineQubit.range(3)
cirq_circuit = cirq.Circuit(
cirq.H(qubits[0]), cirq.H(qubits[1]),
UnknownThreeQubitGate().on(*qubits),
)
qsimSim = qsimcirq.QSimSimulator()
result = qsimSim.simulate(cirq_circuit, qubit_order=qubits)
assert result.state_vector().shape == (8,)
cirqSim = cirq.Simulator()
cirq_result = cirqSim.simulate(cirq_circuit, qubit_order=qubits)
assert cirq.linalg.allclose_up_to_global_phase(
result.state_vector(), cirq_result.state_vector())
def test_basic_controlled_gate(self):
qubits = cirq.LineQubit.range(3)
cirq_circuit = cirq.Circuit(
cirq.H(qubits[1]), cirq.Y(qubits[2]),
cirq.X(qubits[0]).controlled_by(qubits[1]),
cirq.CX(*qubits[1:]).controlled_by(qubits[0]),
cirq.H(qubits[1]).controlled_by(qubits[0], qubits[2]),
)
qsimSim = qsimcirq.QSimSimulator()
result = qsimSim.simulate(cirq_circuit, qubit_order=qubits)
assert result.state_vector().shape == (8,)
cirqSim = cirq.Simulator()
cirq_result = cirqSim.simulate(cirq_circuit, qubit_order=qubits)
assert cirq.linalg.allclose_up_to_global_phase(
result.state_vector(), cirq_result.state_vector())
def test_controlled_matrix_gates(self):
qubits = cirq.LineQubit.range(4)
m1 = np.array([[1, 1j], [1j, 1]]) * np.sqrt(0.5)
m2 = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
cirq_circuit = cirq.Circuit(
cirq.MatrixGate(m1).on(qubits[0]).controlled_by(qubits[3]),
cirq.MatrixGate(m2).on(*qubits[1:3]).controlled_by(qubits[0]),
cirq.MatrixGate(m1).on(qubits[2]).controlled_by(qubits[0], qubits[1],
qubits[3]),
cirq.MatrixGate(m2).on(qubits[0], qubits[3]).controlled_by(*qubits[1:3]),
)
qsimSim = qsimcirq.QSimSimulator()
result = qsimSim.simulate(cirq_circuit, qubit_order=qubits)
assert result.state_vector().shape == (16,)
cirqSim = cirq.Simulator()
cirq_result = cirqSim.simulate(cirq_circuit, qubit_order=qubits)
assert cirq.linalg.allclose_up_to_global_phase(
result.state_vector(), cirq_result.state_vector())
def test_control_values(self):
qubits = cirq.LineQubit.range(3)
cirq_circuit = cirq.Circuit(
# Controlled by |01) state on qubits 1 and 2
cirq.X(qubits[0]).controlled_by(*qubits[1:], control_values=[0, 1]),
# Controlled by either |0) or |1) on qubit 0 (i.e., uncontrolled)
cirq.X(qubits[1]).controlled_by(qubits[0], control_values=[(0, 1)]),
# Controlled by |10) state on qubits 0 and 1
cirq.X(qubits[2]).controlled_by(qubits[1], qubits[0],
control_values=[0, 1]),
)
qsimSim = qsimcirq.QSimSimulator()
result = qsimSim.simulate(cirq_circuit, qubit_order=qubits)
assert result.state_vector().shape == (8,)
cirqSim = cirq.Simulator()
cirq_result = cirqSim.simulate(cirq_circuit, qubit_order=qubits)
assert cirq.linalg.allclose_up_to_global_phase(
result.state_vector(), cirq_result.state_vector())
qubits = cirq.LineQid.for_qid_shape([2, 3, 2])
cirq_circuit = cirq.Circuit(
# Controlled by |12) state on qubits 0 and 1
# Since qsim does not support qudits (yet), this gate is omitted.
cirq.X(qubits[2]).controlled_by(*qubits[:2], control_values=[1, 2]),
)
qsimSim = qsimcirq.QSimSimulator()
with self.assertWarnsRegex(RuntimeWarning,
'Gate has no valid control value'):
result = qsimSim.simulate(cirq_circuit, qubit_order=qubits)
assert result.state_vector()[0] == 1
def test_decomposable_gate(self):
qubits = cirq.LineQubit.range(4)
# The Toffoli gate (CCX) decomposes into multiple qsim-supported gates.
cirq_circuit = cirq.Circuit(
cirq.H(qubits[0]),
cirq.H(qubits[1]),
cirq.Moment(
cirq.CCX(*qubits[:3]),
cirq.H(qubits[3]),
),
cirq.H(qubits[2]),
cirq.H(qubits[3]),
)
qsimSim = qsimcirq.QSimSimulator()
result = qsimSim.simulate(cirq_circuit, qubit_order=qubits)
assert result.state_vector().shape == (16,)
cirqSim = cirq.Simulator()
cirq_result = cirqSim.simulate(cirq_circuit, qubit_order=qubits)
# Decomposition may result in gates which add a global phase.
assert cirq.linalg.allclose_up_to_global_phase(
result.state_vector(), cirq_result.state_vector())
def test_complicated_decomposition(self):
qubits = cirq.LineQubit.range(4)
# The QFT gate decomposes cleanly into the qsim gateset.
cirq_circuit = cirq.Circuit(
cirq.QuantumFourierTransformGate(4).on(*qubits))
qsimSim = qsimcirq.QSimSimulator()
result = qsimSim.simulate(cirq_circuit, qubit_order=qubits)
assert result.state_vector().shape == (16,)
cirqSim = cirq.Simulator()
cirq_result = cirqSim.simulate(cirq_circuit, qubit_order=qubits)
# Decomposition may result in gates which add a global phase.
assert cirq.linalg.allclose_up_to_global_phase(
result.state_vector(), cirq_result.state_vector())
def test_multi_qubit_fusion(self):
q0, q1, q2, q3 = cirq.LineQubit.range(4)
qubits = [q0, q1, q2, q3]
cirq_circuit = cirq.Circuit(
cirq.CX(q0, q1), cirq.X(q2)**0.5, cirq.Y(q3)**0.5,
cirq.CX(q0, q2), cirq.T(q1), cirq.T(q3),
cirq.CX(q1, q2), cirq.X(q3)**0.5, cirq.Y(q0)**0.5,
cirq.CX(q1, q3), cirq.T(q0), cirq.T(q2),
cirq.CX(q2, q3), cirq.X(q0)**0.5, cirq.Y(q1)**0.5,
)
qsimSim = qsimcirq.QSimSimulator(qsim_options={'f': 2})
result_2q_fusion = qsimSim.simulate(cirq_circuit, qubit_order=qubits)
qsimSim = qsimcirq.QSimSimulator(qsim_options={'f': 4})
result_4q_fusion = qsimSim.simulate(cirq_circuit, qubit_order=qubits)
assert cirq.linalg.allclose_up_to_global_phase(
result_2q_fusion.state_vector(), result_4q_fusion.state_vector())
def test_cirq_qsim_simulate_random_unitary(self):
q0, q1 = cirq.LineQubit.range(2)
qsimSim = qsimcirq.QSimSimulator(qsim_options={'t': 16, 'v': 0})
for iter in range(10):
random_circuit = cirq.testing.random_circuit(qubits=[q0, q1],
n_moments=8,
op_density=0.99,
random_state=iter)
cirq.ConvertToCzAndSingleGates().optimize_circuit(random_circuit) # cannot work with params
cirq.ExpandComposite().optimize_circuit(random_circuit)
result = qsimSim.simulate(random_circuit, qubit_order=[q0, q1])
assert result.state_vector().shape == (4,)
cirqSim = cirq.Simulator()
cirq_result = cirqSim.simulate(random_circuit, qubit_order=[q0, q1])
# When using rotation gates such as S, qsim may add a global phase relative
# to other simulators. This is fine, as the result is equivalent.
assert cirq.linalg.allclose_up_to_global_phase(
result.state_vector(),
cirq_result.state_vector(),
atol = 1.e-6
)
def test_cirq_qsimh_simulate(self):
# Pick qubits.
a, b = [cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)]
# Create a circuit
cirq_circuit = cirq.Circuit(cirq.CNOT(a, b), cirq.CNOT(b, a), cirq.X(a))
qsimh_options = {'k': [0], 'w': 0, 'p': 1, 'r': 1}
qsimhSim = qsimcirq.QSimhSimulator(qsimh_options)
result = qsimhSim.compute_amplitudes(
cirq_circuit, bitstrings=[0b00, 0b01, 0b10, 0b11])
assert np.allclose(result, [0j, 0j, (1 + 0j), 0j])
def test_cirq_qsim_params(self):
qubit = cirq.GridQubit(0,0)
circuit = cirq.Circuit(cirq.X(qubit)**sympy.Symbol("beta"))
params = cirq.ParamResolver({'beta': 0.5})
simulator = cirq.Simulator()
cirq_result = simulator.simulate(circuit, param_resolver = params)
qsim_simulator = qsimcirq.QSimSimulator()
qsim_result = qsim_simulator.simulate(circuit, param_resolver = params)
assert cirq.linalg.allclose_up_to_global_phase(
qsim_result.state_vector(), cirq_result.state_vector())
def test_cirq_qsim_all_supported_gates(self):
q0 = cirq.GridQubit(1, 1)
q1 = cirq.GridQubit(1, 0)
q2 = cirq.GridQubit(0, 1)
q3 = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(
cirq.Moment([
cirq.H(q0),
cirq.H(q1),
cirq.H(q2),
cirq.H(q3),
]),
cirq.Moment([
cirq.T(q0),
cirq.T(q1),
cirq.T(q2),
cirq.T(q3),
]),
cirq.Moment([
cirq.CZPowGate(exponent=0.7, global_shift=0.2)(q0, q1),
cirq.CXPowGate(exponent=1.2, global_shift=0.4)(q2, q3),
]),
cirq.Moment([
cirq.XPowGate(exponent=0.3, global_shift=1.1)(q0),
cirq.YPowGate(exponent=0.4, global_shift=1)(q1),
cirq.ZPowGate(exponent=0.5, global_shift=0.9)(q2),
cirq.HPowGate(exponent=0.6, global_shift=0.8)(q3),
]),
cirq.Moment([
cirq.CX(q0, q2),
cirq.CZ(q1, q3),
]),
cirq.Moment([
cirq.X(q0),
cirq.Y(q1),
cirq.Z(q2),
cirq.S(q3),
]),
cirq.Moment([
cirq.XXPowGate(exponent=0.4, global_shift=0.7)(q0, q1),
cirq.YYPowGate(exponent=0.8, global_shift=0.5)(q2, q3),
]),
cirq.Moment([
cirq.I(q0),
cirq.I(q1),
cirq.IdentityGate(2)(q2, q3)
]),
cirq.Moment([
cirq.rx(0.7)(q0),
cirq.ry(0.2)(q1),
cirq.rz(0.4)(q2),
cirq.PhasedXPowGate(
phase_exponent=0.8, exponent=0.6, global_shift=0.3)(q3),
]),
cirq.Moment([
cirq.ZZPowGate(exponent=0.3, global_shift=1.3)(q0, q2),
cirq.ISwapPowGate(exponent=0.6, global_shift=1.2)(q1, q3),
]),
cirq.Moment([
cirq.XPowGate(exponent=0.1, global_shift=0.9)(q0),
cirq.YPowGate(exponent=0.2, global_shift=1)(q1),
cirq.ZPowGate(exponent=0.3, global_shift=1.1)(q2),
cirq.HPowGate(exponent=0.4, global_shift=1.2)(q3),
]),
cirq.Moment([
cirq.SwapPowGate(exponent=0.2, global_shift=0.9)(q0, q1),
cirq.PhasedISwapPowGate(phase_exponent = 0.8, exponent=0.6)(q2, q3),
]),
cirq.Moment([
cirq.PhasedXZGate(
x_exponent=0.2, z_exponent=0.3, axis_phase_exponent=1.4)(q0),
cirq.T(q1),
cirq.H(q2),
cirq.S(q3),
]),
cirq.Moment([
cirq.SWAP(q0, q2),
cirq.XX(q1, q3),
]),
cirq.Moment([
cirq.rx(0.8)(q0),
cirq.ry(0.9)(q1),
cirq.rz(1.2)(q2),
cirq.T(q3),
]),
cirq.Moment([
cirq.YY(q0, q1),
cirq.ISWAP(q2, q3),
]),
cirq.Moment([
cirq.T(q0),
cirq.Z(q1),
cirq.Y(q2),
cirq.X(q3),
]),
cirq.Moment([
cirq.FSimGate(0.3, 1.7)(q0, q2),
cirq.ZZ(q1, q3),
]),
cirq.Moment([
cirq.ry(1.3)(q0),
cirq.rz(0.4)(q1),
cirq.rx(0.7)(q2),
cirq.S(q3),
]),
cirq.Moment([
cirq.IdentityGate(4).on(q0, q1, q2, q3),
]),
cirq.Moment([
cirq.CCZPowGate(exponent=0.7, global_shift=0.3)(q2, q0, q1),
]),
cirq.Moment([
cirq.CCXPowGate(exponent=0.4, global_shift=0.6)(
q3, q1, q0).controlled_by(q2, control_values=[0]),
]),
cirq.Moment([
cirq.rx(0.3)(q0),
cirq.ry(0.5)(q1),
cirq.rz(0.7)(q2),
cirq.rx(0.9)(q3),
]),
cirq.Moment([
cirq.TwoQubitDiagonalGate([0.1, 0.2, 0.3, 0.4])(q0, q1),
]),
cirq.Moment([
cirq.ThreeQubitDiagonalGate([0.5, 0.6, 0.7, 0.8,
0.9, 1, 1.2, 1.3])(q1, q2, q3),
]),
cirq.Moment([
cirq.CSwapGate()(q0, q3, q1),
]),
cirq.Moment([
cirq.rz(0.6)(q0),
cirq.rx(0.7)(q1),
cirq.ry(0.8)(q2),
cirq.rz(0.9)(q3),
]),
cirq.Moment([
cirq.TOFFOLI(q3, q2, q0),
]),
cirq.Moment([
cirq.FREDKIN(q1, q3, q2),
]),
cirq.Moment([
cirq.MatrixGate(np.array([[0, -0.5 - 0.5j, -0.5 - 0.5j, 0],
[0.5 - 0.5j, 0, 0, -0.5 + 0.5j],
[0.5 - 0.5j, 0, 0, 0.5 - 0.5j],
[0, -0.5 - 0.5j, 0.5 + 0.5j, 0]]))(q0, q1),
cirq.MatrixGate(np.array([[0.5 - 0.5j, 0, 0, -0.5 + 0.5j],
[0, 0.5 - 0.5j, -0.5 + 0.5j, 0],
[0, -0.5 + 0.5j, -0.5 + 0.5j, 0],
[0.5 - 0.5j, 0, 0, 0.5 - 0.5j]]))(q2, q3),
]),
cirq.Moment([
cirq.MatrixGate(np.array([[1, 0], [0, 1j]]))(q0),
cirq.MatrixGate(np.array([[0, -1j], [1j, 0]]))(q1),
cirq.MatrixGate(np.array([[0, 1], [1, 0]]))(q2),
cirq.MatrixGate(np.array([[1, 0], [0, -1]]))(q3),
]),
cirq.Moment([
cirq.riswap(0.7)(q0, q1),
cirq.givens(1.2)(q2, q3),
]),
cirq.Moment([
cirq.H(q0),
cirq.H(q1),
cirq.H(q2),
cirq.H(q3),
]),
)
simulator = cirq.Simulator()
cirq_result = simulator.simulate(circuit)
qsim_simulator = qsimcirq.QSimSimulator()
qsim_result = qsim_simulator.simulate(circuit)
assert cirq.linalg.allclose_up_to_global_phase(
qsim_result.state_vector(), cirq_result.state_vector())
def test_cirq_qsim_global_shift(self):
q0 = cirq.GridQubit(1, 1)
q1 = cirq.GridQubit(1, 0)
q2 = cirq.GridQubit(0, 1)
q3 = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(
cirq.Moment([
cirq.H(q0),
cirq.H(q1),
cirq.H(q2),
cirq.H(q3),
]),
cirq.Moment([
cirq.CXPowGate(exponent=1, global_shift=0.7)(q0, q1),
cirq.CZPowGate(exponent=1, global_shift=0.9)(q2, q3),
]),
cirq.Moment([
cirq.XPowGate(exponent=1, global_shift=1.1)(q0),
cirq.YPowGate(exponent=1, global_shift=1)(q1),
cirq.ZPowGate(exponent=1, global_shift=0.9)(q2),
cirq.HPowGate(exponent=1, global_shift=0.8)(q3),
]),
cirq.Moment([
cirq.XXPowGate(exponent=1, global_shift=0.2)(q0, q1),
cirq.YYPowGate(exponent=1, global_shift=0.3)(q2, q3),
]),
cirq.Moment([
cirq.ZPowGate(exponent=0.25, global_shift=0.4)(q0),
cirq.ZPowGate(exponent=0.5, global_shift=0.5)(q1),
cirq.YPowGate(exponent=1, global_shift=0.2)(q2),
cirq.ZPowGate(exponent=1, global_shift=0.3)(q3),
]),
cirq.Moment([
cirq.ZZPowGate(exponent=1, global_shift=0.2)(q0, q1),
cirq.SwapPowGate(exponent=1, global_shift=0.3)(q2, q3),
]),
cirq.Moment([
cirq.XPowGate(exponent=1, global_shift=0)(q0),
cirq.YPowGate(exponent=1, global_shift=0)(q1),
cirq.ZPowGate(exponent=1, global_shift=0)(q2),
cirq.HPowGate(exponent=1, global_shift=0)(q3),
]),
cirq.Moment([
cirq.ISwapPowGate(exponent=1, global_shift=0.3)(q0, q1),
cirq.ZZPowGate(exponent=1, global_shift=0.5)(q2, q3),
]),
cirq.Moment([
cirq.ZPowGate(exponent=0.5, global_shift=0)(q0),
cirq.ZPowGate(exponent=0.25, global_shift=0)(q1),
cirq.XPowGate(exponent=0.9, global_shift=0)(q2),
cirq.YPowGate(exponent=0.8, global_shift=0)(q3),
]),
cirq.Moment([
cirq.CZPowGate(exponent=0.3, global_shift=0)(q0, q1),
cirq.CXPowGate(exponent=0.4, global_shift=0)(q2, q3),
]),
cirq.Moment([
cirq.ZPowGate(exponent=1.3, global_shift=0)(q0),
cirq.HPowGate(exponent=0.8, global_shift=0)(q1),
cirq.XPowGate(exponent=0.9, global_shift=0)(q2),
cirq.YPowGate(exponent=0.4, global_shift=0)(q3),
]),
cirq.Moment([
cirq.XXPowGate(exponent=0.8, global_shift=0)(q0, q1),
cirq.YYPowGate(exponent=0.6, global_shift=0)(q2, q3),
]),
cirq.Moment([
cirq.HPowGate(exponent=0.7, global_shift=0)(q0),
cirq.ZPowGate(exponent=0.2, global_shift=0)(q1),
cirq.YPowGate(exponent=0.3, global_shift=0)(q2),
cirq.XPowGate(exponent=0.7, global_shift=0)(q3),
]),
cirq.Moment([
cirq.ZZPowGate(exponent=0.1, global_shift=0)(q0, q1),
cirq.SwapPowGate(exponent=0.6, global_shift=0)(q2, q3),
]),
cirq.Moment([
cirq.XPowGate(exponent=0.4, global_shift=0)(q0),
cirq.YPowGate(exponent=0.3, global_shift=0)(q1),
cirq.ZPowGate(exponent=0.2, global_shift=0)(q2),
cirq.HPowGate(exponent=0.1, global_shift=0)(q3),
]),
cirq.Moment([
cirq.ISwapPowGate(exponent=1.3, global_shift=0)(q0, q1),
cirq.CXPowGate(exponent=0.5, global_shift=0)(q2, q3),
]),
cirq.Moment([
cirq.H(q0),
cirq.H(q1),
cirq.H(q2),
cirq.H(q3),
]),
)
simulator = cirq.Simulator()
cirq_result = simulator.simulate(circuit)
qsim_simulator = qsimcirq.QSimSimulator()
qsim_result = qsim_simulator.simulate(circuit)
assert cirq.linalg.allclose_up_to_global_phase(
qsim_result.state_vector(), cirq_result.state_vector())
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joveh/tensorboard",
"score": 2
} |
#### File: backend/event_processing/event_file_loader.py
```python
import contextlib
from tensorboard import data_compat
from tensorboard import dataclass_compat
from tensorboard.compat import tf
from tensorboard.compat.proto import event_pb2
from tensorboard.util import platform_util
from tensorboard.util import tb_logging
logger = tb_logging.get_logger()
@contextlib.contextmanager
def _nullcontext():
"""Pre-Python-3.7-compatible standin for contextlib.nullcontext."""
yield
# Might as well make this a singleton.
_NULLCONTEXT = _nullcontext()
def _silence_deprecation_warnings():
"""Context manager that best-effort silences TF deprecation warnings."""
try:
# Learn this one weird trick to make TF deprecation warnings go away.
from tensorflow.python.util import deprecation
return deprecation.silence()
except (ImportError, AttributeError):
return _NULLCONTEXT
def _make_tf_record_iterator(file_path):
"""Returns an iterator over TF records for the given tfrecord file."""
# If we don't have TF at all, use the stub implementation.
if tf.__version__ == "stub":
# TODO(#1711): Reshape stub implementation to fit tf_record_iterator API
# rather than needlessly emulating the old PyRecordReader_New API.
logger.debug("Opening a stub record reader pointing at %s", file_path)
return _PyRecordReaderIterator(
tf.pywrap_tensorflow.PyRecordReader_New, file_path
)
# If PyRecordReader exists, use it, otherwise use tf_record_iterator().
# Check old first, then new, since tf_record_iterator existed previously but
# only gained the semantics we need at the time PyRecordReader was removed.
#
# TODO(#1711): Eventually remove PyRecordReader fallback once we can drop
# support for TF 2.1 and prior, and find a non-deprecated replacement for
# tf.compat.v1.io.tf_record_iterator.
try:
from tensorflow.python import pywrap_tensorflow
py_record_reader_new = pywrap_tensorflow.PyRecordReader_New
except (ImportError, AttributeError):
py_record_reader_new = None
if py_record_reader_new:
logger.debug("Opening a PyRecordReader pointing at %s", file_path)
return _PyRecordReaderIterator(py_record_reader_new, file_path)
else:
logger.debug("Opening a tf_record_iterator pointing at %s", file_path)
# TODO(#1711): Find non-deprecated replacement for tf_record_iterator.
with _silence_deprecation_warnings():
return tf.compat.v1.io.tf_record_iterator(file_path)
class _PyRecordReaderIterator(object):
"""Python iterator for TF Records based on PyRecordReader."""
def __init__(self, py_record_reader_new, file_path):
"""Constructs a _PyRecordReaderIterator for the given file path.
Args:
py_record_reader_new: pywrap_tensorflow.PyRecordReader_New
file_path: file path of the tfrecord file to read
"""
with tf.compat.v1.errors.raise_exception_on_not_ok_status() as status:
self._reader = py_record_reader_new(
tf.compat.as_bytes(file_path), 0, tf.compat.as_bytes(""), status
)
if not self._reader:
raise IOError(
"Failed to open a record reader pointing to %s" % file_path
)
def __iter__(self):
return self
def __next__(self):
try:
self._reader.GetNext()
except tf.errors.OutOfRangeError as e:
raise StopIteration
return self._reader.record()
next = __next__ # for python2 compatibility
class RawEventFileLoader(object):
"""An iterator that yields Event protos as serialized bytestrings."""
def __init__(self, file_path, detect_file_replacement=False):
"""Constructs a RawEventFileLoader for the given file path.
Args:
file_path: the event file path to read from
detect_file_replacement: if True, when Load() is called, the loader
will make a stat() call to check the size of the file. If it sees
that the file has grown, it will reopen the file entirely (while
preserving the current offset) before attempting to read from it.
Otherwise, Load() will simply poll at EOF for new data.
"""
if file_path is None:
raise ValueError("A file path is required")
self._file_path = platform_util.readahead_file_path(file_path)
self._detect_file_replacement = detect_file_replacement
self._file_size = None
self._iterator = _make_tf_record_iterator(self._file_path)
if self._detect_file_replacement and not hasattr(
self._iterator, "reopen"
):
logger.warning(
"File replacement detection requested, but not enabled because "
"TF record iterator impl does not support reopening. This "
"functionality requires TensorFlow 2.9+"
)
self._detect_file_replacement = False
def Load(self):
"""Loads all new events from disk as raw serialized proto bytestrings.
Calling Load multiple times in a row will not 'drop' events as long as the
return value is not iterated over.
Yields:
All event proto bytestrings in the file that have not been yielded yet.
"""
logger.debug("Loading events from %s", self._file_path)
if self._detect_file_replacement:
has_increased = self.CheckForIncreasedFileSize()
# Only act on the file size information if we got a concrete result.
if has_increased is not None:
if has_increased:
logger.debug(
"Reopening %s since file size has changed",
self._file_path,
)
self._iterator.close()
self._iterator.reopen()
else:
logger.debug(
"Skipping attempt to poll %s since file size has not "
"changed (still %d)",
self._file_path,
self._file_size,
)
return
while True:
try:
yield next(self._iterator)
except StopIteration:
logger.debug("End of file in %s", self._file_path)
break
except tf.errors.DataLossError as e:
# We swallow partial read exceptions; if the record was truncated
# and a later update completes it, retrying can then resume from
# the same point in the file since the iterator holds the offset.
logger.debug("Truncated record in %s (%s)", self._file_path, e)
break
logger.debug("No more events in %s", self._file_path)
def CheckForIncreasedFileSize(self):
"""Stats the file to get its updated size, returning True if it grew.
If the stat call fails or reports a smaller size than was previously
seen, then any previously cached size is left unchanged.
Returns:
boolean or None: True if the file size increased; False if it was
the same or decreased; or None if neither case could be detected
(either because the previous size had not been recorded yet, or
because the stat call for the current size failed).
"""
previous_size = self._file_size
try:
self._file_size = tf.io.gfile.stat(self._file_path).length
except tf.errors.OpError as e:
logger.error("Failed to stat %s: %s", self._file_path, e)
return None
logger.debug(
"Stat on %s got size %d, previous size %s",
self._file_path,
self._file_size,
previous_size,
)
if previous_size is None:
return None
if self._file_size > previous_size:
return True
if self._file_size < previous_size:
logger.warning(
"File %s shrank from previous size %d to size %d",
self._file_path,
previous_size,
self._file_size,
)
# In case this was transient, preserve the previously cached size,
# to avoid reporting a spurious increase next time. If the file was
# actually truncated, we can't recover anyway, so just ignore it.
self._file_size = previous_size
return False
class LegacyEventFileLoader(RawEventFileLoader):
"""An iterator that yields parsed Event protos."""
def Load(self):
"""Loads all new events from disk.
Calling Load multiple times in a row will not 'drop' events as long as the
return value is not iterated over.
Yields:
All events in the file that have not been yielded yet.
"""
for record in super(LegacyEventFileLoader, self).Load():
yield event_pb2.Event.FromString(record)
class EventFileLoader(LegacyEventFileLoader):
"""An iterator that passes events through read-time compat layers.
Specifically, this includes `data_compat` and `dataclass_compat`.
"""
def __init__(self, *args, **kwargs):
super(EventFileLoader, self).__init__(*args, **kwargs)
# Track initial metadata for each tag, for `dataclass_compat`.
# This is meant to be tracked per run, not per event file, so
# there is a potential failure case when the second event file
# in a single run has no summary metadata. This only occurs when
# all of the following hold: (a) the events were written with
# the TensorFlow 1.x (not 2.x) writer, (b) the summaries were
# created by `tensorboard.summary.v1` ops and so do not undergo
# `data_compat` transformation, and (c) the file writer was
# reopened by calling `.reopen()` on it, which creates a new
# file but does not clear the tag cache. This is considered
# sufficiently improbable that we don't take extra mitigations.
self._initial_metadata = {} # from tag name to `SummaryMetadata`
def Load(self):
for event in super(EventFileLoader, self).Load():
event = data_compat.migrate_event(event)
events = dataclass_compat.migrate_event(
event, self._initial_metadata
)
for event in events:
yield event
class TimestampedEventFileLoader(EventFileLoader):
"""An iterator that yields (UNIX timestamp float, Event proto) pairs."""
def Load(self):
"""Loads all new events and their wall time values from disk.
Calling Load multiple times in a row will not 'drop' events as long as the
return value is not iterated over.
Yields:
Pairs of (UNIX timestamp float, Event proto) for all events in the file
that have not been yielded yet.
"""
for event in super(TimestampedEventFileLoader, self).Load():
yield (event.wall_time, event)
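# Usage sketch: iterate an event file with the compat-aware loader. The file
# path is illustrative; repeated Load() calls resume where the previous one
# stopped.
#
#     loader = EventFileLoader("/logs/run1/events.out.tfevents.12345")
#     for event in loader.Load():
#         print(event.wall_time, event.summary)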
```
#### File: data/server/update_protos.py
```python
import os
import sys
# Paths to "tensorboard/data/server/" in (a) the Bazel runfiles tree,
# whence we can read data dependencies, and (b) the Git repository,
# whither we can write output files.
_BAZEL_DIR = os.path.join("tensorboard", "data", "server")
_REPO_DIR = os.path.dirname(os.readlink(__file__))
# Basename for the gRPC file descriptor set. The same basename is used
# for the generated file and the source file.
_FILE_DESCRIPTOR_SET = "descriptor.bin"
_RUST_LICENSE = """\
/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
"""
def pkg_basename(pkg, rust_extension):
if pkg == _FILE_DESCRIPTOR_SET:
return _FILE_DESCRIPTOR_SET
else:
return "%s%s" % (pkg, rust_extension)
def expected_contents(pkg):
src = os.path.join(_BAZEL_DIR, "genproto", pkg_basename(pkg, ".rs"))
with open(src, "rb") as infile:
contents = infile.read()
if pkg != _FILE_DESCRIPTOR_SET:
contents = _RUST_LICENSE.encode("utf-8") + contents
return contents
def repo_file_path(pkg):
return os.path.join(_REPO_DIR, pkg_basename(pkg, ".pb.rs"))
def runfiles_file_path(pkg):
return os.path.join(_BAZEL_DIR, pkg_basename(pkg, ".pb.rs"))
def update(proto_packages):
for pkg in proto_packages:
with open(repo_file_path(pkg), "wb") as outfile:
outfile.write(expected_contents(pkg))
def check(proto_packages):
failed = False
for pkg in proto_packages:
dst = runfiles_file_path(pkg)
try:
expected = expected_contents(pkg)
with open(dst, "rb") as infile:
actual = infile.read()
except OSError as e:
failed = True
print("Could not read package %s: %s" % (pkg, e))
continue
if expected == actual:
print("%s OK" % dst)
else:
print("%s out of date" % dst)
failed = True
if failed:
print("To update, run //tensorboard/data/server:update_protos")
raise SystemExit(1)
def main():
(mode, *proto_packages) = sys.argv[1:]
if mode == "--update":
return update(proto_packages)
if mode == "--check":
return check(proto_packages)
raise ValueError("unknown mode: %r" % mode)
if __name__ == "__main__":
main()
``` |
{
"source": "JoveIC/Grad-CAM",
"score": 3
} |
#### File: JoveIC/Grad-CAM/gradcam.py
```python
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['GradCam']
class GradCam():
"""
Produce 'visual explanations' for decisions made by CNN-based models
"""
def __init__(self, device='cpu', model=None, hasrecurrent=False):
self.device = device
self.model = model
self.gradients = None
self.image = None
self.hasrecurrent = hasrecurrent
def __call__(self, *args, **kwargs):
# get target feature map and model output (the score for class c before softmax y^c)
# target feature map
target, output = self.get_target_fmap(*args, **kwargs)
# y^c
if 'index' not in kwargs.keys():
index = torch.argmax(output)
else:
index = kwargs['index']
            if index is None:
index = torch.argmax(output)
one_hot = self.get_y_index(index=index, output=output)
# compute the gradient w.r.t. feature map activations A^k of a convolutional layer
one_hot.backward()
# obtain the neuron importance weights:
# global-average-pool gradients over the width and height dimensions
weights = torch.mean(self.gradients.squeeze_(0), (1, 2)) # channel, w, h
# heatmap: weighted combination of forward activation maps, follow by a ReLU
heatmap = torch.zeros(target.size()[1:], dtype=torch.float32).to(self.device)
for i, w in enumerate(weights):
heatmap += w * target[i, :, :]
heatmap = F.relu(heatmap) # ReLU
return heatmap
def get_target_fmap(self, *args, **kwargs):
x = self.setup(*args, **kwargs)
self._model_setup_()
target, output = self.forward_pass(x, kwargs['target_layer'])
target = target.squeeze(0)
return target, output
def get_y_index(self, index=None, output=None):
one_hot = torch.zeros(output.size(), dtype=torch.float32)
one_hot[index] = 1
one_hot = torch.sum(one_hot.to(self.device) * output)
return one_hot
def _model_setup_(self):
self.model.to(self.device)
if self.hasrecurrent:
#RuntimeError: cudnn RNN backward can only be called in training mode
#https://github.com/pytorch/pytorch/issues/10006
self.model.train()
for name, module in self.model.named_modules():
if isinstance(module, nn.Dropout):
module.p = 0
elif isinstance(module, (nn.RNN, nn.LSTM, nn.GRU)):
module.dropout = 0
else:
self.model.eval()
for param in self.model.parameters():
param.requires_grad = False
self.model.zero_grad()
# get target feature maps and model output
    def forward_pass(self, x, target_layer):
r"""
Note: for getting the partial derivative of class score over target feature maps,
technically it is the same for setting requires_grad to True for either model parameters or
model input, since either way the target feature maps would be a intermediate node on the
computation graph, which is enough for attach a hook to it.
(of course only requires_grad for feature maps works as well... )
I disabled requires_grad for all model parameters,
only set the requires_grad for input to be true, just to be explicit
- General flow:
# get the feature maps we want
>> x = get_feature(x)
>> target_activations = x
# register hook on the fearure maps we want the gradient for
>> x.register_hook(self.save_gradient)
# get predicitons
>> x = the_rest_of_the_model_before_softmax(x)
"""
target_activations = None
raise NotImplementedError('Overwrite this one')
return target_activations, x
def save_gradient(self, grad):
self.gradients = grad
def setup(self, *args, **kwargs) -> torch.Tensor:
"""
        Prepare the original image and the transformed model input.
        The overriding implementation should:
            - set self.image
            - return the model input x
        * Don't forget to update hidden (cell) states for recurrent models
"""
        # Overriding implementations should set x.requires_grad = True on the
        # prepared input before returning it.
raise NotImplementedError('Overwrite this one')
return x
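# Subclass sketch: one way to fill in the two abstract hooks for a CNN that
# exposes `features`/`classifier` attributes (a hypothetical model layout;
# the `preprocess` call is also an assumption).
#
#     class MyGradCam(GradCam):
#         def setup(self, image, **kwargs):
#             self.image = image
#             x = preprocess(image).unsqueeze(0).to(self.device)
#             x.requires_grad = True
#             return x
#
#         def forward_pass(self, x, target_layer):
#             x = self.model.features[:target_layer](x)
#             target_activations = x
#             x.register_hook(self.save_gradient)
#             x = self.model.features[target_layer:](x)
#             x = self.model.classifier(torch.flatten(x, 1))
#             return target_activations, x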
```
#### File: JoveIC/Grad-CAM/misc.py
```python
import cv2
import torch
import numpy as np
def fuse_heatmap_image(img, heatmap, resize=None, keep_heatmap=False):
img = img.cpu().numpy() if isinstance(img, torch.Tensor) else np.array(img)
heatmap = heatmap.detach().cpu().numpy() if isinstance(heatmap, torch.Tensor) else heatmap
    if not resize:
        # cv2.resize expects (width, height); img.shape is (height, width[, channels])
        size = (img.shape[1], img.shape[0])
else:
size = resize
heatmap = heatmap - np.min(heatmap)
heatmap = heatmap / np.max(heatmap)
heatmap = np.float32(cv2.resize(heatmap, size))
heatmap = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    fused = np.float32(cv2.resize(img / 255, size)) + np.float32(heatmap / 255)
fused = np.uint8((fused / np.max(fused)) * 255)
if keep_heatmap:
return fused, heatmap
    else:
        return fused
``` |
{
"source": "jovemmanuelre/Simple-and-Multivariate-Classification",
"score": 3
} |
#### File: jovemmanuelre/Simple-and-Multivariate-Classification/Testing the Model - Exercise.py
```python
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
raw_data = pd.read_csv('Bank-data.csv')
data = raw_data.copy()
data = data.drop(['Unnamed: 0'], axis=1)
data['y'] = data['y'].map({'yes': 1, 'no': 0})
data.describe()
# ## Model: Simple Classification
y = data['y']
x1 = data['duration']
x = sm.add_constant(x1)
reg_log = sm.Logit(y, x)
results_log = reg_log.fit()
results_log.summary()
plt.scatter(x1, y, color='C0')
plt.xlabel('Duration', fontsize=20)
plt.ylabel('Subscription', fontsize=20)
plt.show()
# ## Expanded model: Multivariate Classification
estimators = ['interest_rate', 'credit', 'march', 'previous', 'duration']
X1_all = data[estimators]
y = data['y']
X_all = sm.add_constant(X1_all)
reg_logit = sm.Logit(y, X_all)
results_logit = reg_logit.fit()
results_logit.summary2()
# ### Confusion Matrix
def confusion_matrix(data_array, actual_values, model):
pred_values = model.predict(data_array)
bins = np.array([0, 0.5, 1])
cm = np.histogram2d(actual_values, pred_values, bins=bins)[0]
accuracy = (cm[0, 0] + cm[1, 1]) / cm.sum()
return cm, accuracy
confusion_matrix(X_all, y, results_logit)
# ## Testing the accuracy of the model
raw_data2 = pd.read_csv('Bank-data-testing.csv')
data_test = raw_data2.copy()
data_test = data_test.drop(['Unnamed: 0'], axis=1)
data_test['y'] = data_test['y'].map({'yes': 1, 'no': 0})
y_test = data_test['y']
X1_test = data_test[estimators]
X_test = sm.add_constant(X1_test)
confusion_matrix(X_test, y_test, results_logit)
confusion_matrix(X_all, y, results_logit)
``` |
{
"source": "jovencoda/troca",
"score": 2
} |
#### File: core/templatetags/core_extras.py
```python
from django.contrib.auth.models import User
from django import template
from core.models import UserProfile, Project, Skills, Skills_categories
from django.template.defaultfilters import stringfilter
register = template.Library()
@register.filter(name='get_project_author')
@stringfilter
def get_project_author(value):
    # Look up the user by username, then fetch the matching profile.
    user = User.objects.get(username=value)
    obj = UserProfile.objects.get(user=user.id)
return {'author': obj}
``` |
{
"source": "jovenwayfarer/Pneumothorax",
"score": 2
} |
#### File: jovenwayfarer/Pneumothorax/dataset.py
```python
from torch.utils.data import DataLoader, Dataset
import pandas as pd
import albumentations as albu
from albumentations.pytorch import ToTensorV2
from sklearn.model_selection import StratifiedKFold
import cv2
import os
import numpy as np
pd.options.mode.chained_assignment = None
class SIIMDataset(Dataset):
def __init__(self, df, image_folder, mask_folder, size, mean, std, phase):
self.df = df
self.root_images = image_folder
self.root_masks = mask_folder
self.size = size
self.mean = mean
self.std = std
self.phase = phase
self.transforms = get_transforms(phase, size, mean, std)
self.gb = self.df.groupby('ImageId')
self.fnames = list(self.gb.groups.keys())
def __getitem__(self, idx):
image_id = self.fnames[idx]
image_path = os.path.join(self.root_images, image_id + ".png")
mask_path = os.path.join(self.root_masks, image_id + ".png")
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
mask = cv2.imread(mask_path)
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
mask = (mask > 0).astype(np.float32)
augmented = self.transforms(image=image, mask=mask)
image = augmented['image']
mask = augmented['mask']
return image, mask
def __len__(self):
return len(self.fnames)
def get_transforms(phase, size, mean, std):
list_transforms = []
if phase == "train":
list_transforms.extend(
[
albu.HorizontalFlip(),
albu.OneOf([
albu.RandomContrast(),
albu.RandomGamma(),
albu.RandomBrightness(),
], p=0.3),
albu.OneOf([
albu.ElasticTransform(alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
albu.GridDistortion(),
albu.OpticalDistortion(distort_limit=2, shift_limit=0.5),
], p=0.3),
albu.ShiftScaleRotate(),
]
)
list_transforms.extend(
[
albu.Normalize(mean=mean, std=std, p=1),
albu.Resize(size, size),
ToTensorV2(),
]
)
list_trfms = albu.Compose(list_transforms)
return list_trfms
def provider(
fold,
total_folds,
image_folder,
mask_folder,
df_path,
phase,
size,
mean=None,
std=None,
batch_size=8,
num_workers=4,
shuffle = False,
):
df = pd.read_csv(df_path)
df_with_mask = df[df["EncodedPixels"] != "-1"]
df_with_mask['has_mask'] = 1
df_without_mask = df[df["EncodedPixels"] == "-1"]
df_without_mask['has_mask'] = 0
df_without_mask_sampled = df_without_mask.sample(len(df_with_mask.drop_duplicates('ImageId')))
df = pd.concat([df_with_mask, df_without_mask_sampled])
kfold = StratifiedKFold(total_folds, shuffle=True, random_state=69)
train_idx, val_idx = list(kfold.split(
df["ImageId"], df["has_mask"]))[fold]
train_df, val_df = df.iloc[train_idx], df.iloc[val_idx]
df = train_df if phase == "train" else val_df
# NOTE: total_folds=5 -> train/val : 80%/20%
image_dataset = SIIMDataset(df, image_folder, mask_folder, size, mean, std, phase)
dataloader = DataLoader(
image_dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
shuffle=shuffle,
)
return dataloader
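# Usage sketch: build the training loader for fold 0 of the 5-fold split.
# Paths and normalization constants below are illustrative.
#
#     train_loader = provider(
#         fold=0, total_folds=5,
#         image_folder="data/train_png", mask_folder="data/masks",
#         df_path="data/train-rle.csv", phase="train",
#         size=512, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225),
#         batch_size=8, num_workers=4, shuffle=True,
#     )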
``` |
{
"source": "joverandout/frequency_in_python",
"score": 3
} |
#### File: joverandout/frequency_in_python/monoalph.py
```python
import frequency
freq = frequency.Freq("emails/email1.enc")
mapper0 = {
"A" : "G",
"B" : "Y",
"C" : "O",
"D" : "R",
"E" : "D",
"F" : "Q",
"G" : "C",
"H" : "V",
"I" : "X",
"J" : "K",
"K" : "T",
"L" : "L",
"M" : "E",
"N" : "N",
"O" : "P",
"P" : "U",
"Q" : "B",
"R" : "J",
"S" : "S",
"T" : "A",
"U" : "H",
"V" : "W",
"W" : "M",
"X" : "Z",
"Y" : "F",
"Z" : "I"
}
def replace(text):
    # Apply the substitution map, then shift each mapped letter back by the
    # number of spaces seen so far (so the shift grows by one per word).
    test_string = ""
    tempy = 0
    for char in text:
if char in mapper0:
newChar = mapper0[char]
inty = ord(newChar)-tempy
if inty < 65:
inty = 91-(65-inty)
finalChar = chr(inty)
test_string += finalChar
elif char == ' ':
tempy+=1
test_string += char
else:
test_string += char
return test_string
def main():
with open("emails/email1.enc","rt") as f:
cipher_text = f.read()
print(cipher_text)
new_string = replace(cipher_text)
print(freq.ordered())
with open("emails/output.txt","wt") as f:
f.write(new_string)
print(new_string)
main()
``` |
{
"source": "joverbey/WebCode",
"score": 3
} |
#### File: modules/template_manager/models.py
```python
from app.database import Base, session
class Template(Base):
"""Model object for entries in the templates database table."""
__tablename__ = 'templates'
def commit_to_session(self):
"""Commit this problem to the database as a new template."""
session.add(self)
session.flush()
session.commit()
session.refresh(self)
def to_dict(self):
return {
'template_id': self.template_id,
'title': self.title,
'body': self.body,
'cursor_x': self.cursor_x,
'cursor_y': self.cursor_y,
'type': self.type
}
``` |
{
"source": "joverwey/Diverse-Motion-Stylization",
"score": 2
} |
#### File: Diverse-Motion-Stylization/model/motion_graph_gan_model.py
```python
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from model import st_networks
class MotionGraphGANModel(nn.Module):
def __init__(self, opt):
super(MotionGraphGANModel, self).__init__()
self.opt = opt
self.mode = opt.mode
self.device = torch.device("cuda:{}".format(opt.gpu_ids[0]) if torch.cuda.is_available() else "cpu")
self.save_dir = opt.save_dir
self.model_names = ['G', 'E', 'F', 'D']
# define networks
self.netG = st_networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.style_dim, opt.ng_blk, opt.ng_btn)
self.netE = st_networks.define_E(opt.input_nc, opt.nef, opt.style_dim, opt.num_domains, opt.clip_size, opt.ne_blk, opt.ne_btn)
self.netF = st_networks.define_F(opt.latent_dim, opt.hidden_dim, opt.style_dim, opt.num_domains)
self.netD = st_networks.define_D(opt.input_nc, opt.ndf, opt.num_domains, opt.clip_size, opt.nd_blk, opt.nd_btn)
# set optimizers
if self.mode == 'train':
for name in self.model_names:
setattr(self, 'optimizer_' + name, self.set_optimizer(name))
# set lr schedulers
if self.mode == 'train':
for name in self.model_names:
setattr(self, 'scheduler_' + name, self.set_scheduler(name))
self.to(self.device)
def set_optimizer(self, name):
net = getattr(self, 'net' + name)
if name == 'F':
lr = self.opt.f_lr
elif name == 'G':
lr = self.opt.g_lr
elif name == 'D':
lr = self.opt.d_lr
elif name == 'E':
lr = self.opt.e_lr
else:
            raise NotImplementedError()
optimizer = torch.optim.Adam(
params=net.parameters(),
lr=lr,
betas=(self.opt.beta1, self.opt.beta2),
weight_decay=self.opt.weight_decay)
return optimizer
def set_scheduler(self, name):
optimizer = getattr(self, 'optimizer_' + name)
scheduler = torch.optim.lr_scheduler.ExponentialLR(
optimizer=optimizer,
gamma=0.95
)
return scheduler
def reset_grad(self):
for name in self.model_names:
optim = getattr(self, 'optimizer_' + name)
optim.zero_grad()
def get_current_iter(self):
return self.current_iter
def get_current_lrs(self):
learning_rates = {}
for name in self.model_names:
optimizer = getattr(self, 'optimizer_' + name)
for param_group in optimizer.param_groups:
learning_rates[name] = param_group['lr']
return learning_rates
def print_networks(self):
for name in self.model_names:
save_path = os.path.join(self.save_dir, 'net%s.txt' % name)
with open(save_path, 'w') as nets_f:
if isinstance(name, str):
net = getattr(self, 'net' + name)
st_networks.print_network(net, nets_f)
def save_networks(self, iter=None, latest=False):
if latest:
save_filename = 'latest_checkpoint.pth'
else:
save_filename = '%d_checkpoint.pth' % iter
save_path = os.path.join(self.save_dir, save_filename)
print('Saving the model into %s...' % save_path)
checkpoint = {'iter': iter}
for name in self.model_names:
if isinstance(name, str):
net_name = 'net' + name
optim_name = 'optimizer_' + name
net = getattr(self, net_name)
optim = getattr(self, optim_name)
checkpoint[net_name + '_state_dict'] = net.state_dict()
checkpoint[optim_name + '_state_dict'] = optim.state_dict()
torch.save(checkpoint, save_path)
def load_networks(self, iter=None):
if iter is not None:
load_filename = '%d_checkpoint.pth' % iter
else:
load_filename = 'latest_checkpoint.pth'
load_path = os.path.join(self.save_dir, load_filename)
print('Loading the model from %s...' % load_path)
checkpoint = torch.load(load_path, map_location='cuda:0')
for name in self.model_names:
if isinstance(name, str):
net_name = 'net' + name
net = getattr(self, net_name)
net.load_state_dict(checkpoint[net_name + '_state_dict'])
if self.mode == 'train':
optim_name = 'optimizer_' + name
optim = getattr(self, optim_name)
if name == 'F':
lr = self.opt.f_lr
elif name == 'G':
lr = self.opt.g_lr
elif name == 'D':
lr = self.opt.d_lr
elif name == 'E':
lr = self.opt.e_lr
optim.load_state_dict(checkpoint[optim_name + '_state_dict'])
for param_group in optim.param_groups:
param_group['lr'] = lr
self.current_iter = checkpoint['iter']
```
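The optimizer wiring and checkpointing above are easiest to follow from a small driver loop. The sketch below is not part of the repository: every `opt` value is a made-up placeholder for the fields the constructor reads, and the model still needs the repository's `model.st_networks` module to be importable.

```python
# Hypothetical driver sketch; all option values here are placeholders.
from argparse import Namespace
from model.motion_graph_gan_model import MotionGraphGANModel

opt = Namespace(mode='train', gpu_ids=[0], save_dir='./checkpoints/demo',
                input_nc=7, output_nc=7, ngf=64, nef=64, ndf=64,
                latent_dim=16, hidden_dim=512, style_dim=64,
                num_domains=8, clip_size=64,
                ng_blk=2, ng_btn=0, nd_blk=2, nd_btn=0, ne_blk=2, ne_btn=0,
                f_lr=1e-6, g_lr=1e-4, d_lr=1e-4, e_lr=1e-4,
                beta1=0.0, beta2=0.99, weight_decay=1e-4)

model = MotionGraphGANModel(opt)
for it in range(1, 10001):
    model.reset_grad()
    # ... compute the adversarial losses, backpropagate, step the optimizers ...
    if it % 1000 == 0:
        model.save_networks(iter=it)               # numbered checkpoint
        model.save_networks(iter=it, latest=True)  # rolling 'latest' checkpoint
```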
#### File: Diverse-Motion-Stylization/options/base_options.py
```python
import argparse
from datetime import datetime
from utils.logger import make_dir
with open('contents.txt', 'r') as f:
    contents = [line.strip() for line in f.readlines()]
with open('styles.txt', 'r') as f:
    styles = [line.strip() for line in f.readlines()]
class BaseOptions:
def __init__(self):
self.initialized = False
def initialize(self, parser):
# basic parameters
parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])
parser.add_argument('--name', type=str, default='experiment_name')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints')
parser.add_argument('--num_domains', type=int, default=len(styles))
parser.add_argument('--domains', type=str, nargs='+', default=styles)
        # content domains for recognition
parser.add_argument('--num_contents', type=int, default=len(contents))
parser.add_argument('--contents', type=str, nargs='+', default=contents)
# model parameters
parser.add_argument('--model', type=str, default='motion_graph_gan')
parser.add_argument('--input_nc', type=int, default=7)
parser.add_argument('--output_nc', type=int, default=7)
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--nef', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--latent_dim', type=int, default=16)
parser.add_argument('--hidden_dim', type=int, default=512)
parser.add_argument('--style_dim', type=int, default=64)
parser.add_argument('--ng_blk', type=int, default=2)
parser.add_argument('--ng_btn', type=int, default=0)
parser.add_argument('--nd_blk', type=int, default=2)
parser.add_argument('--nd_btn', type=int, default=0)
parser.add_argument('--ne_blk', type=int, default=2)
parser.add_argument('--ne_btn', type=int, default=0)
# misc
parser.add_argument('--gpu_ids', type=str, default='0')
parser.add_argument('--opt_print', type=bool, default=True)
self.initialized = True
return parser
def gather_options(self):
parser = None
if not self.initialized:
parser = argparse.ArgumentParser()
parser = self.initialize(parser)
self.parser = parser
return parser.parse_args()
def check(self, opt):
pass
def parse(self):
opt = self.gather_options()
opt.save_dir = make_dir(opt.checkpoints_dir, opt.name)
self.check(opt)
return opt
def print_options(self, opt):
now = datetime.now()
message = ''
message += '----------------- %s options -----------------\n' % (opt.mode).capitalize()
message += '{}_start: {}\n'.format(opt.mode, now.strftime('%Y/%m/%d %H:%M:%S'))
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '[default: %s]' % str(default)
message += '{}: {} {}\n'.format(str(k), str(v), comment)
message += '----------------- End -----------------'
if opt.opt_print:
print(message)
return message
```
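For reference, the option flow above is typically exercised as in the sketch below. It assumes `contents.txt` and `styles.txt` exist next to the script (they are read at import time); the repository's train/test scripts presumably subclass `BaseOptions` and add their own flags.

```python
# Minimal usage sketch of BaseOptions (illustrative only).
from options.base_options import BaseOptions

if __name__ == '__main__':
    options = BaseOptions()
    opt = options.parse()        # parses sys.argv and creates opt.save_dir
    options.print_options(opt)   # prints every option, flagging non-default values
```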
#### File: Diverse-Motion-Stylization/preprocess/export_dataset.py
```python
import os
import sys
import numpy as np
import scipy.ndimage.filters as filters
sys.path.append('../')
from animation import BVH, Animation
from animation.Quaternions import Quaternions
from animation.Pivots import Pivots
njoints = 21
selected_joints = [0,
2, 3, 4, 5,
7, 8, 9, 10,
12, 13, 15, 16,
18, 19, 20, 22,
25, 26, 27, 29]
parents = [-1,
0, 1, 2, 3,
0, 5, 6, 7,
0, 9, 10, 11,
10, 13, 14, 15,
10, 17, 18, 19]
with open('contents.txt', 'r') as f:
    contents = [line.strip() for line in f.readlines()]
with open('styles.txt', 'r') as f:
    styles = [line.strip() for line in f.readlines()]
def get_bvh_files(directory):
return [os.path.join(directory, f) for f in sorted(list(os.listdir(directory)))
if os.path.isfile(os.path.join(directory, f))
and f.endswith('.bvh') and f != 'rest.bvh']
def feet_contact_from_positions(positions, fid_l=(3, 4), fid_r=(7, 8)):
fid_l, fid_r = np.array(fid_l), np.array(fid_r)
velfactor = np.array([0.05, 0.05])
feet_contact = []
for fid_index in [fid_l, fid_r]:
foot_vel = (positions[1:, fid_index] - positions[:-1, fid_index]) ** 2
foot_vel = np.sum(foot_vel, axis=-1)
        foot_contact = (foot_vel < velfactor).astype(float)  # builtin float; np.float is deprecated
feet_contact.append(foot_contact)
feet_contact = np.concatenate(feet_contact, axis=-1)
feet_contact = np.concatenate((feet_contact[0:1].copy(), feet_contact), axis=0)
return feet_contact
def preprocess(filename, downsample=2, slice=True, window=64, window_step=32):
anim, names, frametime = BVH.load(filename)
anim = anim[::downsample]
global_xforms = Animation.transforms_global(anim)
global_positions = global_xforms[:,:,:3,3] / global_xforms[:,:,3:,3]
global_rotations = Quaternions.from_transforms(global_xforms)
global_positions = global_positions[:, selected_joints]
global_rotations = global_rotations[:, selected_joints]
clip, feet = get_motion_data(global_positions, global_rotations)
if not slice:
return clip, feet
else:
cls = np.array([-1, -1])
clip_windows = []
feet_windows = []
class_windows = []
cls_name = os.path.split(filename)[1]
cls = np.array([contents.index(cls_name.split('_')[0].split()[-1]),
styles.index(cls_name.split('_')[1])])
if not (cls[0] < 0) & (cls[1] < 0):
for j in range(0, len(clip) - window // 8, window_step):
assert (len(global_positions) >= window // 8)
clip_slice = clip[j:j + window]
clip_feet = feet[j:j + window]
if len(clip_slice) < window:
# left slices
clip_left = clip_slice[:1].repeat((window - len(clip_slice)) // 2 + (window - len(clip_slice)) % 2, axis=0)
clip_left[:, :, -4:] = 0.0
clip_feet_l = clip_feet[:1].repeat((window - len(clip_slice)) // 2 + (window - len(clip_slice)) % 2, axis=0)
# right slices
clip_right = clip_slice[-1:].repeat((window - len(clip_slice)) // 2, axis=0)
clip_right[:, :, -4:] = 0.0
clip_feet_r = clip_feet[-1:].repeat((window - len(clip_slice)) // 2, axis=0)
# padding
clip_slice = np.concatenate([clip_left, clip_slice, clip_right], axis=0)
clip_feet = np.concatenate([clip_feet_l, clip_feet, clip_feet_r], axis=0)
if len(clip_slice) != window: raise Exception()
if len(clip_feet) != window: raise Exception()
clip_windows.append(clip_slice)
feet_windows.append(clip_feet)
class_windows.append(cls)
return clip_windows, feet_windows, class_windows
def get_motion_data(global_positions, global_rotations):
# extract forward direction
sdr_l, sdr_r, hip_l, hip_r = 13, 17, 1, 5
across = ((global_positions[:, sdr_l] - global_positions[:, sdr_r]) + (global_positions[:, hip_l] - global_positions[:, hip_r]))
across = across / np.sqrt((across ** 2).sum(axis=-1))[..., np.newaxis] # (F, 3)
# smooth forward direction
direction_filterwidth = 20
forward = filters.gaussian_filter1d(np.cross(across, np.array([[0, 1, 0]])), direction_filterwidth, axis=0, mode='nearest')
forward = forward / np.sqrt((forward ** 2).sum(axis=-1))[..., np.newaxis]
# remove translation & rotation
root_rotation = Quaternions.between(forward, np.array([[0, 0, 1]]).repeat(len(forward), axis=0))[:, np.newaxis]
positions = global_positions.copy()
rotations = global_rotations.copy()
positions[:, :, 0] = positions[:, :, 0] - positions[:, 0:1, 0]
positions[:, :, 1] = positions[:, :, 1] - positions[:, 0:1, 1] + positions[0:1, 0:1, 1]
positions[:, :, 2] = positions[:, :, 2] - positions[:, 0:1, 2]
positions = root_rotation * positions
rotations = root_rotation * rotations
# trajectory info
root_velocity = root_rotation[:-1] * (global_positions[1:, 0:1] - global_positions[:-1, 0:1])
root_rvelocity = Pivots.from_quaternions(root_rotation[1:] * -root_rotation[:-1]).ps
root_velocity = root_velocity.repeat(njoints, axis=1)
root_rvelocity = root_rvelocity.repeat(njoints, axis=1)[..., np.newaxis]
# motion clip info
positions = positions[:-1]
rotations = rotations[:-1]
root_trajectory = np.concatenate([root_velocity, root_rvelocity], axis=-1)
motion_clip = np.concatenate([positions, rotations, root_trajectory], axis=-1)
    # feet contact info
motion_feet = feet_contact_from_positions(positions)
return motion_clip, motion_feet
def generate_data(filename, downsample=1):
dataframe, feet_cnt = preprocess(filename, slice=False, downsample=downsample)
dataframe = np.transpose(dataframe, (2, 0, 1)) # (C, F, J)
return dataframe, feet_cnt
def generate_dataset(data_dir, out_path, downsample=2, window=64, window_step=16):
style_files = get_bvh_files(data_dir)
style_clips = []
style_feet = []
style_classes = []
for i, item in enumerate(style_files):
print('Processing %i of %i (%s)' % (i, len(style_files), item))
clip, feet, cls = preprocess(item, downsample=downsample, window=window, window_step=window_step)
style_clips += clip
style_feet += feet
style_classes += cls
style_clips = np.array(style_clips)
style_feet = np.array(style_feet)
style_clips = np.transpose(style_clips, (0, 3, 1, 2))
np.savez_compressed(out_path, clips=style_clips, feet=style_feet, classes=style_classes)
def generate_mean_std(dataset_path, out_path):
X = np.load(dataset_path)['clips']
print('Total shape: ', X.shape) # (N, C, F, J)
X = X[:, :-4, :, :] # (N, 7, F, J)
Xmean = X.mean(axis=(0, 2), keepdims=True)[0]
Xmean = np.concatenate([Xmean, np.zeros((4,) + Xmean.shape[1:])])
Xstd = X.std(axis=(0, 2), keepdims=True)[0]
idx = Xstd < 1e-5
Xstd[idx] = 1
Xstd = np.concatenate([Xstd, np.ones((4,) + Xstd.shape[1:])])
print('Mean shape', Xmean.shape)
print('Std shape: ', Xstd.shape)
np.savez_compressed(out_path, Xmean=Xmean, Xstd=Xstd)
"""
if __name__ == '__main__':
generate_dataset('../bvh/generate', '../datasets/styletransfer_generate', downsample=2, window=64, window_step=32)
generate_mean_std('../datasets/styletransfer_generate.npz', '../datasets/preprocess_styletransfer_generate')
generate_dataset('../bvh/classify', '../datasets/styletransfer_classify', downsample=2, window=64, window_step=32)
generate_mean_std('../datasets/styletransfer_classify.npz', '../datasets/preprocess_styletransfer_classify')
print('done!')
"""
```
{
"source": "joverwey/tokenizers",
"score": 3
}
#### File: tests/bindings/test_decoders.py
```python
import pytest
import pickle
from tokenizers.decoders import Decoder, ByteLevel, WordPiece, Metaspace, BPEDecoder
class TestByteLevel:
def test_instantiate(self):
assert ByteLevel() is not None
assert isinstance(ByteLevel(), Decoder)
assert isinstance(ByteLevel(), ByteLevel)
assert isinstance(pickle.loads(pickle.dumps(ByteLevel())), ByteLevel)
def test_decoding(self):
decoder = ByteLevel()
assert decoder.decode(["My", "Ġname", "Ġis", "ĠJohn"]) == "My name is John"
class TestWordPiece:
def test_instantiate(self):
assert WordPiece() is not None
assert WordPiece(prefix="__") is not None
assert WordPiece(cleanup=True) is not None
assert isinstance(WordPiece(), Decoder)
assert isinstance(WordPiece(), WordPiece)
assert isinstance(pickle.loads(pickle.dumps(WordPiece())), WordPiece)
def test_decoding(self):
decoder = WordPiece()
assert decoder.decode(["My", "na", "##me", "is", "Jo", "##hn"]) == "My name is John"
assert decoder.decode(["I", "'m", "Jo", "##hn"]) == "I'm John"
decoder = WordPiece(prefix="__", cleanup=False)
assert decoder.decode(["My", "na", "__me", "is", "Jo", "__hn"]) == "My name is John"
assert decoder.decode(["I", "'m", "Jo", "__hn"]) == "I 'm John"
class TestMetaspace:
def test_instantiate(self):
assert Metaspace() is not None
assert Metaspace(replacement="-") is not None
with pytest.raises(Exception, match="replacement must be a character"):
Metaspace(replacement="")
assert Metaspace(add_prefix_space=True) is not None
assert isinstance(Metaspace(), Decoder)
assert isinstance(Metaspace(), Metaspace)
assert isinstance(pickle.loads(pickle.dumps(Metaspace())), Metaspace)
def test_decoding(self):
decoder = Metaspace()
assert decoder.decode(["▁My", "▁name", "▁is", "▁John"]) == "My name is John"
decoder = Metaspace(replacement="-", add_prefix_space=False)
assert decoder.decode(["-My", "-name", "-is", "-John"]) == " My name is John"
class TestBPEDecoder:
def test_instantiate(self):
assert BPEDecoder() is not None
assert BPEDecoder(suffix="_") is not None
assert isinstance(BPEDecoder(), Decoder)
assert isinstance(BPEDecoder(), BPEDecoder)
assert isinstance(pickle.loads(pickle.dumps(BPEDecoder())), BPEDecoder)
def test_decoding(self):
decoder = BPEDecoder()
assert (
decoder.decode(["My</w>", "na", "me</w>", "is</w>", "Jo", "hn</w>"])
== "My name is John"
)
decoder = BPEDecoder(suffix="_")
assert decoder.decode(["My_", "na", "me_", "is_", "Jo", "hn_"]) == "My name is John"
```
#### File: tokenizers/implementations/bert_wordpiece.py
```python
from tokenizers import Tokenizer, AddedToken, decoders, trainers
from tokenizers.models import WordPiece
from tokenizers.normalizers import BertNormalizer
from tokenizers.pre_tokenizers import BertPreTokenizer
from tokenizers.processors import BertProcessing
from .base_tokenizer import BaseTokenizer
from typing import Optional, List, Union
class BertWordPieceTokenizer(BaseTokenizer):
""" Bert WordPiece Tokenizer """
def __init__(
self,
vocab_file: Optional[str] = None,
unk_token: Union[str, AddedToken] = "[UNK]",
sep_token: Union[str, AddedToken] = "[SEP]",
cls_token: Union[str, AddedToken] = "[CLS]",
pad_token: Union[str, AddedToken] = "[PAD]",
mask_token: Union[str, AddedToken] = "[MASK]",
clean_text: bool = True,
handle_chinese_chars: bool = True,
strip_accents: Optional[bool] = None,
lowercase: bool = True,
wordpieces_prefix: str = "##",
):
if vocab_file is not None:
tokenizer = Tokenizer(WordPiece(vocab_file, unk_token=str(unk_token)))
else:
tokenizer = Tokenizer(WordPiece(unk_token=str(unk_token)))
# Let the tokenizer know about special tokens if they are part of the vocab
if tokenizer.token_to_id(str(unk_token)) is not None:
tokenizer.add_special_tokens([str(unk_token)])
if tokenizer.token_to_id(str(sep_token)) is not None:
tokenizer.add_special_tokens([str(sep_token)])
if tokenizer.token_to_id(str(cls_token)) is not None:
tokenizer.add_special_tokens([str(cls_token)])
if tokenizer.token_to_id(str(pad_token)) is not None:
tokenizer.add_special_tokens([str(pad_token)])
if tokenizer.token_to_id(str(mask_token)) is not None:
tokenizer.add_special_tokens([str(mask_token)])
tokenizer.normalizer = BertNormalizer(
clean_text=clean_text,
handle_chinese_chars=handle_chinese_chars,
strip_accents=strip_accents,
lowercase=lowercase,
)
tokenizer.pre_tokenizer = BertPreTokenizer()
if vocab_file is not None:
sep_token_id = tokenizer.token_to_id(str(sep_token))
if sep_token_id is None:
raise TypeError("sep_token not found in the vocabulary")
cls_token_id = tokenizer.token_to_id(str(cls_token))
if cls_token_id is None:
raise TypeError("cls_token not found in the vocabulary")
tokenizer.post_processor = BertProcessing(
(str(sep_token), sep_token_id), (str(cls_token), cls_token_id)
)
tokenizer.decoder = decoders.WordPiece(prefix=wordpieces_prefix)
parameters = {
"model": "BertWordPiece",
"unk_token": unk_token,
"sep_token": sep_token,
"cls_token": cls_token,
"pad_token": pad_token,
"mask_token": mask_token,
"clean_text": clean_text,
"handle_chinese_chars": handle_chinese_chars,
"strip_accents": strip_accents,
"lowercase": lowercase,
"wordpieces_prefix": wordpieces_prefix,
}
super().__init__(tokenizer, parameters)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 30000,
min_frequency: int = 2,
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
special_tokens: List[Union[str, AddedToken]] = [
"[PAD]",
"[UNK]",
"[CLS]",
"[SEP]",
"[MASK]",
],
show_progress: bool = True,
wordpieces_prefix: str = "##",
):
""" Train the model using the given files """
trainer = trainers.WordPieceTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
special_tokens=special_tokens,
show_progress=show_progress,
continuing_subword_prefix=wordpieces_prefix,
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(trainer, files)
```
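A quick, illustrative way to exercise this wrapper end to end is sketched below; `corpus.txt` is a placeholder for any plain-text file, and `encode()` comes from the `BaseTokenizer` wrapper around the underlying `Tokenizer`.

```python
# Illustrative sketch: train a small vocabulary from scratch, then tokenize.
from tokenizers import BertWordPieceTokenizer

tokenizer = BertWordPieceTokenizer(lowercase=True)
tokenizer.train(files="corpus.txt", vocab_size=5000, min_frequency=2)

encoding = tokenizer.encode("My name is John")
print(encoding.tokens)  # wordpiece tokens produced for the sentence
print(encoding.ids)     # the corresponding vocabulary ids
```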
{
"source": "jovesus/gencoor",
"score": 3
}
#### File: gencoor/data/update_gene_info.py
```python
import os
from tqdm import tqdm
from urllib.request import urlopen
"""
This script is for updating the files below:
alias files
genes_<genome>.bed
chrom.sizes.<genome>
"""
def get_num_lines(file_path):
with open(file_path) as f:
for i, l in enumerate(f):
pass
return i + 1
##########################################################################
### HG38
### -- Update hg19/alias_human.txt
### -- Update hg38/alias_human.txt
### -- Update hg38/genes_hg38.bed
##########################################################################
alias = []
genes = []
file_path = "hg38/gencode.v32.annotation.gtf"
print("Parsing "+file_path)
with open(file_path) as gtf_hg38:
for line in tqdm(gtf_hg38, total=get_num_lines(file_path)):
if line.startswith("#"):
pass
else:
line = line.replace('"', "")
line = line.replace(';', "")
l = line.split()
if l[2] == "gene":
id_symbol = l.index("gene_name")
id_ensembl = l.index("gene_id")
if "hgnc_id" in l:
tag_HGNC = l[l.index("hgnc_id")+1]
else:
tag_HGNC = ""
if "havana_gene" in l:
tag_Havana = l[l.index("havana_gene")+1]
else:
tag_Havana = ""
alias.append([l[id_symbol + 1], l[id_ensembl + 1],
tag_HGNC, tag_Havana])
genes.append([l[0], l[3], l[4], l[id_symbol + 1], "0", l[6]])
res_alias = list(set(tuple(g) for g in alias))
res_genes = list(set(tuple(g) for g in genes))
# Save alias to HG38
alias_file = "hg38/alias_human.txt"
with open(alias_file, "w") as f:
for g in res_alias:
print("\t".join(g), file=f)
# Save alias to HG19
alias_file = "hg19/alias_human.txt"
with open(alias_file, "w") as f:
for g in res_alias:
print("\t".join(g), file=f)
# Save genes to HG38
genes_file = "hg38/genes_hg38.txt"
with open(genes_file, "w") as f:
for g in res_genes:
print("\t".join(g), file=f)
##########################################################################
### HG19
### -- Update hg19/genes_hg19.bed
##########################################################################
genes = []
file_path = "hg19/gencode.v19.annotation.gtf"
print("Parsing "+file_path)
with open(file_path) as gtf_hg19:
for line in tqdm(gtf_hg19, total=get_num_lines(file_path)):
if line.startswith("#"):
pass
else:
line = line.replace('"', "")
line = line.replace(';', "")
l = line.split()
if l[2] == "gene":
id_symbol = l.index("gene_name")
genes.append([l[0], l[3], l[4], l[id_symbol + 1], "0", l[6]])
res_genes = list(set(tuple(g) for g in genes))
# Save genes to HG19
genes_file = "hg19/genes_hg19.txt"
with open(genes_file, "w") as f:
for g in res_genes:
print("\t".join(g), file=f)
##########################################################################
### MM10
### -- Update mm9/alias_mouse.txt
### -- Update mm10/alias_mouse.txt
### -- Update mm10/genes_mm10.bed
##########################################################################
alias = []
genes = []
file_path = "mm10/gencode.vM23.annotation.gtf"
print("Parsing "+file_path)
with open(file_path) as gtf_mm10:
for line in tqdm(gtf_mm10, total=get_num_lines(file_path)):
if line.startswith("#"):
pass
else:
line = line.replace('"', "")
line = line.replace(';', "")
l = line.split()
if l[2] == "gene":
id_symbol = l.index("gene_name")
id_ensembl = l.index("gene_id")
if "hgnc_id" in l:
tag_HGNC = l[l.index("hgnc_id")+1]
else:
tag_HGNC = ""
if "havana_gene" in l:
tag_Havana = l[l.index("havana_gene")+1]
else:
tag_Havana = ""
alias.append([l[id_symbol + 1], l[id_ensembl + 1],
tag_HGNC, tag_Havana])
genes.append([l[0], l[3], l[4], l[id_symbol + 1], "0", l[6]])
res_alias = list(set(tuple(g) for g in alias))
res_genes = list(set(tuple(g) for g in genes))
# Save alias to MM10
alias_file = "mm10/alias_mouse.txt"
with open(alias_file, "w") as f:
for g in res_alias:
print("\t".join(g), file=f)
# Save alias to MM9
alias_file = "mm9/alias_mouse.txt"
with open(alias_file, "w") as f:
for g in res_alias:
print("\t".join(g), file=f)
# Save genes to MM10
genes_file = "mm10/genes_mm10.txt"
with open(genes_file, "w") as f:
for g in res_genes:
print("\t".join(g), file=f)
##########################################################################
### MM9
### -- Update mm9/genes_mm.bed
##########################################################################
genes = []
file_path = "mm9/gencode.vM1.annotation.gtf"
print("Parsing "+file_path)
with open(file_path) as gtf_mm9:
for line in tqdm(gtf_mm9, total=get_num_lines(file_path)):
if line.startswith("#"):
pass
else:
line = line.replace('"', "")
line = line.replace(';', "")
l = line.split()
if l[2] == "gene":
id_symbol = l.index("gene_name")
genes.append([l[0], l[3], l[4], l[id_symbol + 1], "0", l[6]])
res_genes = list(set(tuple(g) for g in genes))
# Save genes to MM9
genes_file = "mm9/genes_mm9.txt"
with open(genes_file, "w") as f:
for g in res_genes:
print("\t".join(g), file=f)
##########################################################################
### Chromosome Size
##########################################################################
chrom_size = {
"hg19": "http://hgdownload.cse.ucsc.edu/goldenPath/hg19/bigZips/hg19.chrom.sizes",
"hg38": "http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/hg38.chrom.sizes",
"mm9": "http://hgdownload.cse.ucsc.edu/goldenPath/mm9/bigZips/mm9.chrom.sizes",
"mm10": "http://hgdownload.cse.ucsc.edu/goldenPath/mm10/bigZips/mm10.chrom.sizes"
}
for genome in chrom_size.keys():
with open(os.path.join(genome, "chrom.sizes." + genome), "w") as f:
data = urlopen(chrom_size[genome])
for line in data:
l = line.decode('utf-8').strip()
if "_" not in l:
print(l, file=f)
```
#### File: gencoor/gencoor/experiment_matirx.py
```python
class ExpMatrix:
"""Experimental Matrix is a class for loading, storing and \
manipulating the files from different sources and formats."""
def __init__(self):
self.names = []
self.types = {}
self.files = {}
# self.labels = {}
self.headers = []
self.additional_columns = {}
self.tags = {}
def read(self, path):
fixed_headers = ["name", "type", "file"]
fixed_idx = {}
with open(path) as f:
for i, line in enumerate(f):
if i == 0:
l = line.strip().split()
for h in fixed_headers:
if h in l:
fixed_idx[h] = l.index(h)
self.headers = [h for h in l if h not in fixed_headers]
if self.headers:
for h in self.headers:
self.additional_columns[h] = {}
elif line.startswith("#"):
continue
else:
l = line.strip().split()
if len(l) >= 4:
new_name = l[fixed_idx["name"]]
self.names.append(new_name)
self.types[new_name] = l[fixed_idx["type"]]
self.files[new_name] = l[fixed_idx["file"]]
# self.labels[new_name] = l[fixed_idx["label"]]
ll = list(set(range(len(l))) - set(fixed_idx.values()))
# print(range(len(l)))
# print()
if self.headers:
for j, h in enumerate(self.headers):
self.additional_columns[h][new_name] = l[ll[j]]
else:
continue
self.extend_entries()
self.sum_up_tags()
def sum_up_tags(self):
for n in self.names:
self.tags[n] = []
if self.headers:
for h in self.headers:
self.tags[n].append(self.additional_columns[h][n])
self.tags[n].append(self.types[n])
# self.tags[n].append(self.labels[n])
self.tags[n] = set(self.tags[n])
def remove_a_name(self, name):
self.names.remove(name)
del self.types[name]
del self.files[name]
# del self.labels[name]
if self.headers:
for h in self.additional_columns.keys():
del self.additional_columns[h][name]
def duplicate_entry(self, name, new_name):
self.names.append(new_name)
self.types[new_name] = self.types[name]
self.files[new_name] = self.files[name]
# self.labels[new_name] = self.labels[name]
if self.headers:
for h in self.additional_columns.keys():
self.additional_columns[h][new_name] = self.additional_columns[h][name]
def extend_entries(self):
def split_a_tag():
for h in self.headers:
for name, tag in self.additional_columns[h].items():
if "," in tag:
new_labels = tag.split(",")
combined_labels = [name+"_"+ s for s in new_labels]
for i, ll in enumerate(combined_labels):
self.duplicate_entry(name, ll)
self.additional_columns[h][ll] = new_labels[i]
self.remove_a_name(name)
return False
return True
def complete_all_tag():
for h in self.headers:
for name, tag in self.additional_columns[h].items():
if tag == ".":
tags = self.get_all_tags(h)
combined_labels = [name + "_" + s for s in tags]
for i, t in enumerate(combined_labels):
self.duplicate_entry(name, t)
self.additional_columns[h][t] = tags[i]
self.remove_a_name(name)
return False
return True
if self.headers:
reach_end = False
while not reach_end:
reach_end = split_a_tag()
reach_end = False
while not reach_end:
reach_end = complete_all_tag()
def get_regions(self):
res = []
for name, type in self.types.items():
if type == "regions":
res.append(name)
return res
def get_signals(self):
res = []
for name, type in self.types.items():
if type == "signals":
res.append(name)
return res
def print(self):
print("####################################")
print("\t".join(["name", "type", "file"] + self.headers))
for name in self.names:
print("\t".join([name, self.types[name], self.files[name]] +
[self.additional_columns[h][name] for h in self.headers]))
print("####################################")
def get_all_tags(self, header):
res = []
if header in self.headers:
for name in self.names:
tag = self.additional_columns[header][name]
if tag != "." and tag not in res:
res.append(tag)
elif header == "regions":
res = self.get_regions()
elif header == "signals":
res = self.get_signals()
else:
res = [""]
return res
def filter_by_tags(self, tags):
res = []
cue = [t for t in tags if t != ""]
# print("cue: "+ " ".join(cue))
# print(self.tags)
for name in self.names:
if set(cue) <= set(self.tags[name]):
res.append(name)
return res
def get_file(self, name):
return self.files[name]
#
# def get_label(self, name):
# return self.labels[name]
```
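From the parser above, the matrix file is a whitespace-separated table whose header must contain `name`, `type` and `file`; any extra columns become tags, and comma-separated or `.` tag values are expanded by `extend_entries()`. The file contents and paths below are hypothetical.

```python
# Hypothetical matrix file ("experiment_matrix.txt"):
#
#   name      type      file             factor
#   regionsA  regions   /data/a.bed      wt
#   signalA   signals   /data/a.bigwig   wt,ko
#
# The comma-separated tag splits signalA into signalA_wt and signalA_ko.
from gencoor.experiment_matirx import ExpMatrix

matrix = ExpMatrix()
matrix.read("experiment_matrix.txt")
matrix.print()
print(matrix.get_regions())           # ['regionsA']
print(matrix.filter_by_tags(["ko"]))  # ['signalA_ko']
```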
{
"source": "jovi521/swsw",
"score": 2
}
#### File: python/fusion12h/fusion12h.py
```python
import sys
import netCDF4 as nc
import numpy as np
import time
import os
import json
import math
def create_json(filepath, savedir):
dataset = nc.Dataset(filepath, 'r')
filename = filepath.split('/')[-1]
lat = dataset.variables['latitude'][:]
lon = dataset.variables['longitude'][:]
time_shape = dataset.variables['r2m'].shape[0]
temp = dataset.variables['t2m'][:]
rhu = dataset.variables['r2m'][:]
prs = dataset.variables['msl'][:]
pre = dataset.variables['tp'][:]
# windu = dataset.variables['u10'][:]
# windv = dataset.variables['v10'][:]
    # Extract the time from the filename (this time is UTC)
time_str = filename[0: 15]
    # Convert to a time struct
time_array = time.strptime(time_str, '%Y%m%d_%H%M%S')
    # Convert to a timestamp
time_stamp = int(time.mktime(time_array))
time_stamp += 8 * 3600
time_array = time.localtime(time_stamp)
other_style_time = time.strftime('%Y%m%d%H%M%S', time_array)
yyyyMM = other_style_time[0: 8]
lon_max_line = 105
lon_min_line = 102.5
lat_max_line = 31.5
lat_min_line = 30
    # Slice the latitude/longitude arrays to the target region
lon_idx = np.where((lon >= float(lon_min_line)) & (lon <= float(lon_max_line)))
lon_after = lon[lon_idx]
lat_idx = np.where((lat >= float(lat_min_line)) & (lat <= float(lat_max_line)))
lat_after = lat[lat_idx]
lon_min_ind = lon_idx[0][0]
lon_max_ind = lon_idx[0][-1]
lat_min_ind = lat_idx[0][0]
lat_max_ind = lat_idx[0][-1]
lon_min = float('{:.2f}'.format(lon_after[0]))
lon_max = float('{:.2f}'.format(lon_after[-1]))
lat_min = float('{:.2f}'.format(lat_after[0]))
lat_max = float('{:.2f}'.format(lat_after[-1]))
lon_count = len(lon_after)
lat_count = len(lat_after)
for i in range(time_shape):
temp_after = temp[i][lat_min_ind:lat_max_ind + 1, lon_min_ind: lon_max_ind + 1]
rhu_after = rhu[i][lat_min_ind:lat_max_ind + 1, lon_min_ind: lon_max_ind + 1]
prs_after = prs[i][lat_min_ind:lat_max_ind + 1, lon_min_ind: lon_max_ind + 1]
pre_after = pre[i][lat_min_ind:lat_max_ind + 1, lon_min_ind: lon_max_ind + 1]
# windu_after = windu[i][lat_min_ind:lat_max_ind + 1, lon_min_ind: lon_max_ind + 1]
# windv_after = windv[i][lat_min_ind:lat_max_ind + 1, lon_min_ind: lon_max_ind + 1]
        # # Compute the wind speed
# windspeed = np.sqrt(np.square(windu_after) + np.square(windv_after))
        # # Compute the wind direction
# winddirect = 180 + np.arctan(windu_after / windv_after) * 180 / math.pi
        # Convert temperature from Kelvin to Celsius
temp_after = temp_after - 273.15
        # Round to one decimal place
temp_after = np.around(temp_after, 1)
rhu_after = np.around(rhu_after, 1)
prs_after = np.around(prs_after, 1)
pre_after = np.around(pre_after, 1)
temp_filename = '{}{}{}{}{}{}'.format(other_style_time, '_', i, '_', 0, '_tem.json')
rhu_filename = '{}{}{}{}{}{}'.format(other_style_time, '_', i, '_', 0, '_rhu.json')
prs_filename = '{}{}{}{}{}{}'.format(other_style_time, '_', i, '_', 0, '_prs.json')
pre_filename = '{}{}{}{}{}{}'.format(other_style_time, '_', i, '_', 0, '_rain.json')
# windfield_filename = '{}{}{}{}{}{}'.format(other_style_time, '_', i, '_', 0, '_windfield.json')
temp_dic = {}
rhu_dic = {}
prs_dic = {}
pre_dic = {}
# wind_dic = {}
temp_dic['modifyType'] = 0
temp_dic['latCount'] = lat_count
temp_dic['lonMax'] = lon_max
temp_dic['data'] = [float('{:.1f}'.format(j)) for j in temp_after.flatten().tolist()]
temp_dic['lonCount'] = lon_count
temp_dic['latMin'] = lat_min
temp_dic['forecastCount'] = int(i)
temp_dic['layer'] = 0
temp_dic['filename'] = temp_filename
temp_dic['time'] = other_style_time
temp_dic['lonMin'] = lon_min
temp_dic['latMax'] = lat_max
temp_dic['productType'] = 'tem'
rhu_dic['modifyType'] = 0
rhu_dic['latCount'] = lat_count
rhu_dic['lonMax'] = lon_max
rhu_dic['data'] = [float('{:.1f}'.format(j)) for j in rhu_after.flatten().tolist()]
rhu_dic['lonCount'] = lon_count
rhu_dic['latMin'] = lat_min
rhu_dic['forecastCount'] = int(i)
rhu_dic['layer'] = 0
rhu_dic['filename'] = rhu_filename
rhu_dic['time'] = other_style_time
rhu_dic['lonMin'] = lon_min
rhu_dic['latMax'] = lat_max
rhu_dic['productType'] = 'rhu'
prs_dic['modifyType'] = 0
prs_dic['latCount'] = lat_count
prs_dic['lonMax'] = lon_max
prs_dic['data'] = [float('{:.1f}'.format(j)) for j in prs_after.flatten().tolist()]
prs_dic['lonCount'] = lon_count
prs_dic['latMin'] = lat_min
prs_dic['forecastCount'] = int(i)
prs_dic['layer'] = 0
prs_dic['filename'] = prs_filename
prs_dic['time'] = other_style_time
prs_dic['lonMin'] = lon_min
prs_dic['latMax'] = lat_max
prs_dic['productType'] = 'prs'
pre_dic['modifyType'] = 0
pre_dic['latCount'] = lat_count
pre_dic['lonMax'] = lon_max
pre_dic['data'] = [float('{:.1f}'.format(j)) for j in pre_after.flatten().tolist()]
pre_dic['lonCount'] = lon_count
pre_dic['latMin'] = lat_min
pre_dic['forecastCount'] = int(i)
pre_dic['layer'] = 0
pre_dic['filename'] = pre_filename
pre_dic['time'] = other_style_time
pre_dic['lonMin'] = lon_min
pre_dic['latMax'] = lat_max
pre_dic['productType'] = 'rain'
# wind_dic['modifyType'] = 0
# wind_dic['latCount'] = lat_count
# wind_dic['lonMax'] = lon_max
# wind_dic['windSpeed'] = [float('{:.1f}'.format(j)) for j in windspeed.flatten().tolist()]
# wind_dic['windDirection'] = [float('{:.1f}'.format(j)) for j in winddirect.flatten().tolist()]
# wind_dic['lonCount'] = lon_count
# wind_dic['latMin'] = lat_min
# wind_dic['forecastCount'] = int(i)
# wind_dic['layer'] = 0
# wind_dic['filename'] = windfield_filename
# wind_dic['time'] = other_style_time
# wind_dic['lonMin'] = lon_min
# wind_dic['latMax'] = lat_max
# wind_dic['productType'] = 'windfield'
temp_json = json.dumps(temp_dic)
rhu_json = json.dumps(rhu_dic)
prs_json = json.dumps(prs_dic)
pre_json = json.dumps(pre_dic)
# wind_json = json.dumps(wind_dic)
temp_dir = '{}{}{}{}{}'.format(savedir, '\\\\', yyyyMM, '\\\\', 'TEM')
rhu_dir = '{}{}{}{}{}'.format(savedir, '\\\\', yyyyMM, '\\\\', 'RHU')
prs_dir = '{}{}{}{}{}'.format(savedir, '\\\\', yyyyMM, '\\\\', 'PRS')
pre_dir = '{}{}{}{}{}'.format(savedir, '\\\\', yyyyMM, '\\\\', 'RAIN')
# wind_dir = '{}{}{}{}{}'.format(savedir, '\\\\', yyyyMM, '\\\\', 'WINDFIELD')
temp_path = '{}{}{}{}{}{}{}'.format(savedir, '\\\\', yyyyMM, '\\\\', 'TEM', '\\\\', temp_filename)
rhu_path = '{}{}{}{}{}{}{}'.format(savedir, '\\\\', yyyyMM, '\\\\', 'RHU', '\\\\', rhu_filename)
prs_path = '{}{}{}{}{}{}{}'.format(savedir, '\\\\', yyyyMM, '\\\\', 'PRS', '\\\\', prs_filename)
pre_path = '{}{}{}{}{}{}{}'.format(savedir, '\\\\', yyyyMM, '\\\\', 'RAIN', '\\\\', pre_filename)
# wind_path = '{}{}{}{}{}{}{}'.format(savedir, '\\\\', yyyyMM, '\\\\', 'WINDFIELD', '\\\\', windfield_filename)
if os.path.exists(temp_dir) == False:
os.makedirs(temp_dir)
if os.path.exists(rhu_dir) == False:
os.makedirs(rhu_dir)
if os.path.exists(prs_dir) == False:
os.makedirs(prs_dir)
if os.path.exists(pre_dir) == False:
os.makedirs(pre_dir)
# if os.path.exists(wind_dir) == False:
# os.makedirs(wind_dir)
with open(temp_path, 'w+') as file_obj:
file_obj.write(temp_json)
with open(rhu_path, 'w+') as file_obj:
file_obj.write(rhu_json)
with open(prs_path, 'w+') as file_obj:
file_obj.write(prs_json)
with open(pre_path, 'w+') as file_obj:
file_obj.write(pre_json)
# with open(wind_path, 'w+') as file_obj:
# file_obj.write(wind_json)
if __name__ == '__main__':
filepath = sys.argv[1]
savedir = sys.argv[2]
# filepath = r'D:\\Data\\20210104_000000.grapes.12h.nc'
# create_json(filepath, 'D:\\Data\\fusion12h_parse')
create_json(filepath, savedir)
```
#### File: python/statellite/fy4a_channel12.py
```python
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.basemap import Basemap
import sys
import os
import time
from fy4a import FY4A_AGRI_L1
def create_img(file_path, geo_range, save_dir):
    '''
    file_path: path of the file to parse
    geo_range: region to crop and its resolution, in the format
               "min lat, max lat, min lon, max lon, step", e.g. 10, 54, 70, 140, 0.1
    save_path: output directory
    '''
    # Get the file name
filename = file_path.split('\\')[-1]
    # Extract the start time from the file name
start_time = filename.split('_')[-4]
    # Convert UTC to Beijing time
time_array = time.strptime(start_time, "%Y%m%d%H%M%S")
time_stamp = int(time.mktime(time_array)) + 8 * 3600
time_array = time.localtime(time_stamp)
other_style_time = time.strftime('%Y%m%d%H%M%S', time_array)
yyyyMMdd = other_style_time[0:8]
    # Satellite type
satellite_type = 'FY4A'
    # Channel number
channel_number = 'Channel12'
    # Read the file and get an FY4A object
fy4a_agri_l1 = FY4A_AGRI_L1(file_path)
    # Select the channel and region
fy4a_agri_l1.extract('Channel12', geo_range)
    # Get the channel data
channel12 = fy4a_agri_l1.channels['Channel12']
    # Plot
    # Set the figure size and dpi
# plt.subplot(1, 1, 1)
plt.figure(figsize=(10, 8), dpi=200)
lat_S, lat_N, lon_W, lon_E, step = eval(geo_range)
channel12 = np.array(channel12)
channel12 = np.flip(channel12, axis=0)
Basemap(projection='merc', llcrnrlat=lat_S, urcrnrlat=lat_N, \
llcrnrlon=lon_W, urcrnrlon=lon_E, lat_ts=5, resolution='c')
x = np.arange(lon_W, lon_E + 0.1, 0.1)
y = np.arange(lat_S, lat_N + 0.1, 0.1)
xx, yy = np.meshgrid(x, y)
    # Custom colours: colour scale for channel 12
levels = [198.55, 218.15, 232.38, 243.94, 253.88, 262.69, 270.1, 277.51, 284.39, 290.85,
296.96, 302.76, 308.32, 313.65, 318.79, 323.74, 328.55, 334.39]
cdict = (
'#474447', '#EBA6C3', '#DA5146', '#DCB758', '#C0D9C6', '#99C7E1', '#84ABCF', '#6D8DBC', '#5971A9', '#425396',
'#303B84', '#242C78', '#1B1F6D', '#1B1F6D', '#111262', '#0a0b5B', '#0a0a5B', '#0c0c5D')
plt.contourf(xx, yy, channel12, levels=levels, extend='both', colors=cdict)
# plt.contourf(xx, yy, channel12)
    # Remove the axes/border
plt.axis('off')
img_name = '{}{}{}{}{}{}'.format('C012', '_', other_style_time, '_', satellite_type, '.png')
save_dir = '{}{}{}{}{}{}'.format(save_dir, satellite_type, '/', yyyyMMdd, '/', channel_number.upper())
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_path = '{}{}{}'.format(save_dir, '/', img_name)
plt.savefig(save_path, transparent=True, bbox_inches='tight', pad_inches=0)
if __name__ == '__main__':
file_path = sys.argv[1]
geo_range = sys.argv[2]
save_path = sys.argv[3]
# file_path = 'D:/Z_SATE_C_BAWX_20201217062328_P_FY4A-_AGRI--_N_DISK_1047E_L2-_LSE-_MULT_NOM_20201217060000_20201217061459_012KM_V0001.NC'
# color_dict = 'D:/color_dict.txt'
# geo_range = '10,54,70,140,0.1'
# save_path = 'D:/China.png'
# file_path = 'D:\\Data\\Z_SATE_C_BAWX_20210131142647_P_FY4A-_AGRI--_N_REGC_1047E_L1-_FDI-_MULT_NOM_20210131141918_20210131142335_4000M_V0001.HDF'
# geo_range = '10,54,70,140,0.1'
# save_path = 'D:/Data/satellite_parse/'
create_img(file_path, geo_range, save_path)
```
#### File: python/statellite/fy4a.py
```python
import netCDF4 as nc
import numpy as np
from projection import latlon2linecolumn
# Channels contained in files of each spatial resolution
CONTENTS = {'0500M': ('Channel02',),
'1000M': ('Channel01', 'Channel02', 'Channel03'),
'2000M': tuple([f'Channel{x:02d}' for x in range(1, 8)]),
'4000M': tuple([f'Channel{x:02d}' for x in range(1, 15)])}
# Number of lines/columns for each spatial resolution
SIZES = {'0500M': 21984,
'1000M': 10992,
'2000M': 5496,
'4000M': 2748}
class FY4A_AGRI_L1(object):
"""
FY4A(AGRI一级数据)类
"""
def __init__(self, l1name):
"""
获得L1数据hdf5文件对象、记录读取状态
"""
        # Parse and get the dataset
self.dataset = nc.Dataset(l1name, 'r')
        # Get the spatial resolution
self.resolution = l1name[-15:-10]
        # Get the required channel names
self.channels = {x: None for x in CONTENTS[self.resolution]}
self.line_begin = self.dataset.getncattr('Begin Line Number')
self.line_end = self.dataset.getncattr('End Line Number')
        # geo_range is kept in sync with line and column
self.geo_range = None
self.line = None
self.column = None
def __del__(self):
"""
确保关闭L1数据hdf5文件
"""
self.dataset.close()
def extract(self, channelname, geo_range=None):
"""
最邻近插值提取
line:行号
column:列号
channelname:要提取的通道名(如'Channel01')
返回字典
暂时没有处理缺测值(异常)
REGC超出范围未解决
"""
NOMChannelname = 'NOM' + channelname
CALChannelname = 'CAL' + channelname
        # If geo_range is not specified, read the whole image without calibration
if geo_range is None:
channel = self.dataset[NOMChannelname][()]
self.channels[channelname] = channel
return None
geo_range = eval(geo_range)
if self.geo_range != geo_range:
self.geo_range = geo_range
            # Multiply by 1000 and round first to avoid accumulating floating-point errors
lat_S, lat_N, lon_W, lon_E, step = \
[int(1000 * x) for x in geo_range]
lat = np.arange(lat_N, lat_S-1, -step) / 1000
lon = np.arange(lon_W, lon_E+1, step) / 1000
lon_mesh, lat_mesh = np.meshgrid(lon, lat)
            # Compute the nominal full-disk line/column numbers for geo_range
line, column = latlon2linecolumn(lat_mesh, lon_mesh, self.resolution)
self.line = np.rint(line).astype(np.uint16) - self.line_begin
self.column = np.rint(column).astype(np.uint16)
        # DISK (full disk) and REGC (China region) data differ only in the start and end line numbers
channel = \
self.dataset[NOMChannelname][()][self.line, self.column]
        # Calibration table
CALChannel = self.dataset[CALChannelname][()].astype(np.float32)
if NOMChannelname != 'NOMChannel07':
CALChannel = np.append(CALChannel, np.nan)
channel[channel >= 65534] = 4096
else:
CALChannel[65535] = np.nan
self.channels[channelname] = CALChannel[channel]
# # Demo: export data of a specified region to a .nc file
# if __name__ == '__main__':
# from os import listdir
# from os.path import join
# from datetime import datetime
# from netCDF4 import date2num, Dataset as ncDataset
# from matplotlib import pyplot as plt
# h5path = r'..\data' # directory containing the FY-4A Level-1 data
# ncname = r'..\data\test.nc'
# h5list = [join(h5path, x) for x in listdir(h5path)
# if '4000M' in x and 'FDI' in x]
# geo_range = '10, 54, 70, 140, 0.05'
# lat_S, lat_N, lon_W, lon_E, step = eval(geo_range)
# lat = np.arange(lat_N, lat_S-0.01, -step)
# lon = np.arange(lon_W, lon_E+0.01, step)
# channelnames = ('Channel12',) # Channel02 in the test data has issues
# # Create the nc file
# ncfile = ncDataset(ncname, 'w', format='NETCDF4')
# ncfile.createDimension('lat', len(lat))
# ncfile.createDimension('lon', len(lon))
# ncfile.createDimension('time') # unlimited length
# nclat = ncfile.createVariable('lat', 'f4', ('lat',))
# nclon = ncfile.createVariable('lon', 'f4', ('lon',))
# nctime = ncfile.createVariable('time', 'f8', ('time',))
# nctime.units = 'minutes since 0001-01-01 00:00:00.0'
# t = 0
# for channelname in channelnames:
# ncfile.createVariable(channelname, 'f4', ('time', 'lat', 'lon'))
# ncfile.set_auto_mask(False)
# # Write into the nc file
# nclat[:] = lat
# nclon[:] = lon
# lon, lat = np.meshgrid(lon, lat)
# for l1name in h5list:
# fy4a_h5 = FY4A_H5(l1name, channelnames)
# print('FY4A_H5 instantiated successfully')
# for channelname in channelnames:
# fy4a_h5.extract(channelname, geo_range)
# ncfile[channelname][t, :, :] = fy4a_h5.channels[channelname]
# print(channelname + ' read successfully')
# time = datetime.strptime(l1name[-45: -33], '%Y%m%d%H%M%S')
# nctime[t] = date2num(time, nctime.units)
# ncfile.sync() # manually flush to disk
# t += 1
# plt.figure(l1name[-45: -31])
# plt.imshow(fy4a_h5.channels['Channel12'], cmap='gray_r')
# plt.show()
# ncfile.close()
```
#### File: resources/python/word2pdf.py
```python
import sys
from win32com.client import Dispatch
wdFormatPDF = 17
def doc2pdf(input_files):
global word
try:
word = Dispatch('KWPS.application')
except:
word = Dispatch('WORD.application')
finally:
doc = word.Documents.Open(input_files)
if input_files.endswith(".docx"):
doc.SaveAs(input_files.replace(".docx", ".pdf"), FileFormat=wdFormatPDF)
elif input_files.endswith(".doc"):
doc.SaveAs(input_files.replace(".doc", ".pdf"), FileFormat=wdFormatPDF)
doc.Close()
word.Quit()
if __name__ == '__main__':
input_file = sys.argv[1]
doc2pdf(input_file)
```
{
"source": "jovial/arcus-slurm-on-openstack",
"score": 2
}
#### File: modules/ironic_compute/baremetal.py
```python
from __future__ import print_function
import sys, json, pprint
import openstack
from ClusterShell import NodeSet
def get_config():
config = {}
if len(sys.argv) == 1:
# using from terraform
config = json.load(sys.stdin)
config["debug"] = False
else:
config = dict(
zip(('os_cloud', 'hostname_pattern', 'cluster_name'),
sys.argv[1:]))
config["debug"] = True
pprint.pprint(config)
return config
def get_hostnames(host_pattern):
return list(NodeSet.NodeSet(host_pattern))
def find_baremetal_nodes(conn, hostnames):
found = []
nodes = conn.baremetal.nodes()
for node in nodes:
if node.name in hostnames:
found.append({
"uuid": node["id"],
"name": node["name"],
"instance_uuid": node["instance_id"],
})
if len(found) != len(hostnames):
print(found)
print(hostnames)
raise Exception("Unable to find all baremetal nodes")
return found
def print_result(nodes):
result = {}
for node in nodes:
result[node["name"]] = node["uuid"]
print(json.dumps(result))
config = get_config()
conn = openstack.connection.from_config(cloud=config["os_cloud"])
found = find_baremetal_nodes(conn, get_hostnames(config["hostname_pattern"]))
print_result(found)
```
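The `hostname_pattern` handling above relies on ClusterShell's folded nodeset syntax; a tiny illustration follows (the pattern itself is only an example).

```python
# Folded nodeset patterns expand to individual hostnames, which is what
# get_hostnames() returns and find_baremetal_nodes() matches against.
from ClusterShell import NodeSet

print(list(NodeSet.NodeSet("bm-compute-[0-3]")))
# -> ['bm-compute-0', 'bm-compute-1', 'bm-compute-2', 'bm-compute-3']
```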
{
"source": "jovial/arcus-terraform-idrac",
"score": 3
}
#### File: arcus-terraform-idrac/ansible_enroll/configuration-filter.py
```python
import json
import argparse
import fileinput
import re
import sys
def filter_attr(component, regexps):
if not regexps:
return component
if "Attributes" not in component:
# no attributes to filter
return component
attrs = component["Attributes"]
component["Attributes"] = [x for x in attrs if re.match("|".join(regexps), x["Name"])]
return component
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Filter system configuration')
parser.add_argument('--attr-filter', metavar='', nargs='*', action="append",
help='Filter attributes', default=[])
parser.add_argument('--component-filter', metavar='', nargs='*', action="append",
help='Filter components', default=[])
parser.add_argument('--input', metavar='', nargs='?', help='input file', default=[])
args = parser.parse_args()
# flatten
component_filters = sum(args.component_filter, [])
if len(component_filters) != len(args.attr_filter):
print("Missing attr-filter for a component", sys.stderr)
sys.exit(1)
buffer = []
for line in fileinput.input(args.input):
buffer.append(line)
content = json.loads("\n".join(buffer))
components = content["SystemConfiguration"]["Components"]
filtered = []
for component in components:
for i, regex in enumerate(component_filters):
if re.match(regex, component["FQDD"]):
filtered.append(filter_attr(component, args.attr_filter[i]))
# [filter_attr(x, args.attr_filter[i]) for i, x in enumerate(components) if not component_filters or re.match("|".join(component_filters), x["FQDD"])]
# filter_attr returns none on no attributes
content["SystemConfiguration"]["Components"] = [x for x in filtered if x]
print(json.dumps(content))
```
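To make the attribute filtering concrete, here is a small self-contained check of `filter_attr()` on a made-up component; the FQDD and attribute names are illustrative, not taken from a real iDRAC export.

```python
# Keep only attributes whose Name matches one of the given regexps
# (run alongside filter_attr from the script above).
sample = {
    "FQDD": "BIOS.Setup.1-1",
    "Attributes": [
        {"Name": "BootMode", "Value": "Uefi"},
        {"Name": "MemTest", "Value": "Disabled"},
    ],
}
print(filter_attr(sample, [r"Boot"]))
# -> {'FQDD': 'BIOS.Setup.1-1', 'Attributes': [{'Name': 'BootMode', 'Value': 'Uefi'}]}
```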
{
"source": "jovialio/datumaro",
"score": 2
}
#### File: datumaro/components/environment.py
```python
from functools import partial
from typing import (
Callable, Dict, Generic, Iterable, Iterator, Optional, Type, TypeVar,
)
import glob
import importlib
import inspect
import logging as log
import os.path as osp
from datumaro.components.cli_plugin import CliPlugin, plugin_types
from datumaro.components.format_detection import detect_dataset_format
from datumaro.util.os_util import import_foreign_module, split_path
T = TypeVar('T')
class Registry(Generic[T]):
def __init__(self):
self.items: Dict[str, T] = {}
def register(self, name: str, value: T) -> T:
self.items[name] = value
return value
def unregister(self, name: str) -> Optional[T]:
return self.items.pop(name, None)
def get(self, key: str):
"""Returns a class or a factory function"""
return self.items[key]
def __getitem__(self, key: str) -> T:
return self.get(key)
def __contains__(self, key) -> bool:
return key in self.items
def __iter__(self) -> Iterator[T]:
return iter(self.items)
class PluginRegistry(Registry[Type[CliPlugin]]):
def __init__(self, filter: Callable[[Type[CliPlugin]], bool] = None): \
#pylint: disable=redefined-builtin
super().__init__()
self.filter = filter
def batch_register(self, values: Iterable[CliPlugin]):
for v in values:
if self.filter and not self.filter(v):
continue
self.register(v.NAME, v)
class Environment:
_builtin_plugins = None
def __init__(self):
def _filter(accept, skip=None):
accept = (accept, ) if inspect.isclass(accept) else tuple(accept)
skip = {skip} if inspect.isclass(skip) else set(skip or [])
skip = tuple(skip | set(accept))
return lambda t: issubclass(t, accept) and t not in skip
from datumaro.components.converter import Converter
from datumaro.components.extractor import (
Extractor, Importer, ItemTransform, SourceExtractor, Transform,
)
from datumaro.components.launcher import Launcher
from datumaro.components.validator import Validator
self._extractors = PluginRegistry(_filter(Extractor,
skip=SourceExtractor))
self._importers = PluginRegistry(_filter(Importer))
self._launchers = PluginRegistry(_filter(Launcher))
self._converters = PluginRegistry(_filter(Converter))
self._transforms = PluginRegistry(_filter(Transform,
skip=ItemTransform))
self._validators = PluginRegistry(_filter(Validator))
self._builtins_initialized = False
def _get_plugin_registry(self, name):
if not self._builtins_initialized:
self._builtins_initialized = True
self._register_builtin_plugins()
return getattr(self, name)
@property
def extractors(self) -> PluginRegistry:
return self._get_plugin_registry('_extractors')
@property
def importers(self) -> PluginRegistry:
return self._get_plugin_registry('_importers')
@property
def launchers(self) -> PluginRegistry:
return self._get_plugin_registry('_launchers')
@property
def converters(self) -> PluginRegistry:
return self._get_plugin_registry('_converters')
@property
def transforms(self) -> PluginRegistry:
return self._get_plugin_registry('_transforms')
@property
def validators(self) -> PluginRegistry:
return self._get_plugin_registry('_validators')
@staticmethod
def _find_plugins(plugins_dir):
plugins = []
for pattern in ('*.py', '*/*.py'):
for path in glob.glob(
osp.join(glob.escape(plugins_dir), pattern)):
if not osp.isfile(path):
continue
path_rel = osp.relpath(path, plugins_dir)
name_parts = split_path(osp.splitext(path_rel)[0])
# a module with a dot in the name won't load correctly
if any('.' in part for part in name_parts):
log.warning(
"Python file '%s' in directory '%s' can't be imported "
"due to a dot in the name; skipping.",
path_rel, plugins_dir)
continue
plugins.append('.'.join(name_parts))
return plugins
@classmethod
def _get_plugin_exports(cls, module, types):
exports = []
if hasattr(module, 'exports'):
exports = module.exports
else:
for symbol in dir(module):
if symbol.startswith('_'):
continue
exports.append(getattr(module, symbol))
exports = [s for s in exports
            if inspect.isclass(s) and issubclass(s, types) and s not in types]
return exports
@classmethod
def _load_plugins(cls, module_names, *, importer, types=None):
types = tuple(types or plugin_types())
all_exports = []
for module_name in module_names:
try:
module = importer(module_name)
exports = cls._get_plugin_exports(module, types)
except Exception as e:
module_search_error = ModuleNotFoundError
message = ["Failed to import module '%s': %s", module_name, e]
if isinstance(e, module_search_error):
log.debug(*message)
else:
log.warning(*message)
continue
log.debug("Imported the following symbols from %s: %s" % \
(
module_name,
', '.join(s.__name__ for s in exports)
)
)
all_exports.extend(exports)
return all_exports
@classmethod
def _load_builtin_plugins(cls):
if cls._builtin_plugins is None:
import datumaro.plugins
plugins_dir = osp.dirname(datumaro.plugins.__file__)
module_names = [datumaro.plugins.__name__ + '.' + name
for name in cls._find_plugins(plugins_dir)]
cls._builtin_plugins = cls._load_plugins(module_names,
importer=importlib.import_module)
return cls._builtin_plugins
def load_plugins(self, plugins_dir):
module_names = self._find_plugins(plugins_dir)
plugins = self._load_plugins(module_names,
importer=partial(import_foreign_module, path=plugins_dir))
self._register_plugins(plugins)
def _register_builtin_plugins(self):
self._register_plugins(self._load_builtin_plugins())
def _register_plugins(self, plugins):
self.extractors.batch_register(plugins)
self.importers.batch_register(plugins)
self.launchers.batch_register(plugins)
self.converters.batch_register(plugins)
self.transforms.batch_register(plugins)
self.validators.batch_register(plugins)
def make_extractor(self, name, *args, **kwargs):
return self.extractors.get(name)(*args, **kwargs)
def make_importer(self, name, *args, **kwargs):
return self.importers.get(name)(*args, **kwargs)
def make_launcher(self, name, *args, **kwargs):
return self.launchers.get(name)(*args, **kwargs)
def make_converter(self, name, *args, **kwargs):
result = self.converters.get(name)
if inspect.isclass(result):
result = result.convert
return partial(result, *args, **kwargs)
def make_transform(self, name, *args, **kwargs):
return partial(self.transforms.get(name), *args, **kwargs)
def is_format_known(self, name):
return name in self.importers or name in self.extractors
def detect_dataset(self, path):
return detect_dataset_format(
((format_name, importer.detect)
for format_name, importer in self.importers.items.items()),
path,
)
```
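A short usage sketch of the registries above; the format name passed to `make_converter()` is only an example and depends on which plugins are available in the installation.

```python
# List the registered plugin names and build a converter factory (sketch).
from datumaro.components.environment import Environment

env = Environment()
print(sorted(env.importers))    # names of the available importers
print(sorted(env.extractors))   # names of the available extractors

# make_converter() returns a functools.partial over the converter's convert();
# 'coco' is used here only as an example format name.
convert_fn = env.make_converter('coco')
```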
#### File: datumaro/components/media.py
```python
from __future__ import annotations
from typing import Callable, Iterable, Iterator, Optional, Tuple, Union
import os
import os.path as osp
import shutil
import weakref
import cv2
import numpy as np
from datumaro.util.image import (
_image_loading_errors, decode_image, lazy_image, save_image,
)
class MediaElement:
def __init__(self, path: str) -> None:
self._path = path
@property
def path(self) -> str:
"""Path to the media file"""
return self._path
@property
def ext(self) -> str:
"""Media file extension (with the leading dot)"""
return osp.splitext(osp.basename(self.path))[1]
def __eq__(self, other: object) -> bool:
# We need to compare exactly with this type
if type(other) is not __class__: # pylint: disable=unidiomatic-typecheck
return False
return self._path == other._path
class Image(MediaElement):
def __init__(self,
data: Union[np.ndarray, Callable[[str], np.ndarray], None] = None,
*,
path: Optional[str] = None,
ext: Optional[str] = None,
size: Optional[Tuple[int, int]] = None) -> None:
"""
Creates an image.
Any combination of the `data`, `path` and `size` is possible,
but at least one of these arguments must be provided.
The `ext` parameter cannot be used as a single argument for
construction.
Args:
data - Image pixels or a function to retrieve them. The expected
image shape is (H, W [, C]). If a function is provided,
it must accept image path as the first argument.
path - Image path
ext - Image extension. Cannot be used together with `path`. It can
be used for saving with a custom extension - in that case,
the image need to have the `data` and `ext` fields defined.
size - A pair (H, W), which represents image size.
"""
assert size is None or len(size) == 2, size
if size is not None:
assert len(size) == 2 and 0 < size[0] and 0 < size[1], size
size = tuple(map(int, size))
self._size = size # (H, W)
if path is None:
path = ''
elif path:
path = path.replace('\\', '/')
self._path = path
if ext:
assert not path, "Can't specify both 'path' and 'ext' for image"
if not ext.startswith('.'):
ext = '.' + ext
ext = ext.lower()
else:
ext = None
self._ext = ext
if not isinstance(data, np.ndarray):
assert path or callable(data) or size, "Image can not be empty"
assert data is None or callable(data)
if data or path and osp.isfile(path):
data = lazy_image(path, loader=data)
self._data = data
@property
def data(self) -> np.ndarray:
"""Image data in BGR HWC [0; 255] (float) format"""
if callable(self._data):
data = self._data()
else:
data = self._data
if self._size is None and data is not None:
self._size = tuple(map(int, data.shape[:2]))
return data
@property
def has_data(self) -> bool:
return self._data is not None
@property
def has_size(self) -> bool:
"""Indicates that size info is cached and won't require image loading"""
return self._size is not None or isinstance(self._data, np.ndarray)
@property
def size(self) -> Optional[Tuple[int, int]]:
"""Returns (H, W)"""
if self._size is None:
try:
data = self.data
except _image_loading_errors:
return None
if data is not None:
self._size = tuple(map(int, data.shape[:2]))
return self._size
@property
def ext(self) -> str:
"""Media file extension"""
if self._ext is not None:
return self._ext
else:
return osp.splitext(osp.basename(self.path))[1]
def __eq__(self, other):
if not isinstance(other, __class__):
return False
return \
(np.array_equal(self.size, other.size)) and \
(self.has_data == other.has_data) and \
(self.has_data and np.array_equal(self.data, other.data) or \
not self.has_data)
def save(self, path):
cur_path = osp.abspath(self.path)
path = osp.abspath(path)
cur_ext = self.ext.lower()
new_ext = osp.splitext(osp.basename(path))[1].lower()
os.makedirs(osp.dirname(path), exist_ok=True)
if cur_ext == new_ext and osp.isfile(cur_path):
if cur_path != path:
shutil.copyfile(cur_path, path)
else:
save_image(path, self.data)
class ByteImage(Image):
_FORMAT_MAGICS = (
(b'\x89PNG\r\n\x1a\n', '.png'),
(b'\xff\xd8\xff', '.jpg'),
(b'BM', '.bmp'),
)
def __init__(self,
data: Union[bytes, Callable[[str], bytes], None] = None,
*,
path: Optional[str] = None,
ext: Optional[str] = None,
size: Optional[Tuple[int, int]] = None):
if not isinstance(data, bytes):
assert path or callable(data), "Image can not be empty"
assert data is None or callable(data)
if path and osp.isfile(path) or data:
data = lazy_image(path, loader=data)
self._bytes_data = data
if ext is None and path is None and isinstance(data, bytes):
ext = self._guess_ext(data)
super().__init__(path=path, ext=ext, size=size,
data=lambda _: decode_image(self.get_bytes()))
if data is None:
# We don't expect decoder to produce images from nothing,
# otherwise using this class makes no sense. We undefine
# data to avoid using default image loader for loading binaries
# from the path, when no data is provided.
self._data = None
@classmethod
def _guess_ext(cls, data: bytes) -> Optional[str]:
return next(
(ext for magic, ext in cls._FORMAT_MAGICS
if data.startswith(magic)),
None,
)
def get_bytes(self):
if callable(self._bytes_data):
return self._bytes_data()
return self._bytes_data
def save(self, path):
cur_path = osp.abspath(self.path)
path = osp.abspath(path)
cur_ext = self.ext.lower()
new_ext = osp.splitext(osp.basename(path))[1].lower()
os.makedirs(osp.dirname(path), exist_ok=True)
if cur_ext == new_ext and osp.isfile(cur_path):
if cur_path != path:
shutil.copyfile(cur_path, path)
elif cur_ext == new_ext:
with open(path, 'wb') as f:
f.write(self.get_bytes())
else:
save_image(path, self.data)
class VideoFrame(Image):
def __init__(self, video: Video, index: int):
self._video = video
self._index = index
super().__init__(lambda _: self._video.get_frame_data(self._index))
@property
def size(self) -> Tuple[int, int]:
return self._video.frame_size
@property
def index(self) -> int:
return self._index
@property
def video(self) -> Video:
return self._video
class _VideoFrameIterator(Iterator[VideoFrame]):
"""
Provides sequential access to the video frames.
"""
_video: Video
_iterator: Iterator[VideoFrame]
_pos: int
_current_frame_data: Optional[np.ndarray]
def __init__(self, video: Video):
self._video = video
self._reset()
def _reset(self):
self._video._reset_reader()
self._iterator = self._decode(self._video._get_reader())
self._pos = -1
self._current_frame_data = None
def _decode(self, cap) -> Iterator[VideoFrame]:
"""
Decodes video frames using opencv
"""
self._pos = -1
success, frame = cap.read()
while success:
self._pos += 1
if self._video._includes_frame(self._pos):
self._current_frame_data = frame.astype(float)
yield self._make_frame(index=self._pos)
success, frame = cap.read()
if self._video._frame_count is None:
self._video._frame_count = self._pos + 1
def _make_frame(self, index) -> VideoFrame:
return VideoFrame(self._video, index=index)
def __next__(self):
return next(self._iterator)
def __getitem__(self, idx: int) -> VideoFrame:
if not self._video._includes_frame(idx):
raise IndexError(f"Video doesn't contain frame #{idx}.")
return self._navigate_to(idx)
def get_frame_data(self, idx: int) -> np.ndarray:
self._navigate_to(idx)
return self._current_frame_data
def _navigate_to(self, idx: int) -> VideoFrame:
"""
Iterates over frames to the required position.
"""
if idx < 0:
raise IndexError()
if idx < self._pos:
self._reset()
if self._pos < idx:
try:
while self._pos < idx:
v = self.__next__()
except StopIteration as e:
raise IndexError() from e
else:
v = self._make_frame(index=self._pos)
return v
class Video(MediaElement, Iterable[VideoFrame]):
"""
Provides random access to the video frames.
"""
def __init__(self, path: str, *,
step: int = 1, start_frame: int = 0,
end_frame: Optional[int] = None) -> None:
super().__init__(path)
if end_frame:
assert start_frame < end_frame
assert 0 < step
self._step = step
self._start_frame = start_frame
self._end_frame = end_frame or None
self._reader = None
self._iterator: Optional[_VideoFrameIterator] = None
self._frame_size: Optional[Tuple[int, int]] = None
# We don't provide frame count unless we have a reliable source of
# this information.
# - Not all videos provide length / duration metainfo
# - We can get an estimation based on metadata, but it
# can be invalid or inaccurate due to variable frame rate
# or fractional values rounded up. Relying on the value will give
# errors during requesting frames.
# https://stackoverflow.com/a/47796468
self._frame_count = None
self._length = None
from .media_manager import MediaManager
MediaManager.get_instance().push(weakref.ref(self), self)
def close(self):
self._iterator = None
if self._reader is not None:
self._reader.release()
self._reader = None
def __getitem__(self, idx: int) -> VideoFrame:
if not self._includes_frame(idx):
raise IndexError(f"Video doesn't contain frame #{idx}.")
return self._get_iterator()[idx]
def get_frame_data(self, idx: int) -> VideoFrame:
if not self._includes_frame(idx):
raise IndexError(f"Video doesn't contain frame #{idx}.")
return self._get_iterator().get_frame_data(idx)
def __iter__(self) -> Iterator[VideoFrame]:
"""
Iterates over frames lazily, if possible.
"""
if self._frame_count is not None:
# Decoding is not necessary to get frame pointers
# However, it can be inaccurate
end_frame = self._get_end_frame()
for index in range(self._start_frame, end_frame, self._step):
yield VideoFrame(video=self, index=index)
else:
# Need to decode to iterate over frames
yield from self._get_iterator()
@property
def length(self) -> Optional[int]:
"""
Returns frame count, if video provides such information.
Note that not all videos provide length / duration metainfo, so the
result may be undefined.
Also note, that information may be inaccurate because of variable
FPS in video or incorrect metainfo. The count is only guaranteed to
be valid after video is completely read once.
The count is affected by the frame filtering options of the object,
i.e. start frame, end frame and frame step.
"""
if self._length is None:
end_frame = self._get_end_frame()
length = None
if end_frame is not None:
length = (end_frame - self._start_frame) // self._step
assert 0 < length
self._length = length
return self._length
@property
def frame_size(self) -> Tuple[int, int]:
"""Returns (H, W)"""
if self._frame_size is None:
self._frame_size = self._get_frame_size()
return self._frame_size
def _get_frame_size(self) -> Tuple[int, int]:
cap = self._get_reader()
w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
if h and w:
frame_size = (int(h), int(w))
else:
image = next(self._get_iterator()).data
frame_size = image.shape[0:2]
return frame_size
def _get_end_frame(self):
if self._end_frame is not None and self._frame_count is not None:
end_frame = min(self._end_frame, self._frame_count)
else:
end_frame = self._end_frame or self._frame_count
return end_frame
def _includes_frame(self, i):
end_frame = self._get_end_frame()
if self._start_frame <= i:
if (i - self._start_frame) % self._step == 0:
if end_frame is None or i < end_frame:
return True
return False
def _get_iterator(self):
if self._iterator is None:
self._iterator = _VideoFrameIterator(self)
return self._iterator
def _get_reader(self):
if self._reader is None:
self._reset_reader()
return self._reader
def _reset_reader(self):
if self._reader is not None:
self._reader.release()
self._reader = cv2.VideoCapture(self._path)
assert self._reader.isOpened()
def __eq__(self, other: object) -> bool:
if not isinstance(other, __class__):
return False
return self.path == other.path and \
self._start_frame == other._start_frame and \
self._step == other._step and \
self._end_frame == other._end_frame
def __hash__(self):
# Required for caching
return hash((self._path, self._step, self._start_frame, self._end_frame))
```
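A minimal usage sketch of the lazy frame access these media classes provide. The file name and frame indices below are placeholders, and the snippet assumes the classes above are importable; it is an illustration, not code from the repository.

```python
# Hypothetical usage: step-filtered, lazily decoded video frames.
video = Video('clip.avi', step=5, start_frame=10, end_frame=100)

for f in video:                   # yields only frames matching the filter
    pass

frame = video[15]                 # random access; index must match the filter
print(frame.index, frame.size)    # size is reported as (H, W)
pixels = frame.data               # decoded frame as an np.ndarray

video.close()                     # releases the underlying cv2.VideoCapture
```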
#### File: plugins/datumaro_format/converter.py
```python
import os
import os.path as osp
import shutil
import numpy as np
import pycocotools.mask as mask_utils
from datumaro.components.annotation import (
Annotation, Bbox, Caption, Cuboid3d, Label, LabelCategories, Mask,
MaskCategories, Points, PointsCategories, Polygon, PolyLine, RleMask,
_Shape,
)
from datumaro.components.converter import Converter
from datumaro.components.dataset import ItemStatus
from datumaro.components.extractor import DEFAULT_SUBSET_NAME, DatasetItem
from datumaro.util import cast, dump_json_file
from .format import DatumaroPath
class _SubsetWriter:
def __init__(self, context):
self._context = context
self._data = {
'info': {},
'categories': {},
'items': [],
}
@property
def categories(self):
return self._data['categories']
@property
def items(self):
return self._data['items']
def is_empty(self):
return not self.items
def add_item(self, item):
annotations = []
item_desc = {
'id': item.id,
'annotations': annotations,
}
if item.attributes:
item_desc['attr'] = item.attributes
if item.has_image:
path = item.image.path
if self._context._save_images:
path = self._context._make_image_filename(item)
self._context._save_image(item,
osp.join(self._context._images_dir, item.subset, path))
item_desc['image'] = {
'path': path,
}
if item.image.has_size: # avoid accidental image loading
item_desc['image']['size'] = item.image.size
if item.has_point_cloud:
path = item.point_cloud
if self._context._save_images:
path = self._context._make_pcd_filename(item)
self._context._save_point_cloud(item,
osp.join(self._context._pcd_dir, item.subset, path))
item_desc['point_cloud'] = {
'path': path
}
if item.related_images:
images = sorted(item.related_images, key=lambda i: i.path)
if self._context._save_images:
related_images = []
for i, img in enumerate(images):
ri_desc = {}
# Images can have identical names or no names at all,
# so we just rename them
ri_desc['path'] = \
f'image_{i}{self._context._find_image_ext(img)}'
if img.has_data:
img.save(osp.join(self._context._related_images_dir,
item.subset, item.id, ri_desc['path']))
if img.has_size:
ri_desc['size'] = img.size
related_images.append(ri_desc)
else:
related_images = [{'path': img.path} for img in images]
item_desc['related_images'] = related_images
self.items.append(item_desc)
for ann in item.annotations:
if isinstance(ann, Label):
converted_ann = self._convert_label_object(ann)
elif isinstance(ann, Mask):
converted_ann = self._convert_mask_object(ann)
elif isinstance(ann, Points):
converted_ann = self._convert_points_object(ann)
elif isinstance(ann, PolyLine):
converted_ann = self._convert_polyline_object(ann)
elif isinstance(ann, Polygon):
converted_ann = self._convert_polygon_object(ann)
elif isinstance(ann, Bbox):
converted_ann = self._convert_bbox_object(ann)
elif isinstance(ann, Caption):
converted_ann = self._convert_caption_object(ann)
elif isinstance(ann, Cuboid3d):
converted_ann = self._convert_cuboid_3d_object(ann)
else:
raise NotImplementedError()
annotations.append(converted_ann)
def add_categories(self, categories):
for ann_type, desc in categories.items():
if isinstance(desc, LabelCategories):
converted_desc = self._convert_label_categories(desc)
elif isinstance(desc, MaskCategories):
converted_desc = self._convert_mask_categories(desc)
elif isinstance(desc, PointsCategories):
converted_desc = self._convert_points_categories(desc)
else:
raise NotImplementedError()
self.categories[ann_type.name] = converted_desc
def write(self, ann_file):
dump_json_file(ann_file, self._data)
def _convert_annotation(self, obj):
assert isinstance(obj, Annotation)
ann_json = {
'id': cast(obj.id, int),
'type': cast(obj.type.name, str),
'attributes': obj.attributes,
'group': cast(obj.group, int, 0),
}
return ann_json
def _convert_label_object(self, obj):
converted = self._convert_annotation(obj)
converted.update({
'label_id': cast(obj.label, int),
})
return converted
def _convert_mask_object(self, obj):
converted = self._convert_annotation(obj)
if isinstance(obj, RleMask):
rle = obj.rle
else:
rle = mask_utils.encode(
np.require(obj.image, dtype=np.uint8, requirements='F'))
if isinstance(rle['counts'], str):
counts = rle['counts']
else:
counts = rle['counts'].decode('ascii')
converted.update({
'label_id': cast(obj.label, int),
'rle': {
# serialize as compressed COCO mask
'counts': counts,
'size': list(int(c) for c in rle['size']),
},
'z_order': obj.z_order,
})
return converted
def _convert_shape_object(self, obj):
assert isinstance(obj, _Shape)
converted = self._convert_annotation(obj)
converted.update({
'label_id': cast(obj.label, int),
'points': [float(p) for p in obj.points],
'z_order': obj.z_order,
})
return converted
def _convert_polyline_object(self, obj):
return self._convert_shape_object(obj)
def _convert_polygon_object(self, obj):
return self._convert_shape_object(obj)
def _convert_bbox_object(self, obj):
converted = self._convert_shape_object(obj)
converted.pop('points', None)
converted['bbox'] = [float(p) for p in obj.get_bbox()]
return converted
def _convert_points_object(self, obj):
converted = self._convert_shape_object(obj)
converted.update({
'visibility': [int(v.value) for v in obj.visibility],
})
return converted
def _convert_caption_object(self, obj):
converted = self._convert_annotation(obj)
converted.update({
'caption': cast(obj.caption, str),
})
return converted
def _convert_cuboid_3d_object(self, obj):
converted = self._convert_annotation(obj)
converted.update({
'label_id': cast(obj.label, int),
'position': [float(p) for p in obj.position],
'rotation': [float(p) for p in obj.rotation],
'scale': [float(p) for p in obj.scale]
})
return converted
def _convert_attribute_categories(self, attributes):
return sorted(attributes)
def _convert_label_categories(self, obj):
converted = {
'labels': [],
'attributes': self._convert_attribute_categories(obj.attributes),
}
for label in obj.items:
converted['labels'].append({
'name': cast(label.name, str),
'parent': cast(label.parent, str),
'attributes': self._convert_attribute_categories(label.attributes),
})
return converted
def _convert_mask_categories(self, obj):
converted = {
'colormap': [],
}
for label_id, color in obj.colormap.items():
converted['colormap'].append({
'label_id': int(label_id),
'r': int(color[0]),
'g': int(color[1]),
'b': int(color[2]),
})
return converted
def _convert_points_categories(self, obj):
converted = {
'items': [],
}
for label_id, item in obj.items.items():
converted['items'].append({
'label_id': int(label_id),
'labels': [cast(label, str) for label in item.labels],
'joints': [list(map(int, j)) for j in item.joints],
})
return converted
class DatumaroConverter(Converter):
DEFAULT_IMAGE_EXT = DatumaroPath.IMAGE_EXT
def apply(self):
os.makedirs(self._save_dir, exist_ok=True)
images_dir = osp.join(self._save_dir, DatumaroPath.IMAGES_DIR)
os.makedirs(images_dir, exist_ok=True)
self._images_dir = images_dir
annotations_dir = osp.join(self._save_dir, DatumaroPath.ANNOTATIONS_DIR)
os.makedirs(annotations_dir, exist_ok=True)
self._annotations_dir = annotations_dir
self._pcd_dir = osp.join(self._save_dir, DatumaroPath.PCD_DIR)
self._related_images_dir = osp.join(self._save_dir,
DatumaroPath.RELATED_IMAGES_DIR)
writers = {s: _SubsetWriter(self) for s in self._extractor.subsets()}
for writer in writers.values():
writer.add_categories(self._extractor.categories())
for item in self._extractor:
subset = item.subset or DEFAULT_SUBSET_NAME
writers[subset].add_item(item)
for subset, writer in writers.items():
ann_file = osp.join(self._annotations_dir, '%s.json' % subset)
if self._patch and subset in self._patch.updated_subsets and \
writer.is_empty():
if osp.isfile(ann_file):
# Remove subsets that became empty
os.remove(ann_file)
continue
writer.write(ann_file)
@classmethod
def patch(cls, dataset, patch, save_dir, **kwargs):
for subset in patch.updated_subsets:
conv = cls(dataset.get_subset(subset), save_dir=save_dir, **kwargs)
conv._patch = patch
conv.apply()
conv = cls(dataset, save_dir=save_dir, **kwargs)
for (item_id, subset), status in patch.updated_items.items():
if status != ItemStatus.removed:
item = patch.data.get(item_id, subset)
else:
item = DatasetItem(item_id, subset=subset)
if not (status == ItemStatus.removed or \
not item.has_image and not item.has_point_cloud):
continue
image_path = osp.join(save_dir, DatumaroPath.IMAGES_DIR,
item.subset, conv._make_image_filename(item))
if osp.isfile(image_path):
os.unlink(image_path)
pcd_path = osp.join(save_dir, DatumaroPath.PCD_DIR,
item.subset, conv._make_pcd_filename(item))
if osp.isfile(pcd_path):
os.unlink(pcd_path)
related_images_path = osp.join(save_dir,
DatumaroPath.RELATED_IMAGES_DIR, item.subset, item.id)
if osp.isdir(related_images_path):
shutil.rmtree(related_images_path)
``` |
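For orientation, a sketch of the per-annotation dictionary that `_convert_bbox_object` produces; the values are illustrative only, and `type` comes from the annotation's enum name.

```python
# Illustrative result of _SubsetWriter._convert_bbox_object for a box
# Bbox(x=10, y=20, w=30, h=40, label=2, id=1, group=0) with no attributes:
converted = {
    'id': 1,
    'type': 'bbox',
    'attributes': {},
    'group': 0,
    'label_id': 2,
    'bbox': [10.0, 20.0, 30.0, 40.0],  # get_bbox() -> [x, y, w, h]
    'z_order': 0,
}
```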
{
"source": "jovialio/visualDet3D",
"score": 2
} |
#### File: visualDet3D/scripts/imdb_precompute_test.py
```python
from copy import deepcopy
import torch
import cv2
import time
import pickle
import os
import numpy as np
import ctypes
from _path_init import *
from visualDet3D.utils.timer import Timer
from visualDet3D.utils.utils import cfg_from_file
from visualDet3D.data.kitti.kittidata import KittiData
def read_one_split(cfg, index_names, data_root_dir, output_dict, data_split='training', time_display_inter=100):
N = len(index_names)
frames = [None] * N
print("start reading {} data".format(data_split))
timer = Timer()
for i, index_name in enumerate(index_names):
# read data with dataloader api
data_frame = KittiData(data_root_dir, index_name, output_dict)
calib, _, _, _ = data_frame.read_data()
# store the list of kittiObject and kittiCalib
data_frame.calib = calib
frames[i] = data_frame
if (i+1) % time_display_inter == 0:
avg_time = timer.compute_avg_time(i+1)
eta = timer.compute_eta(i+1, N)
print("{} iter:{}/{}, avg-time:{}, eta:{}".format(
data_split, i+1, N, avg_time, eta), end='\r')
save_dir = os.path.join(cfg.path.preprocessed_path, data_split)
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
pkl_file = os.path.join(save_dir, 'imdb.pkl')
pickle.dump(frames, open(pkl_file, 'wb'))
print("{} split finished precomputing".format(data_split))
def main(config="config/config.py"):
cfg = cfg_from_file(config)
torch.cuda.set_device(cfg.trainer.gpu)
time_display_inter = 100 # interval for displaying time consumed in the loop
data_root_dir = cfg.path.test_path # the base directory of the test dataset
calib_path = os.path.join(data_root_dir, 'calib')
list_calib = os.listdir(calib_path)
N = len(list_calib)
# no need for image, could be modified for extended use
output_dict = {
"calib": True,
"image": False,
"label": False,
"velodyne": False,
}
num_test_file = N
test_names = ["%06d" % i for i in range(num_test_file)]
read_one_split(cfg, test_names, data_root_dir, output_dict,
'test', time_display_inter)
print("Preprocessing finished")
if __name__ == '__main__':
from fire import Fire
Fire(main)
```
#### File: kitti/dataset/depth_mono_dataset.py
```python
from __future__ import print_function, division
from multiprocessing import Manager
from torch.utils.data import Dataset, DataLoader
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from visualDet3D.data.kitti.utils import read_image, read_pc_from_bin, read_depth
from visualDet3D.data.pipeline import build_augmentator
from visualDet3D.networks.utils.registry import DATASET_DICT
from PIL import Image
import os
import pickle
import numpy as np
from copy import deepcopy
import sys
from tqdm import tqdm
from matplotlib import pyplot as plt
ros_py_path = '/opt/ros/kinetic/lib/python2.7/dist-packages'
import skimage.measure
if sys.version_info > (3, 0) and ros_py_path in sys.path:
# Python 3 with the ROS (Python 2) path present: temporarily remove it so the correct cv2 is imported
sys.path.remove(ros_py_path)
import cv2
sys.path.append(ros_py_path)
else:
#Python 2
import cv2
def read_K_from_depth_prediction(file):
with open(file, 'r') as f:
line = f.readlines()[0]
data = line.split(" ")
K = np.array([float(x) for x in data[0:9]])
return np.reshape(K, (3, 3))
def read_P23_from_sequence(file):
""" read P2 and P3 from a sequence file calib_cam_to_cam.txt
"""
P2 = None
P3 = None
with open(file, 'r') as f:
for line in f.readlines():
if line.startswith("P_rect_02"):
data = line.split(" ")
P2 = np.array([float(x) for x in data[1:13]])
P2 = np.reshape(P2, [3, 4])
if line.startswith("P_rect_03"):
data = line.split(" ")
P3 = np.array([float(x) for x in data[1:13]])
P3 = np.reshape(P3, [3, 4])
assert P2 is not None, f"can not find P2 in file {file}"
assert P3 is not None, f"can not find P3 in file {file}"
return P2, P3
def read_T_from_sequence(file):
""" read T from a sequence file calib_velo_to_cam.txt
"""
R = None
T = None
with open(file, 'r') as f:
for line in f.readlines():
if line.startswith("R:"):
data = line.split(" ")
R = np.array([float(x) for x in data[1:10]])
R = np.reshape(R, [3, 3])
if line.startswith("T:"):
data = line.split(" ")
T = np.array([float(x) for x in data[1:4]])
T = np.reshape(T, [3, 1])
assert R is not None, "can not find R in file {}".format(file)
assert T is not None, "can not find T in file {}".format(file)
T_velo2cam = np.eye(4)
T_velo2cam[0:3, 0:3] = R
T_velo2cam[0:3, 3:4] = T
return T_velo2cam
@DATASET_DICT.register_module
class KittiDepthMonoDataset(torch.utils.data.Dataset):
"""Some Information about KittiDataset"""
def __init__(self, cfg, split='training'):
super(KittiDepthMonoDataset, self).__init__()
raw_path = cfg.path.raw_path
depth_paths = cfg.path.depth_path if isinstance(cfg.path.depth_path, list) else [cfg.path.depth_path]
aug_cfg = cfg.data.augmentation
manager = Manager() # multiprocessing manager wrapper for shared list objects
self.is_eval = not split == 'training'
self.size = aug_cfg.cropSize #[352, 1216]
raw_sequences = {}
for date_time in os.listdir(raw_path):
sequences = os.listdir(os.path.join(raw_path, date_time))
sequences = [sequence for sequence in sequences if not sequence.endswith(".txt")]
P2, P3 = read_P23_from_sequence(os.path.join(raw_path, date_time, "calib_cam_to_cam.txt"))
T = read_T_from_sequence (os.path.join(raw_path, date_time, "calib_velo_to_cam.txt"))
for sequence in sequences:
raw_sequences[sequence] = dict(P2=P2, P3=P3, T_velo2cam=T, date_time=date_time)
self.imdb = []
print("Start counting images in depth prediction dataset.")
for depth_path in depth_paths:
for sequence in tqdm(os.listdir(depth_path)):
sequence_gt_path = os.path.join(depth_path, sequence, 'proj_depth', 'groundtruth')
P2 = raw_sequences[sequence]['P2']
P3 = raw_sequences[sequence]['P3']
T = raw_sequences[sequence]['T_velo2cam']
left_gt_dir = os.path.join(sequence_gt_path, 'image_02')
right_gt_dir = os.path.join(sequence_gt_path, 'image_03')
gt_names = set(os.listdir(left_gt_dir))
left_image_dir = os.path.join(raw_path, raw_sequences[sequence]['date_time'], sequence, 'image_02', 'data')
right_image_dir = os.path.join(raw_path, raw_sequences[sequence]['date_time'], sequence, 'image_03', 'data')
point_cloud_dir = os.path.join(raw_path, raw_sequences[sequence]['date_time'], sequence, 'velodyne_points', 'data')
image_names = set(os.listdir(left_image_dir))
intersection = gt_names.intersection(image_names) # names in both
instances = [
dict(
image_2_file = os.path.join(left_image_dir, name),
image_3_file = os.path.join(right_image_dir, name),
gt_2_file = os.path.join(left_gt_dir, name),
gt_3_file = os.path.join(right_gt_dir, name),
P2 = P2.copy(),
P3 = P3.copy(),
# T_velo2cam = T.copy(),
# point_cloud_file = os.path.join(point_cloud_dir, name.replace('.png', '.bin'))
) for name in sorted(intersection)
]
self.imdb += instances
self.imdb = manager.list(self.imdb)
if not self.is_eval:
self.transform = build_augmentator(cfg.data.train_augmentation)
else:
self.transform = build_augmentator(cfg.data.test_augmentation)
def __getitem__(self, index):
obj = self.imdb[index]
# point_cloud = read_pc_from_bin(obj['point_cloud_file'])[..., 0:3] #[-1, 4]
# T_velo2cam = obj['T_velo2cam']
if self.is_eval or np.random.rand() < 0.5: # Randomly select left/right image
image = read_image(obj['image_2_file'])
gt = read_depth(obj['gt_2_file'])
P = obj['P2']
else:
image = read_image(obj['image_3_file'])
gt = read_depth(obj['gt_3_file'])
P = obj['P3']
transformed_image, P_new, gt = self.transform(image, p2=P.copy(), image_gt=gt)
output_dict = {'K': P_new[0:3, 0:3].copy(),
'image': transformed_image,
'gt': gt,
'original_shape': image.shape}
return output_dict
def __len__(self):
return len(self.imdb)
@staticmethod
def collate_fn(batch):
rgb_images = np.array([item["image"] for item in batch])#[batch, H, W, 3]
rgb_images = rgb_images.transpose([0, 3, 1, 2])
Ks = [item["K"] for item in batch]
gts = np.stack([item["gt"] for item in batch], axis=0) #[B, H, W]
return torch.from_numpy(rgb_images).float(), Ks, torch.from_numpy(gts).float()
@DATASET_DICT.register_module
class KittiDepthMonoValTestDataset(torch.utils.data.Dataset):
"""Some Information about KittiDataset"""
def __init__(self, cfg, split='validation'):
super(KittiDepthMonoValTestDataset, self).__init__()
base_path = cfg.path[split + "_path"]
self.transform = build_augmentator(cfg.data.test_augmentation)
self.imdb = []
image_dir = os.path.join(base_path, "image")
intrinsic_dir = os.path.join(base_path, "intrinsics")
image_list = os.listdir(image_dir)
image_list.sort()
K_list = os.listdir(intrinsic_dir)
K_list.sort()
self.imdb = [
dict(
image_file = os.path.join(image_dir, image_list[i]),
K = read_K_from_depth_prediction(os.path.join(intrinsic_dir, K_list[i]))
) for i in range(len(image_list))
]
def __getitem__(self, index):
obj = self.imdb[index]
image = read_image(obj['image_file'])
K = obj['K'].copy()
transformed_image = self.transform(image)[0] # shape should not change: input and output are both 352 x 1216
output_dict = {'K': K,
'image': transformed_image,
'original_shape': image.shape}
return output_dict
def __len__(self):
return len(self.imdb)
@staticmethod
def collate_fn(batch):
rgb_images = np.array([item["image"] for item in batch]) #[batch, H, W, 3]
rgb_images = rgb_images.transpose([0, 3, 1, 2])
Ks = [item["K"] for item in batch]
return torch.from_numpy(rgb_images).float(), Ks
```
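The collate functions above define the batch contract for the data loader. A small self-contained sketch of that contract, assuming `KittiDepthMonoDataset` is importable from the module above (synthetic arrays stand in for real KITTI data):

```python
import numpy as np

# Two fake items shaped like what __getitem__ returns: HWC image, 3x3 K, HxW depth.
batch = [
    {'image': np.zeros((352, 1216, 3), dtype=np.float32),
     'K': np.eye(3, dtype=np.float32),
     'gt': np.zeros((352, 1216), dtype=np.float32)}
    for _ in range(2)
]

images, Ks, gts = KittiDepthMonoDataset.collate_fn(batch)
print(images.shape)  # torch.Size([2, 3, 352, 1216]) -- NCHW float tensor
print(gts.shape)     # torch.Size([2, 352, 1216])
```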
#### File: data/kitti/utils.py
```python
import os
from typing import Optional
import numpy as np
from PIL import Image
from numba import jit
from numpy.linalg import inv
import cv2
def read_pc_from_bin(bin_path):
"""Load PointCloud data from bin file."""
p = np.fromfile(bin_path, dtype=np.float32).reshape(-1, 4)
return p
def read_image(path):
'''
read image
inputs:
path(str): image path
returns:
img(np.array): [h, w, c]
'''
return np.array(Image.open(path, 'r'))
def read_depth(path:str)->np.ndarray:
""" Read Ground Truth Depth Image
Args:
path: image path
Return:
depth image: floating image [H, W]
"""
return (cv2.imread(path, -1)) / 256.0
@jit(nopython=True, cache=True)
def _leftcam2lidar(pts, Tr_velo_to_cam, R0_rect):
'''
transform the pts from the left camera frame to lidar frame
pts_lidar = Tr_velo_to_cam^{-1} @ R0_rect^{-1} @ pts_cam
inputs:
pts(np.array): [#pts, 3]
points in the left camera frame
Tr_velo_to_cam:[4, 4]
R0_rect:[4, 4]
'''
hfiller = np.expand_dims(np.ones(pts.shape[0]), axis=1)
pts_hT = np.ascontiguousarray(np.hstack((pts, hfiller)).T) #(4, #pts)
pts_lidar_T = np.ascontiguousarray(inv(Tr_velo_to_cam)) @ np.ascontiguousarray(inv(R0_rect)) @ pts_hT # (4, #pts)
pts_lidar = np.ascontiguousarray(pts_lidar_T.T) # (#pts, 4)
return pts_lidar[:, :3]
@jit(nopython=True, cache=True)
def _lidar2leftcam(pts, Tr_velo_to_cam, R0_rect):
'''
transform the pts from the lidar frame to the left camera frame
pts_cam = R0_rect @ Tr_velo_to_cam @ pts_lidar
inputs:
pts(np.array): [#pts, 3]
points in the lidar frame
'''
hfiller = np.expand_dims(np.ones(pts.shape[0]), axis=1)
pts_hT = np.hstack((pts, hfiller)).T #(4, #pts)
pts_cam_T = R0_rect @ Tr_velo_to_cam @ pts_hT # (4, #pts)
pts_cam = pts_cam_T.T # (#pts, 4)
return pts_cam[:, :3]
@jit(nopython=True, cache=True)
def _leftcam2imgplane(pts, P2):
'''
project the pts from the left camera frame to left camera plane
pixels = P2 @ pts_cam
inputs:
pts(np.array): [#pts, 3]
points in the left camera frame
'''
hfiller = np.expand_dims(np.ones(pts.shape[0]), axis=1)
pts_hT = np.hstack((pts, hfiller)).T #(4, #pts)
pixels_T = P2 @ pts_hT #(3, #pts)
pixels = pixels_T.T
pixels[:, 0] /= pixels[:, 2] + 1e-6
pixels[:, 1] /= pixels[:, 2] + 1e-6
return pixels[:, :2]
@jit(nopython=True, cache=True)
def generate_dispariy_from_velo(pc_velo:np.ndarray,
height:int,
width:int,
Tr_velo_to_cam:np.ndarray,
R0_rect:np.ndarray,
P2:np.ndarray,
baseline:float=0.54):
"""
Generate disparity map from point clouds.
Args:
pc_velo : point clouds in lidar coordinate; np.array of shape [n, 3] -> [[x, y, z]; ...]
height, width : output disparity map shape; int
Tr_velo_to_cam : transform from lidar to camera; np.array [3, 4] -> [R | T]
R0_rect : rotation transform into camera coordinates (z forward, x towards right); np.array [3, 4] -> [R | T]
P2 : transform from P0 camera coordinates to target image plane; np.array [3, 4] -> [R | T]
baseline : baseline length in meter of the stereo setup; float
Output:
disp_map : disparity map; np.array of [height, width], dtype=np.uint16; pixels with disp_map==0 should be ignored
"""
#pts_2d = calib.project_velo_to_image(pc_velo)
pts_cam = _lidar2leftcam(pc_velo, Tr_velo_to_cam, R0_rect)
pts_2d = _leftcam2imgplane(pts_cam, P2)
fov_inds = (pts_2d[:, 0] < width - 1) & (pts_2d[:, 0] >= 0) & \
(pts_2d[:, 1] < height - 1) & (pts_2d[:, 1] >= 0)
fov_inds = fov_inds & (pc_velo[:, 0] > 2)
imgfov_pts_2d = pts_2d[fov_inds, :]
imgfov_pc_rect = pts_cam[fov_inds, :]
depth_map = np.ones((height, width)) * 1e9
imgfov_pts_2d = imgfov_pts_2d.astype(np.int32)#np.round(imgfov_pts_2d).astype(int)
for i in range(imgfov_pts_2d.shape[0]):
depth = imgfov_pc_rect[i, 2]
depth_map[int(imgfov_pts_2d[i, 1]), int(imgfov_pts_2d[i, 0])] = depth
disp_map = (P2[0, 0] * baseline) / (depth_map) * 16
disp_map = disp_map.astype(np.uint16)
return disp_map
@jit(nopython=True, cache=True)
def generate_depth_from_velo(pc_velo:np.ndarray,
height:int,
width:int,
Tr_velo_to_cam:np.ndarray,
R0_rect:np.ndarray,
P2:np.ndarray,
base_depth:Optional[np.ndarray]=None):
"""
Generate a depth map from point clouds.
Args:
pc_velo : point clouds in lidar coordinate; np.array of shape [n, 3] -> [[x, y, z]; ...]
height, width : output depth map shape; int
Tr_velo_to_cam : transform from lidar to camera; np.array [3, 4] -> [R | T]
R0_rect : rotation transform into camera coordinates (z forward, x towards right); np.array [3, 4] -> [R | T]
P2 : transform from P0 camera coordinates to target image plane; np.array [3, 4] -> [R | T]
base_depth : optional base depth map to write onto; np.array of [height, width] or None
Output:
depth_map : depth map in metres; np.array of [height, width]; pixels without a projected point keep their base value (0 if base_depth is None)
"""
#pts_2d = calib.project_velo_to_image(pc_velo)
pts_cam = _lidar2leftcam(pc_velo, Tr_velo_to_cam, R0_rect)
pts_2d = _leftcam2imgplane(pts_cam, P2)
fov_inds = (pts_2d[:, 0] < width - 1) & (pts_2d[:, 0] >= 0) & \
(pts_2d[:, 1] < height - 1) & (pts_2d[:, 1] >= 0)
fov_inds = fov_inds & (pc_velo[:, 0] > 2)
imgfov_pts_2d = pts_2d[fov_inds, :]
imgfov_pc_rect = pts_cam[fov_inds, :]
if base_depth is None:
depth_map = np.zeros((height, width))
else:
depth_map = base_depth
imgfov_pts_2d = imgfov_pts_2d.astype(np.int32)#np.round(imgfov_pts_2d).astype(int)
for i in range(imgfov_pts_2d.shape[0]):
depth = imgfov_pc_rect[i, 2]
depth_map[int(imgfov_pts_2d[i, 1]), int(imgfov_pts_2d[i, 0])] = depth
return depth_map
def write_result_to_file(base_result_path:str,
index:int, scores, bbox_2d, bbox_3d_state_3d=None, thetas=None, obj_types=['Car', 'Pedestrian', 'Cyclist'], threshold=0.4):
"""Write Kitti prediction results of one frame to a file
Args:
base_result_path (str): path to the result directory
index (int): index of the target frame
scores (List[float]): A list or numpy array or cpu tensor of float for score
bbox_2d (np.ndarray): numpy array of [N, 4]
bbox_3d_state_3d (np.ndarray, optional): 3D stats [N, 7] [x_center, y_center, z_center, w, h, l, alpha]. Defaults to None.
thetas (np.ndarray, optional): [N]. Defaults to None.
obj_types (List[str], optional): List of string if object type names. Defaults to ['Car', 'Pedestrian', 'Cyclist'].
threshold (float, optional): Threshold for selecting samples. Defaults to 0.4.
"""
name = "%06d" % index
text_to_write = ""
file = open(os.path.join(base_result_path, name + '.txt'), 'w')
if bbox_3d_state_3d is None:
bbox_3d_state_3d = np.ones([bbox_2d.shape[0], 7], dtype=int)
bbox_3d_state_3d[:, 3:6] = -1
bbox_3d_state_3d[:, 0:3] = -1000
bbox_3d_state_3d[:, 6] = -10
else:
for i in range(len(bbox_2d)):
bbox_3d_state_3d[i][1] = bbox_3d_state_3d[i][1] + 0.5*bbox_3d_state_3d[i][4] # KITTI expects the bottom center of the 3D box
if thetas is None:
thetas = np.ones(bbox_2d.shape[0]) * -10
if len(scores) > 0:
for i in range(len(bbox_2d)):
if scores[i] < threshold:
continue
bbox = bbox_2d[i]
text_to_write += ('{} -1 -1 {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {} \n').format(
obj_types[i], bbox_3d_state_3d[i][-1], bbox[0], bbox[1], bbox[2], bbox[3],
bbox_3d_state_3d[i][4], bbox_3d_state_3d[i][3], bbox_3d_state_3d[i][5],
bbox_3d_state_3d[i][0], bbox_3d_state_3d[i][1], bbox_3d_state_3d[i][2],
thetas[i], scores[i])
file.write(text_to_write)
file.close()
if __name__ == "__main__":
pts, Tr_velo_to_cam, R0_rect = np.zeros([10, 3]), np.eye(4), np.eye(4)
points = _leftcam2lidar(pts, Tr_velo_to_cam, R0_rect)
points = _lidar2leftcam(pts, Tr_velo_to_cam, R0_rect)
P2 = np.zeros([3, 4])
pixels = _leftcam2imgplane(pts, P2)
print(points.shape)
```
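A small worked instance of the disparity relation used in `generate_dispariy_from_velo`; the focal length below is a typical KITTI value, chosen only for illustration.

```python
import numpy as np

# disp = fx * baseline / depth, stored scaled by 16 as uint16 (as in disp_map above).
fx, baseline = 721.5377, 0.54            # illustrative calibration values
depth = np.array([5.0, 10.0, 20.0])      # metres along the camera z axis
disp_px = fx * baseline / depth          # disparity in pixels
disp_u16 = (disp_px * 16).astype(np.uint16)
print(disp_px)    # approx. [77.93 38.96 19.48]
print(disp_u16)   # [1246  623  311]
```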
#### File: evaluator/kitti_depth_prediction/evaluate_depth.py
```python
import numpy as np
from numba import jit
import cv2
import os
@jit(cache=True, nopython=True)
def compute_errors(image_gt, image_pred):
""" Compute Errors from two floating point image.
init errors
1. mae
2. rmse
3. inverse mae
4. inverse rmse
5. log mae
6. log rmse
7. scale invariant log
8. abs relative
9. squared relative
"""
errors = np.zeros(9)
num_pixels = 0.0
# log sum for scale invariant metric
logSum = 0.0
h, w = image_gt.shape # depth images are [H, W]
for i in range(h):
for j in range(w):
if image_gt[i, j] > 0.01:
depth_pred = image_pred[i, j]
depth_gt = image_gt[i, j]
d_err = abs(depth_pred - depth_gt)
d_err_squared = d_err ** 2
d_err_inv = abs(1.0 / depth_gt - 1.0 / depth_pred)
d_err_inv_squared = d_err_inv ** 2
d_err_log = abs(np.log(depth_pred) - np.log(depth_gt))
d_err_log_squared = d_err_log ** 2
# MAE
errors[0] += d_err
# rmse
errors[1] += d_err_squared
# inv_mae
errors[2] += d_err_inv
# inv_rmse
errors[3] += d_err_inv_squared
# log
errors[4] += d_err_log
errors[5] += d_err_log_squared
# log diff for scale invariant metric
logSum += np.log(depth_gt) - np.log(depth_pred)
# abs relative
errors[7] += d_err / depth_gt
# squared relative
errors[8] += d_err_squared / (depth_gt ** 2 )
num_pixels += 1
# normalize mae
errors[0] = errors[0] / num_pixels
# normalize and take sqrt for rmse
errors[1] = errors[1] / num_pixels
errors[1] = np.sqrt(errors[1])
# normalize inverse absolute error
errors[2] = errors[2] / num_pixels
# normalize and take sqrt for inverse rmse
errors[3] = errors[3] / num_pixels
errors[3] = np.sqrt(errors[3])
# normalize log mae
errors[4] = errors[4] / num_pixels
# first normalize log rmse -> we need this result later
normalizedSquaredLog = errors[5] / num_pixels
errors[5] = np.sqrt(normalizedSquaredLog)
# calculate scale invariant metric
errors[6] = np.sqrt(normalizedSquaredLog - (logSum**2 / (num_pixels**2)))
# normalize abs relative
errors[7] = errors[7] / num_pixels
# normalize squared relative
errors[8] = errors[8] / num_pixels
return errors
def evaluate_depth(label_path,
result_path,
scale=256.0):
gt_list = os.listdir(label_path)
gt_list.sort()
gt_list = [os.path.join(label_path, gt) for gt in gt_list if gt.endswith(".png")]
result_list = os.listdir(result_path)
result_list.sort()
result_list = [os.path.join(result_path, result) for result in result_list if result.endswith(".png")]
if not len(gt_list) == len(result_list):
print("Notice: the lenght of gt_list {} is not the same as the result_list {}".format(len(gt_list), len(result_list)))
print("totally found {} images in {} and {}".format(len(gt_list), label_path, result_path))
error_vectors = []
for i in range(len(gt_list)):
image_gt = cv2.imread(gt_list[i], -1) / scale
image_pred = cv2.imread(result_list[i], -1) / scale
error_vectors.append(compute_errors(image_gt, image_pred))
error_vectors = np.array(error_vectors)
metric_names = [
"mae",
"rmse",
"inverse mae",
"inverse rmse",
"log mae",
"log rmse",
"scale invariant log",
"abs relative",
"squared relative"
]
result_texts = []
for i in range(len(error_vectors[0])):
text = "mean {} : {}\n".format(metric_names[i], np.mean(error_vectors[:, i]))
result_texts.append(text)
return result_texts
if __name__ == "__main__":
from fire import Fire
def main(label_path,
result_path):
texts = evaluate_depth(label_path, result_path)
for text in texts:
print(text, end="")
Fire(main)
```
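A quick sanity check of `compute_errors`, assuming the function above is importable: a perfect prediction yields zero in every metric, and a constant 1 m over-estimate on a 10 m ground truth gives MAE 1.0 and abs-relative 0.1.

```python
import numpy as np

gt = np.full((4, 4), 10.0)       # synthetic 4x4 ground-truth depth, all 10 m
pred = gt.copy()

errors = compute_errors(gt, pred)
print(errors)                    # all nine entries are 0.0

errors = compute_errors(gt, gt + 1.0)
print(errors[0], errors[7])      # MAE = 1.0, abs relative = 0.1
```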
#### File: detectors/unet/u_net.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from visualDet3D.networks.backbones import resnet
from visualDet3D.networks.lib.look_ground import LookGround
from visualDet3D.networks.lib.coordconv import DisparityConv
from visualDet3D.networks.lib.ops import ModulatedDeformConvPack
class ResConv(nn.Module):
"""Some Information about ResConv"""
def __init__(self, *args, **kwarg):
super(ResConv, self).__init__()
self.conv = nn.Conv2d(*args, **kwarg)
def forward(self, x):
x = x + self.conv(x)
return x
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, mid_channels=None, is_look_ground=False):
super().__init__()
self.is_look_ground=is_look_ground
if not mid_channels:
mid_channels = out_channels
if is_look_ground:
self.conv0 = LookGround(in_channels, baseline=0.54)
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels, mid_channels, kernel_size=5, padding=2),
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True),
)
self.conv2 = nn.Sequential(
ModulatedDeformConvPack(mid_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x, P2=None, scale=1.0):
"""Forward Methods for Double Conv
Args:
x (torch.Tensor): input feature map.
P2 ([torch.Tensor], optional): camera projection matrix; only pass this when the double conv applies the disparity conv / look-ground operations. Defaults to None.
scale (float, optional): the shrink ratio of the current feature map w.r.t. the original image that P2 refers to, e.g. 1.0/2.0/4.0. Defaults to 1.0.
Returns:
x: torch.Tensor
"""
if P2 is not None:
P = x.new_zeros([x.shape[0], 3, 4])
P[:, :, 0:3] = P2[:, :, 0:3]
P[:, 0:2] /= float(scale)
x = self.conv0(dict(features=x, P2=P))
x = self.conv1(x)
x = self.conv2(x)
return x
class Down(nn.Module):
"""Downscaling with maxpool then double conv"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2),
DoubleConv(in_channels, out_channels)
)
def forward(self, x):
return self.maxpool_conv(x)
class Up(nn.Module):
"""Upscaling then double conv"""
def __init__(self, in_channels, out_channels, bilinear=True, is_look_ground=False):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(
scale_factor=2, mode='bilinear', align_corners=True)
self.conv = DoubleConv(
in_channels, out_channels, in_channels, is_look_ground)
else:
self.up = nn.ConvTranspose2d(
in_channels, in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2=None, **kwargs):
x1 = self.up(x1)
# input is CHW
if x2 is not None:
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
if diffX > 0 or diffY > 0:
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
x = torch.cat([x2, x1], dim=1)
else:
x = x1
return self.conv(x, **kwargs)
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
def forward(self, x):
return self.conv(x)
class UNet_Core(nn.Module):
def __init__(self, n_channels, n_classes, look_ground=True, bilinear=True, backbone_arguments=dict()):
super(UNet_Core, self).__init__()
self.backbone = resnet(**backbone_arguments)
self.n_channels = n_channels
self.n_classes = n_classes
self.bilinear = bilinear
factor = 2 if bilinear else 1
self.up0 = Up(512 + 256, 256, bilinear, is_look_ground=look_ground)
self.up1 = Up(256 + 128, 128 // factor, bilinear, is_look_ground=look_ground)
self.up2 = Up(128, 64, bilinear)
#self.up3 = Up(64, 64, bilinear)
#self.up4 = Up(64 + n_channels, 32, bilinear, is_look_ground=True)
self.out_scale_8 = OutConv(64, n_classes)
self.out_scale_4 = OutConv(64, n_classes)
self.outc = OutConv(64, n_classes)
def forward(self, x, P2=None):
#residual = x
x3, x4, x5, x6 = self.backbone(x)
outs = {}
#x6 = F.relu(x6 + self.fullencoders(x6))
x = self.up0(x6, x5, P2=P2, scale=32)
x = self.up1(x, x4, P2=P2, scale=16)
outs['scale_8'] = self.out_scale_8(x)
x = self.up2(x, x3)
outs['scale_4'] = self.out_scale_4(x)
#x = F.upsample_bilinear(x, scale_factor=4)
x = F.interpolate(x, scale_factor=4, align_corners=True, mode='bilinear')
#x = self.up3(x)
#x = self.up4(x, residual)
#x = torch.cat([x, residual], dim=1)
outs['scale_1'] = self.outc(x)
return outs
```
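The odd-size handling in `Up.forward` is easy to see in isolation: the upsampled decoder tensor is padded to the encoder tensor's spatial size before the channel-wise concatenation. A standalone sketch with made-up shapes:

```python
import torch
import torch.nn.functional as F

x1 = torch.zeros(1, 64, 45, 77)    # upsampled decoder features
x2 = torch.zeros(1, 64, 46, 78)    # encoder features from the skip connection

diffY = x2.size(2) - x1.size(2)
diffX = x2.size(3) - x1.size(3)
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
                diffY // 2, diffY - diffY // 2])

x = torch.cat([x2, x1], dim=1)
print(x.shape)                     # torch.Size([1, 128, 46, 78])
```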
#### File: networks/detectors/yolomono3d_core.py
```python
import numpy as np
import torch.nn as nn
import torch
import math
import time
from visualDet3D.networks.backbones import resnet
class YoloMono3DCore(nn.Module):
"""Some Information about YoloMono3DCore"""
def __init__(self, backbone_arguments=dict()):
super(YoloMono3DCore, self).__init__()
self.backbone =resnet(**backbone_arguments)
def forward(self, x):
x = self.backbone(x['image'])
x = x[0]
return x
```
#### File: lib/disparity_loss/disp2prob.py
```python
import warnings
import torch
import torch.nn.functional as F
def isNaN(x):
return x != x
class Disp2Prob(object):
"""
Convert disparity map to matching probability volume
Args:
maxDisp, (int): the maximum of disparity
gtDisp, (torch.Tensor): in (..., Height, Width) layout
start_disp (int): the start searching disparity index, usually be 0
dilation (int): the step between near disparity index
Outputs:
probability, (torch.Tensor): in [BatchSize, maxDisp, Height, Width] layout
"""
def __init__(self, maxDisp:int, gtDisp:torch.Tensor, start_disp:int=0, dilation:int=1):
if not isinstance(maxDisp, int):
raise TypeError('int is expected, got {}'.format(type(maxDisp)))
if not torch.is_tensor(gtDisp):
raise TypeError('tensor is expected, got {}'.format(type(gtDisp)))
if not isinstance(start_disp, int):
raise TypeError('int is expected, got {}'.format(type(start_disp)))
if not isinstance(dilation, int):
raise TypeError('int is expected, got {}'.format(type(dilation)))
if gtDisp.dim() == 2: # single image H x W
gtDisp = gtDisp.view(1, 1, gtDisp.size(0), gtDisp.size(1))
if gtDisp.dim() == 3: # multi image B x H x W
gtDisp = gtDisp.view(gtDisp.size(0), 1, gtDisp.size(1), gtDisp.size(2))
if gtDisp.dim() == 4:
if gtDisp.size(1) == 1: # multi image B x 1 x H x W
gtDisp = gtDisp
else:
raise ValueError('2nd dimension size should be 1, got {}'.format(gtDisp.size(1)))
self.gtDisp = gtDisp
self.maxDisp = maxDisp
self.start_disp = start_disp
self.dilation = dilation
self.end_disp = start_disp + maxDisp - 1
self.disp_sample_number = (maxDisp + dilation -1) // dilation
self.eps = 1e-40
def getProb(self):
# [BatchSize, 1, Height, Width]
b, c, h, w = self.gtDisp.shape
assert c == 1
# if start_disp = 0, dilation = 1, then generate disparity candidates as [0, 1, 2, ... , maxDisp-1]
index = torch.linspace(self.start_disp, self.end_disp, self.disp_sample_number)
index = index.to(self.gtDisp.device)
# [BatchSize, maxDisp, Height, Width]
self.index = index.repeat(b, h, w, 1).permute(0, 3, 1, 2).contiguous()
# the gtDisp must be (start_disp, end_disp), otherwise, we have to mask it out
mask = (self.gtDisp > self.start_disp) & (self.gtDisp < self.end_disp)
mask = mask.detach().type_as(self.gtDisp)
self.gtDisp = self.gtDisp * mask
probability = self.calProb()
# let the outliers' probability to be 0
# in case divide or log 0, we plus a tiny constant value
probability = probability * mask + self.eps
# in case probability is NaN
if isNaN(probability.min()) or isNaN(probability.max()):
print('Probability ==> min: {}, max: {}'.format(probability.min(), probability.max()))
print('Disparity Ground Truth after mask out ==> min: {}, max: {}'.format(self.gtDisp.min(),
self.gtDisp.max()))
raise ValueError(" \'probability contains NaN!")
return probability
def kick_invalid_half(self):
distance = self.gtDisp - self.index
invalid_index = distance < 0
# after softmax, the valid index with value 1e6 will approximately get 0
distance[invalid_index] = 1e6
return distance
def calProb(self):
raise NotImplementedError
class LaplaceDisp2Prob(Disp2Prob):
# variance is the diversity of the Laplace distribution
def __init__(self, maxDisp, gtDisp, variance=1, start_disp=0, dilation=1):
super(LaplaceDisp2Prob, self).__init__(maxDisp, gtDisp, start_disp, dilation)
self.variance = variance
def calProb(self):
# 1/N * exp( - (d - d{gt}) / var), N is normalization factor, [BatchSize, maxDisp, Height, Width]
scaled_distance = ((-torch.abs(self.index - self.gtDisp)) / self.variance)
probability = F.softmax(scaled_distance, dim=1)
return probability
class GaussianDisp2Prob(Disp2Prob):
# variance is the variance of the Gaussian distribution
def __init__(self, maxDisp, gtDisp, variance=1, start_disp=0, dilation=1):
super(GaussianDisp2Prob, self).__init__(maxDisp, gtDisp, start_disp, dilation)
self.variance = variance
def calProb(self):
# 1/N * exp( - (d - d{gt})^2 / b), N is normalization factor, [BatchSize, maxDisp, Height, Width]
distance = (torch.abs(self.index - self.gtDisp))
scaled_distance = (- distance.pow(2.0) / self.variance)
probability = F.softmax(scaled_distance, dim=1)
return probability
class OneHotDisp2Prob(Disp2Prob):
# variance is the variance of the OneHot distribution
def __init__(self, maxDisp, gtDisp, variance=1, start_disp=0, dilation=1):
super(OneHotDisp2Prob, self).__init__(maxDisp, gtDisp, start_disp, dilation)
self.variance = variance
def getProb(self):
# |d - d{gt}| < variance, [BatchSize, maxDisp, Height, Width]
probability = torch.lt(torch.abs(self.index - self.gtDisp), self.variance).type_as(self.gtDisp)
return probability
``` |
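A minimal sketch of the Laplace soft-label construction, assuming `LaplaceDisp2Prob` is importable from the module above: a 2x2 ground-truth disparity map is expanded into a per-pixel probability volume over 8 disparity candidates.

```python
import torch

gt_disp = torch.tensor([[3.0, 4.5],
                        [2.0, 6.0]])
prob = LaplaceDisp2Prob(maxDisp=8, gtDisp=gt_disp, variance=1).getProb()
print(prob.shape)         # torch.Size([1, 8, 2, 2])
print(prob[0, :, 0, 0])   # peaks at disparity index 3, decaying on both sides
print(prob.sum(dim=1))    # ~1 everywhere: softmax over the 8 candidates
```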
{
"source": "jovial/ironic",
"score": 2
} |
#### File: modules/storage/external.py
```python
from oslo_config import cfg
from oslo_log import log
from ironic.common import exception
from ironic.common.i18n import _
from ironic.drivers import base
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class ExternalStorage(base.StorageInterface):
"""Externally driven Storage Interface."""
def validate(self, task):
def _fail_validation(task, reason,
exception=exception.InvalidParameterValue):
msg = (_("Failed to validate external storage interface for node "
"%(node)s. %(reason)s") %
{'node': task.node.uuid, 'reason': reason})
LOG.error(msg)
raise exception(msg)
if (not self.should_write_image(task)
and not CONF.pxe.ipxe_enabled):
msg = _("The [pxe]/ipxe_enabled option must "
"be set to True to support network "
"booting to an iSCSI volume.")
_fail_validation(task, msg)
def get_properties(self):
return {}
def attach_volumes(self, task):
pass
def detach_volumes(self, task):
pass
def should_write_image(self, task):
"""Determines if deploy should perform the image write-out.
This enables the user to define a volume so that Ironic understands the
image may already exist and that we may be booting to that volume.
:param task: The task object.
:returns: True if the deployment write-out process should be
executed.
"""
instance_info = task.node.instance_info
if 'image_source' not in instance_info:
for volume in task.volume_targets:
if volume['boot_index'] == 0:
return False
return True
```
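A hedged illustration of the `should_write_image` decision using simple stand-in objects (not real ironic task/node objects): with no `image_source` in the instance info and a volume target at boot index 0, the interface reports that the image write-out can be skipped.

```python
from types import SimpleNamespace

task = SimpleNamespace(
    node=SimpleNamespace(instance_info={}),   # no 'image_source' defined
    volume_targets=[{'boot_index': 0}],       # boot volume is already provided
)
print(ExternalStorage().should_write_image(task))   # False -> boot from the volume
```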
#### File: modules/oneview/test_deploy.py
```python
import mock
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conf import CONF
from ironic.drivers.modules import agent
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules.oneview import common
from ironic.drivers.modules.oneview import deploy
from ironic.drivers.modules.oneview import deploy_utils
from ironic.tests.unit.drivers.modules.oneview import test_common
from ironic.tests.unit.objects import utils as obj_utils
METHODS = ['iter_nodes', 'update_node', 'do_provisioning_action']
oneview_error = common.SERVER_HARDWARE_ALLOCATION_ERROR
maintenance_reason = common.NODE_IN_USE_BY_ONEVIEW
driver_internal_info = {'oneview_error': oneview_error}
nodes_taken_by_oneview = [(1, 'oneview')]
nodes_freed_by_oneview = [(1, 'oneview', maintenance_reason)]
nodes_taken_on_cleanfail = [(1, 'oneview', driver_internal_info)]
nodes_taken_on_cleanfail_no_info = [(1, 'oneview', {})]
GET_POWER_STATE_RETRIES = 5
def _setup_node_in_available_state(node):
node.provision_state = states.AVAILABLE
node.maintenance = False
node.maintenance_reason = None
node.save()
def _setup_node_in_manageable_state(node):
node.provision_state = states.MANAGEABLE
node.maintenance = True
node.maintenance_reason = common.NODE_IN_USE_BY_ONEVIEW
node.save()
def _setup_node_in_cleanfailed_state_with_oneview_error(node):
node.provision_state = states.CLEANFAIL
node.maintenance = False
node.maintenance_reason = None
driver_internal_info = node.driver_internal_info
oneview_error = common.SERVER_HARDWARE_ALLOCATION_ERROR
driver_internal_info['oneview_error'] = oneview_error
node.driver_internal_info = driver_internal_info
node.save()
def _setup_node_in_cleanfailed_state_without_oneview_error(node):
node.provision_state = states.CLEANFAIL
node.maintenance = False
node.maintenance_reason = None
node.save()
class OneViewDriverDeploy(deploy.OneViewPeriodicTasks):
oneview_driver = 'oneview'
@mock.patch('ironic.objects.Node', spec_set=True, autospec=True)
@mock.patch.object(deploy_utils, 'is_node_in_use_by_oneview')
class OneViewPeriodicTasks(test_common.BaseOneViewTest):
def setUp(self):
super(OneViewPeriodicTasks, self).setUp()
self.config(manager_url='https://1.2.3.4', group='oneview')
self.config(username='user', group='oneview')
self.config(password='password', group='oneview')
self.deploy = OneViewDriverDeploy()
self.os_primary = mock.MagicMock(spec=METHODS)
def test_node_manageable_maintenance_when_in_use_by_oneview(
self, mock_is_node_in_use_by_oneview, mock_node_get
):
mock_node_get.get.return_value = self.node
_setup_node_in_available_state(self.node)
self.os_primary.iter_nodes.return_value = nodes_taken_by_oneview
mock_is_node_in_use_by_oneview.return_value = True
self.deploy._periodic_check_nodes_taken_by_oneview(
self.os_primary, self.context
)
mock_is_node_in_use_by_oneview.assert_called_once_with(self.node)
self.assertTrue(self.os_primary.update_node.called)
self.assertTrue(self.os_primary.do_provisioning_action.called)
self.assertTrue(self.node.maintenance)
self.assertEqual(common.NODE_IN_USE_BY_ONEVIEW,
self.node.maintenance_reason)
def test_node_stay_available_when_not_in_use_by_oneview(
self, mock_is_node_in_use_by_oneview, mock_node_get
):
mock_node_get.get.return_value = self.node
_setup_node_in_available_state(self.node)
mock_node_get.return_value = self.node
mock_is_node_in_use_by_oneview.return_value = False
self.os_primary.iter_nodes.return_value = nodes_taken_by_oneview
self.deploy._periodic_check_nodes_taken_by_oneview(
self.os_primary, self.context
)
mock_is_node_in_use_by_oneview.assert_called_once_with(self.node)
self.assertFalse(self.os_primary.update_node.called)
self.assertFalse(self.os_primary.do_provisioning_action.called)
self.assertFalse(self.node.maintenance)
self.assertIsNone(self.node.maintenance_reason)
def test_node_stay_available_when_raise_exception(
self, mock_is_node_in_use_by_oneview, mock_node_get
):
mock_node_get.get.return_value = self.node
_setup_node_in_available_state(self.node)
side_effect = exception.OneViewError('boom')
mock_is_node_in_use_by_oneview.side_effect = side_effect
self.os_primary.iter_nodes.return_value = nodes_taken_by_oneview
self.deploy._periodic_check_nodes_taken_by_oneview(
self.os_primary, self.context
)
mock_is_node_in_use_by_oneview.assert_called_once_with(self.node)
self.assertFalse(self.os_primary.update_node.called)
self.assertFalse(self.os_primary.do_provisioning_action.called)
self.assertFalse(self.node.maintenance)
self.assertNotEqual(common.NODE_IN_USE_BY_ONEVIEW,
self.node.maintenance_reason)
def test_node_available_when_not_in_use_by_oneview(
self, mock_is_node_in_use_by_oneview, mock_node_get
):
mock_node_get.get.return_value = self.node
_setup_node_in_manageable_state(self.node)
self.os_primary.iter_nodes.return_value = nodes_freed_by_oneview
mock_is_node_in_use_by_oneview.return_value = False
self.deploy._periodic_check_nodes_freed_by_oneview(
self.os_primary, self.context
)
mock_is_node_in_use_by_oneview.assert_called_once_with(self.node)
self.assertTrue(self.os_primary.update_node.called)
self.assertTrue(self.os_primary.do_provisioning_action.called)
self.assertFalse(self.node.maintenance)
self.assertIsNone(self.node.maintenance_reason)
def test_node_stay_manageable_when_in_use_by_oneview(
self, mock_is_node_in_use_by_oneview, mock_node_get
):
mock_node_get.get.return_value = self.node
_setup_node_in_manageable_state(self.node)
mock_is_node_in_use_by_oneview.return_value = True
self.os_primary.iter_nodes.return_value = nodes_freed_by_oneview
self.deploy._periodic_check_nodes_freed_by_oneview(
self.os_primary, self.context
)
mock_is_node_in_use_by_oneview.assert_called_once_with(self.node)
self.assertFalse(self.os_primary.update_node.called)
self.assertFalse(self.os_primary.do_provisioning_action.called)
self.assertTrue(self.node.maintenance)
self.assertEqual(common.NODE_IN_USE_BY_ONEVIEW,
self.node.maintenance_reason)
def test_node_stay_manageable_maintenance_when_raise_exception(
self, mock_is_node_in_use_by_oneview, mock_node_get
):
mock_node_get.get.return_value = self.node
_setup_node_in_manageable_state(self.node)
side_effect = exception.OneViewError('boom')
mock_is_node_in_use_by_oneview.side_effect = side_effect
self.os_primary.iter_nodes.return_value = nodes_freed_by_oneview
self.deploy._periodic_check_nodes_freed_by_oneview(
self.os_primary, self.context
)
mock_is_node_in_use_by_oneview.assert_called_once_with(self.node)
self.assertFalse(self.os_primary.update_node.called)
self.assertFalse(self.os_primary.do_provisioning_action.called)
self.assertTrue(self.node.maintenance)
self.assertEqual(common.NODE_IN_USE_BY_ONEVIEW,
self.node.maintenance_reason)
def test_node_manageable_maintenance_when_oneview_error(
self, mock_is_node_in_use_by_oneview, mock_node_get
):
mock_node_get.get.return_value = self.node
_setup_node_in_cleanfailed_state_with_oneview_error(self.node)
self.os_primary.iter_nodes.return_value = nodes_taken_on_cleanfail
self.deploy._periodic_check_nodes_taken_on_cleanfail(
self.os_primary, self.context
)
self.assertTrue(self.os_primary.update_node.called)
self.assertTrue(self.os_primary.do_provisioning_action.called)
self.assertTrue(self.node.maintenance)
self.assertEqual(common.NODE_IN_USE_BY_ONEVIEW,
self.node.maintenance_reason)
self.assertNotIn('oneview_error', self.node.driver_internal_info)
def test_node_stay_clean_failed_when_no_oneview_error(
self, mock_is_node_in_use_by_oneview, mock_node_get
):
mock_node_get.get.return_value = self.node
_setup_node_in_cleanfailed_state_without_oneview_error(self.node)
self.os_primary.iter_nodes.return_value = (
nodes_taken_on_cleanfail_no_info)
self.deploy._periodic_check_nodes_taken_on_cleanfail(
self.os_primary, self.context
)
self.assertFalse(self.os_primary.update_node.called)
self.assertFalse(self.os_primary.do_provisioning_action.called)
self.assertFalse(self.node.maintenance)
self.assertNotEqual(common.NODE_IN_USE_BY_ONEVIEW,
self.node.maintenance_reason)
self.assertNotIn('oneview_error', self.node.driver_internal_info)
class OneViewIscsiDeployTestCase(test_common.BaseOneViewTest):
deploy_interface = 'oneview-iscsi'
def setUp(self):
super(OneViewIscsiDeployTestCase, self).setUp()
self.config(manager_url='https://1.2.3.4', group='oneview')
self.config(username='user', group='oneview')
self.config(password='password', group='oneview')
self.port = obj_utils.create_test_port(self.context,
node_id=self.node.id)
self.info = common.get_oneview_info(self.node)
def test_get_properties(self):
expected = common.COMMON_PROPERTIES
self.assertEqual(expected,
deploy.OneViewIscsiDeploy().get_properties())
@mock.patch.object(common, 'validate_oneview_resources_compatibility',
spec_set=True, autospec=True)
@mock.patch.object(iscsi_deploy.ISCSIDeploy, 'validate',
spec_set=True, autospec=True)
def test_validate(
self, iscsi_deploy_validate_mock, mock_validate):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.validate(task)
self.assertTrue(mock_validate.called)
iscsi_deploy_validate_mock.assert_called_once_with(mock.ANY, task)
@mock.patch.object(iscsi_deploy.ISCSIDeploy, 'prepare', autospec=True)
@mock.patch.object(deploy_utils, 'allocate_server_hardware_to_ironic')
def test_prepare(self, allocate_server_hardware_mock,
iscsi_deploy_prepare_mock):
self.node.provision_state = states.DEPLOYING
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.deploy.prepare(task)
iscsi_deploy_prepare_mock.assert_called_once_with(mock.ANY, task)
self.assertTrue(allocate_server_hardware_mock.called)
@mock.patch.object(iscsi_deploy.ISCSIDeploy, 'prepare',
spec_set=True, autospec=True)
def test_prepare_active_node(self, iscsi_deploy_prepare_mock):
"""Ensure nodes in running states are not inadvertently changed"""
test_states = list(states.STABLE_STATES)
test_states.extend([states.CLEANING,
states.CLEANWAIT,
states.INSPECTING])
for state in test_states:
self.node.provision_state = state
self.node.save()
iscsi_deploy_prepare_mock.reset_mock()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.prepare(task)
iscsi_deploy_prepare_mock.assert_called_once_with(
mock.ANY, task)
@mock.patch.object(iscsi_deploy.ISCSIDeploy, 'deploy',
spec_set=True, autospec=True)
def test_deploy(self, iscsi_deploy_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.deploy(task)
iscsi_deploy_mock.assert_called_once_with(mock.ANY, task)
@mock.patch.object(iscsi_deploy.ISCSIDeploy, 'tear_down', spec_set=True,
autospec=True)
def test_tear_down(self, iscsi_tear_down_mock):
iscsi_tear_down_mock.return_value = states.DELETED
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
returned_state = task.driver.deploy.tear_down(task)
iscsi_tear_down_mock.assert_called_once_with(mock.ANY, task)
self.assertEqual(states.DELETED, returned_state)
@mock.patch.object(iscsi_deploy.ISCSIDeploy, 'tear_down', spec_set=True,
autospec=True)
@mock.patch.object(deploy_utils, 'deallocate_server_hardware_from_ironic')
def test_tear_down_with_automated_clean_disabled(
self, deallocate_server_hardware_mock, iscsi_tear_down_mock):
CONF.conductor.automated_clean = False
iscsi_tear_down_mock.return_value = states.DELETED
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
returned_state = task.driver.deploy.tear_down(task)
iscsi_tear_down_mock.assert_called_once_with(mock.ANY, task)
self.assertEqual(states.DELETED, returned_state)
self.assertTrue(deallocate_server_hardware_mock.called)
@mock.patch.object(iscsi_deploy.ISCSIDeploy, 'prepare_cleaning',
spec_set=True, autospec=True)
@mock.patch.object(deploy_utils, 'allocate_server_hardware_to_ironic')
def test_prepare_cleaning(
self, allocate_server_hardware_mock, iscsi_prep_clean_mock):
iscsi_prep_clean_mock.return_value = states.CLEANWAIT
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
ret = task.driver.deploy.prepare_cleaning(task)
self.assertEqual(states.CLEANWAIT, ret)
iscsi_prep_clean_mock.assert_called_once_with(mock.ANY, task)
self.assertTrue(allocate_server_hardware_mock.called)
@mock.patch.object(iscsi_deploy.ISCSIDeploy, 'tear_down_cleaning',
spec_set=True, autospec=True)
@mock.patch.object(deploy_utils, 'deallocate_server_hardware_from_ironic')
def test_tear_down_cleaning(
self, deallocate_server_hardware_mock, iscsi_tear_down_clean_mock):
iscsi_tear_down_clean_mock.return_value = states.CLEANWAIT
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.tear_down_cleaning(task)
iscsi_tear_down_clean_mock.assert_called_once_with(mock.ANY, task)
self.assertTrue(deallocate_server_hardware_mock.called)
class OneViewAgentDeployTestCase(test_common.BaseOneViewTest):
deploy_interface = 'oneview-direct'
def setUp(self):
super(OneViewAgentDeployTestCase, self).setUp()
self.config(manager_url='https://1.2.3.4', group='oneview')
self.config(username='user', group='oneview')
self.config(password='password', group='oneview')
self.port = obj_utils.create_test_port(self.context,
node_id=self.node.id)
self.info = common.get_oneview_info(self.node)
def test_get_properties(self):
expected = common.COMMON_PROPERTIES
self.assertEqual(expected,
deploy.OneViewAgentDeploy().get_properties())
    @mock.patch.object(common, 'validate_oneview_resources_compatibility',
                       spec_set=True, autospec=True)
@mock.patch.object(agent.AgentDeploy, 'validate',
spec_set=True, autospec=True)
def test_validate(
self, agent_deploy_validate_mock, mock_validate):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.validate(task)
self.assertTrue(mock_validate.called)
@mock.patch.object(agent.AgentDeploy, 'prepare',
spec_set=True, autospec=True)
@mock.patch.object(deploy_utils, 'allocate_server_hardware_to_ironic')
def test_prepare(
self, allocate_server_hardware_mock, agent_deploy_prepare_mock):
self.node.provision_state = states.DEPLOYING
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.prepare(task)
agent_deploy_prepare_mock.assert_called_once_with(mock.ANY, task)
self.assertTrue(allocate_server_hardware_mock.called)
@mock.patch.object(agent.AgentDeploy, 'prepare',
spec_set=True, autospec=True)
def test_prepare_active_node(self, agent_deploy_prepare_mock):
"""Ensure nodes in running states are not inadvertently changed"""
test_states = list(states.STABLE_STATES)
test_states.extend([states.CLEANING,
states.CLEANWAIT,
states.INSPECTING])
for state in test_states:
self.node.provision_state = state
self.node.save()
agent_deploy_prepare_mock.reset_mock()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.prepare(task)
agent_deploy_prepare_mock.assert_called_once_with(
mock.ANY, task)
@mock.patch.object(agent.AgentDeploy, 'deploy',
spec_set=True, autospec=True)
def test_deploy(self, agent_deploy_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.deploy(task)
agent_deploy_mock.assert_called_once_with(mock.ANY, task)
@mock.patch.object(agent.AgentDeploy, 'tear_down', spec_set=True,
autospec=True)
@mock.patch.object(deploy_utils, 'deallocate_server_hardware_from_ironic')
def test_tear_down_with_automated_clean_disabled(
self, deallocate_server_hardware_mock, agent_tear_down_mock):
CONF.conductor.automated_clean = False
agent_tear_down_mock.return_value = states.DELETED
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
returned_state = task.driver.deploy.tear_down(task)
agent_tear_down_mock.assert_called_once_with(mock.ANY, task)
self.assertEqual(states.DELETED, returned_state)
self.assertTrue(deallocate_server_hardware_mock.called)
@mock.patch.object(agent.AgentDeploy, 'prepare_cleaning',
spec_set=True, autospec=True)
@mock.patch.object(deploy_utils, 'allocate_server_hardware_to_ironic')
def test_prepare_cleaning(
self, allocate_server_hardware_mock, agent_prep_clean_mock):
agent_prep_clean_mock.return_value = states.CLEANWAIT
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
ret = task.driver.deploy.prepare_cleaning(task)
self.assertEqual(states.CLEANWAIT, ret)
agent_prep_clean_mock.assert_called_once_with(mock.ANY, task)
self.assertTrue(allocate_server_hardware_mock.called)
@mock.patch.object(agent.AgentDeploy, 'tear_down_cleaning',
spec_set=True, autospec=True)
@mock.patch.object(deploy_utils, 'deallocate_server_hardware_from_ironic')
def test_tear_down_cleaning(
self, deallocate_server_hardware_mock, agent_tear_down_clean_mock):
agent_tear_down_clean_mock.return_value = states.CLEANWAIT
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.tear_down_cleaning(task)
agent_tear_down_clean_mock.assert_called_once_with(mock.ANY, task)
self.assertTrue(deallocate_server_hardware_mock.called)
``` |
{
"source": "jovial/kayobe-1",
"score": 2
} |
#### File: ironic-inspector-rules/library/os_ironic_inspector_rule.py
```python
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
# Store a list of import errors to report to the user.
IMPORT_ERRORS = []
try:
import ironic_inspector_client
except Exception as e:
IMPORT_ERRORS.append(e)
try:
import shade
except Exception as e:
IMPORT_ERRORS.append(e)
DOCUMENTATION = """
module: os_ironic_inspector_rule
short_description: Create or destroy an Ironic Inspector rule.
author: "<NAME> <<EMAIL>>"
extends_documentation_fragment: openstack
description:
- Create or destroy an Ironic inspector rule.
options:
state:
description:
- State of the rule
choices: ["present", "absent"]
uuid:
description:
- Globally unique identifier for the rule.
required: false
description:
description:
- Description for the rule.
required: false
conditions:
description:
- List of conditions that must be met in order to apply the rule.
required: true
actions:
description:
- List of actions to be taken when the conditions are met.
required: true
"""
EXAMPLES = """
# Ensure that an inspector rule exists.
os_ironic_inspector_rule:
cloud: "openstack"
state: present
uuid: "d44666e1-35b3-4f6b-acb0-88ab7052da69"
description: Set IPMI username in driver_info if not set
conditions:
- field: "node://driver_info.ipmi_username"
op: "is-empty"
actions:
- action: "set-attribute"
path: "driver_info/ipmi_username"
value: "root"
"""
def _build_client(module):
"""Create and return an Ironic inspector client."""
cloud = shade.operator_cloud(**module.params)
session = cloud.cloud_config.get_session()
# Ensure the requested API version is supported.
# API 1.14 is the latest API version available in Rocky.
api_version = (1, 14)
client = ironic_inspector_client.v1.ClientV1(
inspector_url=module.params['inspector_url'],
session=session, region_name=module.params['region_name'],
api_version=api_version)
return client
def _ensure_rule_present(module, client):
"""Ensure that an inspector rule is present."""
if module.params['uuid']:
try:
rule = client.rules.get(module.params['uuid'])
except ironic_inspector_client.ClientError as e:
if e.response.status_code != 404:
module.fail_json(msg="Failed retrieving Inspector rule %s: %s"
% (module.params['uuid'], repr(e)))
else:
# Check whether the rule differs from the request.
keys = ('conditions', 'actions', 'description')
for key in keys:
if rule[key] != module.params[key]:
break
else:
# Nothing to do - rule exists and is as requested.
return False
# Rule differs - delete it before recreating.
_ensure_rule_absent(module, client)
client.rules.create(module.params['conditions'], module.params['actions'],
module.params['uuid'], module.params['description'])
return True
def _ensure_rule_absent(module, client):
"""Ensure that an inspector rule is absent."""
if not module.params['uuid']:
module.fail_json(msg="UUID is required to ensure rules are absent")
try:
client.rules.delete(module.params['uuid'])
except ironic_inspector_client.ClientError as e:
# If the rule does not exist, no problem and no change.
if e.response.status_code == 404:
return False
        module.fail_json(msg="Failed deleting Inspector rule %s: %s"
                         % (module.params['uuid'], repr(e)))
return True
def main():
argument_spec = openstack_full_argument_spec(
conditions=dict(type='list', required=True),
actions=dict(type='list', required=True),
description=dict(required=False),
uuid=dict(required=False),
state=dict(required=False, default='present',
choices=['present', 'absent']),
inspector_url=dict(required=False),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
# Fail if there were any exceptions when importing modules.
if IMPORT_ERRORS:
module.fail_json(msg="Import errors: %s" %
", ".join([repr(e) for e in IMPORT_ERRORS]))
if (module.params['auth_type'] in [None, 'None'] and
module.params['inspector_url'] is None):
module.fail_json(msg="Authentication appears disabled, please "
"define an inspector_url parameter")
if (module.params['inspector_url'] and
module.params['auth_type'] in [None, 'None']):
module.params['auth'] = dict(
endpoint=module.params['inspector_url']
)
try:
client = _build_client(module)
if module.params["state"] == "present":
changed = _ensure_rule_present(module, client)
else:
changed = _ensure_rule_absent(module, client)
except Exception as e:
module.fail_json(msg="Failed to configure Ironic Inspector rule: %s" %
repr(e))
else:
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
```
#### File: kayobe-1/kayobe/ansible.py
```python
import errno
import logging
import os
import os.path
import shutil
import subprocess
import sys
import tempfile
from kayobe import exception
from kayobe import utils
from kayobe import vault
DEFAULT_CONFIG_PATH = "/etc/kayobe"
CONFIG_PATH_ENV = "KAYOBE_CONFIG_PATH"
LOG = logging.getLogger(__name__)
def add_args(parser):
"""Add arguments required for running Ansible playbooks to a parser."""
default_config_path = os.getenv(CONFIG_PATH_ENV, DEFAULT_CONFIG_PATH)
parser.add_argument("-b", "--become", action="store_true",
help="run operations with become (nopasswd implied)")
parser.add_argument("-C", "--check", action="store_true",
help="don't make any changes; instead, try to predict "
"some of the changes that may occur")
parser.add_argument("--config-path", default=default_config_path,
help="path to Kayobe configuration. "
"(default=$%s or %s)" %
(CONFIG_PATH_ENV, DEFAULT_CONFIG_PATH))
parser.add_argument("-e", "--extra-vars", metavar="EXTRA_VARS",
action="append",
help="set additional variables as key=value or "
"YAML/JSON")
parser.add_argument("-i", "--inventory", metavar="INVENTORY",
help="specify inventory host path "
"(default=$%s/inventory or %s/inventory) or "
"comma-separated host list" %
(CONFIG_PATH_ENV, DEFAULT_CONFIG_PATH))
parser.add_argument("-l", "--limit", metavar="SUBSET",
help="further limit selected hosts to an additional "
"pattern")
parser.add_argument("--skip-tags", metavar="TAGS",
help="only run plays and tasks whose tags do not "
"match these values")
parser.add_argument("-t", "--tags", metavar="TAGS",
help="only run plays and tasks tagged with these "
"values")
parser.add_argument("-lt", "--list-tasks",
action="store_true",
                        help="only print names of tasks, don't run them, "
                             "note this has no effect on kolla-ansible.")
def _get_inventory_path(parsed_args):
"""Return the path to the Kayobe inventory."""
if parsed_args.inventory:
return parsed_args.inventory
else:
return os.path.join(parsed_args.config_path, "inventory")
def _validate_args(parsed_args, playbooks):
"""Validate Kayobe Ansible arguments."""
vault.validate_args(parsed_args)
result = utils.is_readable_dir(parsed_args.config_path)
if not result["result"]:
LOG.error("Kayobe configuration path %s is invalid: %s",
parsed_args.config_path, result["message"])
sys.exit(1)
inventory = _get_inventory_path(parsed_args)
result = utils.is_readable_dir(inventory)
if not result["result"]:
LOG.error("Kayobe inventory %s is invalid: %s",
inventory, result["message"])
sys.exit(1)
for playbook in playbooks:
result = utils.is_readable_file(playbook)
if not result["result"]:
LOG.error("Kayobe playbook %s is invalid: %s",
playbook, result["message"])
sys.exit(1)
def _get_vars_files(config_path):
"""Return a list of Kayobe Ansible configuration variable files.
The files will be sorted alphabetically by name.
"""
vars_files = []
for vars_file in os.listdir(config_path):
abs_path = os.path.join(config_path, vars_file)
        if utils.is_readable_file(abs_path)["result"]:
root, ext = os.path.splitext(vars_file)
if ext in (".yml", ".yaml", ".json"):
vars_files.append(abs_path)
return sorted(vars_files)
def build_args(parsed_args, playbooks,
extra_vars=None, limit=None, tags=None, verbose_level=None,
check=None):
"""Build arguments required for running Ansible playbooks."""
cmd = ["ansible-playbook"]
if verbose_level:
cmd += ["-" + "v" * verbose_level]
if parsed_args.list_tasks:
cmd += ["--list-tasks"]
cmd += vault.build_args(parsed_args, "--vault-password-file")
inventory = _get_inventory_path(parsed_args)
cmd += ["--inventory", inventory]
vars_files = _get_vars_files(parsed_args.config_path)
for vars_file in vars_files:
cmd += ["-e", "@%s" % vars_file]
if parsed_args.extra_vars:
for extra_var in parsed_args.extra_vars:
# Don't quote or escape variables passed via the kayobe -e CLI
# argument, to match Ansible's behaviour.
cmd += ["-e", extra_var]
if extra_vars:
for extra_var_name, extra_var_value in extra_vars.items():
# Quote and escape variables originating within the python CLI.
extra_var_value = utils.quote_and_escape(extra_var_value)
cmd += ["-e", "%s=%s" % (extra_var_name, extra_var_value)]
if parsed_args.become:
cmd += ["--become"]
if check or (parsed_args.check and check is None):
cmd += ["--check"]
if parsed_args.limit or limit:
limits = [l for l in [parsed_args.limit, limit] if l]
cmd += ["--limit", ":&".join(limits)]
if parsed_args.skip_tags:
cmd += ["--skip-tags", parsed_args.skip_tags]
if parsed_args.tags or tags:
all_tags = [t for t in [parsed_args.tags, tags] if t]
cmd += ["--tags", ",".join(all_tags)]
cmd += playbooks
return cmd
def run_playbooks(parsed_args, playbooks,
extra_vars=None, limit=None, tags=None, quiet=False,
check_output=False, verbose_level=None, check=None):
"""Run a Kayobe Ansible playbook."""
_validate_args(parsed_args, playbooks)
cmd = build_args(parsed_args, playbooks,
extra_vars=extra_vars, limit=limit, tags=tags,
verbose_level=verbose_level, check=check)
env = os.environ.copy()
vault.update_environment(parsed_args, env)
# If the configuration path has been specified via --config-path, ensure
# the environment variable is set, so that it can be referenced by
# playbooks.
env.setdefault(CONFIG_PATH_ENV, parsed_args.config_path)
try:
utils.run_command(cmd, check_output=check_output, quiet=quiet, env=env)
except subprocess.CalledProcessError as e:
LOG.error("Kayobe playbook(s) %s exited %d",
", ".join(playbooks), e.returncode)
if check_output:
LOG.error("The output was:\n%s", e.output)
sys.exit(e.returncode)
def run_playbook(parsed_args, playbook, *args, **kwargs):
"""Run a Kayobe Ansible playbook."""
return run_playbooks(parsed_args, [playbook], *args, **kwargs)
def config_dump(parsed_args, host=None, hosts=None, var_name=None,
facts=None, extra_vars=None, tags=None, verbose_level=None):
dump_dir = tempfile.mkdtemp()
try:
if not extra_vars:
extra_vars = {}
extra_vars["dump_path"] = dump_dir
if host or hosts:
extra_vars["dump_hosts"] = host or hosts
if var_name:
extra_vars["dump_var_name"] = var_name
if facts is not None:
extra_vars["dump_facts"] = facts
# Don't use check mode for configuration dumps as we won't get any
# results back.
playbook_path = utils.get_data_files_path("ansible", "dump-config.yml")
run_playbook(parsed_args, playbook_path,
extra_vars=extra_vars, tags=tags, check_output=True,
verbose_level=verbose_level, check=False)
hostvars = {}
for path in os.listdir(dump_dir):
LOG.debug("Found dump file %s", path)
inventory_hostname, ext = os.path.splitext(path)
if ext == ".yml":
hvars = utils.read_yaml_file(os.path.join(dump_dir, path))
if host:
return hvars
else:
hostvars[inventory_hostname] = hvars
else:
LOG.warning("Unexpected extension on config dump file %s",
path)
return hostvars
finally:
shutil.rmtree(dump_dir)
def install_galaxy_roles(parsed_args, force=False):
"""Install Ansible Galaxy role dependencies.
Installs dependencies specified in kayobe, and if present, in kayobe
configuration.
:param parsed_args: Parsed command line arguments.
:param force: Whether to force reinstallation of roles.
"""
LOG.info("Installing galaxy role dependencies from kayobe")
requirements = utils.get_data_files_path("requirements.yml")
roles_destination = utils.get_data_files_path('ansible', 'roles')
utils.galaxy_install(requirements, roles_destination, force=force)
# Check for requirements in kayobe configuration.
kc_reqs_path = os.path.join(parsed_args.config_path,
"ansible", "requirements.yml")
if not utils.is_readable_file(kc_reqs_path)["result"]:
LOG.info("Not installing galaxy role dependencies from kayobe config "
"- requirements.yml not present")
return
LOG.info("Installing galaxy role dependencies from kayobe config")
# Ensure a roles directory exists in kayobe-config.
kc_roles_path = os.path.join(parsed_args.config_path,
"ansible", "roles")
try:
os.makedirs(kc_roles_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise exception.Error("Failed to create directory ansible/roles/ "
"in kayobe configuration at %s: %s" %
(parsed_args.config_path, str(e)))
# Install roles from kayobe-config.
utils.galaxy_install(kc_reqs_path, kc_roles_path, force=force)
def prune_galaxy_roles(parsed_args):
"""Prune galaxy roles that are no longer necessary.
:param parsed_args: Parsed command line arguments.
"""
LOG.info("Removing unnecessary galaxy roles from kayobe")
roles_to_remove = [
'stackhpc.os-flavors',
'stackhpc.os-projects',
'stackhpc.parted-1-1',
'yatesr.timezone',
]
LOG.debug("Removing roles: %s", ",".join(roles_to_remove))
utils.galaxy_remove(roles_to_remove, "ansible/roles")
```
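A hypothetical sketch (not part of the repository) of how `add_args` and `build_args` combine into an `ansible-playbook` command line. It assumes the `kayobe` package is importable, that `kayobe.vault.add_args`/`build_args` behave as used above (contributing nothing when no vault option is given), and it points `--config-path` at an empty temporary directory so no extra vars files are picked up.
```python
# Hypothetical usage sketch; the playbook path, tags and limit are invented.
import argparse
import tempfile

from kayobe import ansible, vault

config_path = tempfile.mkdtemp()  # empty config dir, just for illustration
parser = argparse.ArgumentParser()
ansible.add_args(parser)
vault.add_args(parser)
parsed_args = parser.parse_args(["--config-path", config_path,
                                 "--tags", "ntp", "--limit", "controllers"])
cmd = ansible.build_args(parsed_args, ["ansible/ntp.yml"], verbose_level=1)
print(" ".join(cmd))
# ansible-playbook -v --inventory <tmpdir>/inventory --limit controllers \
#     --tags ntp ansible/ntp.yml
```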
#### File: tests/unit/test_vault.py
```python
import argparse
import os
import unittest
import mock
from kayobe import utils
from kayobe import vault
class TestCase(unittest.TestCase):
def test_validate_args_ok(self):
parser = argparse.ArgumentParser()
vault.add_args(parser)
parsed_args = parser.parse_args([])
vault.validate_args(parsed_args)
@mock.patch.dict(os.environ, {"KAYOBE_VAULT_PASSWORD": "<PASSWORD>"})
def test_validate_args_env(self):
parser = argparse.ArgumentParser()
vault.add_args(parser)
parsed_args = parser.parse_args([])
vault.validate_args(parsed_args)
@mock.patch.dict(os.environ, {"KAYOBE_VAULT_PASSWORD": "<PASSWORD>"})
def test_validate_args_ask_vault_pass(self):
parser = argparse.ArgumentParser()
vault.add_args(parser)
parsed_args = parser.parse_args(["--ask-vault-pass"])
self.assertRaises(SystemExit, vault.validate_args, parsed_args)
@mock.patch.dict(os.environ, {"KAYOBE_VAULT_PASSWORD": "<PASSWORD>"})
def test_validate_args_vault_password_file(self):
parser = argparse.ArgumentParser()
vault.add_args(parser)
parsed_args = parser.parse_args(["--vault-password-file",
"/path/to/file"])
self.assertRaises(SystemExit, vault.validate_args, parsed_args)
@mock.patch.object(vault.getpass, 'getpass')
def test__ask_vault_pass(self, mock_getpass):
mock_getpass.return_value = 'test-pass'
# Call twice to verify that the user is only prompted once.
result = vault._ask_vault_pass()
self.assertEqual('test-pass', result)
mock_getpass.assert_called_once_with("Vault password: ")
result = vault._ask_vault_pass()
self.assertEqual('test-pass', result)
mock_getpass.assert_called_once_with("Vault password: ")
@mock.patch.object(utils, 'read_file')
def test__read_vault_password_file(self, mock_read):
mock_read.return_value = "test-pass\n"
result = vault._read_vault_password_file("/path/to/file")
self.assertEqual("test-pass", result)
mock_read.assert_called_once_with("/path/to/file")
def test_update_environment_no_vault(self):
parser = argparse.ArgumentParser()
vault.add_args(parser)
parsed_args = parser.parse_args([])
env = {}
vault.update_environment(parsed_args, env)
self.assertEqual({}, env)
@mock.patch.object(vault, '_ask_vault_pass')
def test_update_environment_prompt(self, mock_ask):
mock_ask.return_value = "test-pass"
parser = argparse.ArgumentParser()
vault.add_args(parser)
parsed_args = parser.parse_args(["--ask-vault-pass"])
env = {}
vault.update_environment(parsed_args, env)
self.assertEqual({"KAYOBE_VAULT_PASSWORD": "<PASSWORD>"}, env)
mock_ask.assert_called_once_with()
@mock.patch.object(vault, '_read_vault_password_file')
def test_update_environment_file(self, mock_read):
mock_read.return_value = "test-pass"
parser = argparse.ArgumentParser()
vault.add_args(parser)
args = ["--vault-password-file", "/path/to/file"]
parsed_args = parser.parse_args(args)
env = {}
vault.update_environment(parsed_args, env)
self.assertEqual({"KAYOBE_VAULT_PASSWORD": "<PASSWORD>"}, env)
mock_read.assert_called_once_with("/path/to/file")
``` |
{
"source": "jovial/kayobe",
"score": 2
} |
#### File: ansible/filter_plugins/switches.py
```python
import six
def switch_interface_config_select_name(switch_interface_config, names):
"""Select and return all switch interfaces matching requested names.
:param switch_interface_config: Switch interface configuration dict
:param names: String or list of strings - interface names to match
"""
if isinstance(names, six.string_types):
names = [names]
return {
name: config
for name, config in switch_interface_config.items()
if name in names
}
def switch_interface_config_select_description(switch_interface_config, descriptions):
    """Select and return all switch interfaces matching requested descriptions.
    Only interfaces configured as trunk ports (ngs_trunk_port unset or True) are
    returned.
    :param switch_interface_config: Switch interface configuration dict
    :param descriptions: String or list of strings - descriptions to match
    """
if isinstance(descriptions, six.string_types):
descriptions = [descriptions]
return {
name: config
for name, config in switch_interface_config.items()
if (config.get('description') in descriptions and
config.get('ngs_trunk_port', True))
}
def switch_interface_config_select_trunk(switch_interface_config):
"""Select and return all switch interfaces which are trunk links.
Interfaces are assumed to be trunked, unless they have a ngs_trunk_port
item which is set to False.
:param switch_interface_config: Switch interface configuration dict
"""
return {
name: config
for name, config in switch_interface_config.items()
if config.get('ngs_trunk_port', True)
}
class FilterModule(object):
"""Switch filters."""
def filters(self):
return {
'switch_interface_config_select_name': switch_interface_config_select_name,
'switch_interface_config_select_description': switch_interface_config_select_description,
'switch_interface_config_select_trunk': switch_interface_config_select_trunk,
}
```
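A hypothetical sketch (not part of the repository) showing what the three filters return for a small interface configuration; the interface names and descriptions below are invented.
```python
# Example data for the filters above; names and descriptions are made up.
switch_interface_config = {
    "xe-0/0/1": {"description": "ctl0", "ngs_trunk_port": False},
    "xe-0/0/2": {"description": "ctl1"},
    "xe-0/0/3": {"description": "uplink"},
}

# Only the named interface(s).
print(switch_interface_config_select_name(switch_interface_config, "xe-0/0/3"))
# {'xe-0/0/3': {'description': 'uplink'}}

# Interfaces matching a description, excluding ones with ngs_trunk_port False.
print(switch_interface_config_select_description(
    switch_interface_config, ["ctl0", "ctl1"]))
# {'xe-0/0/2': {'description': 'ctl1'}}

# All trunk ports (ngs_trunk_port defaults to True when unset).
print(switch_interface_config_select_trunk(switch_interface_config))
# {'xe-0/0/2': {'description': 'ctl1'}, 'xe-0/0/3': {'description': 'uplink'}}
```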
#### File: unit/cli/test_commands.py
```python
import unittest
import cliff.app
import cliff.commandmanager
import mock
from kayobe.cli import commands
from kayobe import utils
class TestApp(cliff.app.App):
def __init__(self):
super(TestApp, self).__init__(
description='Test app',
version='0.1',
command_manager=cliff.commandmanager.CommandManager('kayobe.cli'))
class TestCase(unittest.TestCase):
@mock.patch.object(utils, "galaxy_install", spec=True)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_control_host_bootstrap(self, mock_run, mock_install):
command = commands.ControlHostBootstrap(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
mock_install.assert_called_once_with("requirements.yml",
"ansible/roles")
expected_calls = [
mock.call(mock.ANY, ["ansible/bootstrap.yml"]),
mock.call(mock.ANY, ["ansible/kolla-ansible.yml"],
tags="install"),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(utils, "galaxy_install", spec=True)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_control_host_upgrade(self, mock_run, mock_install):
command = commands.ControlHostUpgrade(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
mock_install.assert_called_once_with("requirements.yml",
"ansible/roles", force=True)
expected_calls = [
mock.call(mock.ANY, ["ansible/bootstrap.yml"]),
mock.call(mock.ANY, ["ansible/kolla-ansible.yml"],
tags="install"),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_network_connectivity_check(self, mock_run):
command = commands.NetworkConnectivityCheck(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(mock.ANY, ["ansible/network-connectivity.yml"]),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_config_dump")
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_seed_hypervisor_host_configure(self, mock_run, mock_dump):
command = commands.SeedHypervisorHostConfigure(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
mock_dump.return_value = "stack"
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(mock.ANY, host="seed-hypervisor",
var_name="kayobe_ansible_user", tags="dump-config")
]
self.assertEqual(expected_calls, mock_dump.call_args_list)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/ip-allocation.yml",
"ansible/ssh-known-host.yml",
"ansible/kayobe-ansible-user.yml",
"ansible/kayobe-target-venv.yml",
"ansible/users.yml",
"ansible/yum.yml",
"ansible/dev-tools.yml",
"ansible/network.yml",
"ansible/sysctl.yml",
"ansible/ntp.yml",
"ansible/seed-hypervisor-libvirt-host.yml",
],
limit="seed-hypervisor",
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_seed_hypervisor_host_upgrade(self, mock_run):
command = commands.SeedHypervisorHostUpgrade(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/kayobe-target-venv.yml",
"ansible/kolla-target-venv.yml",
],
limit="seed-hypervisor",
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_config_dump")
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
@mock.patch.object(commands.KollaAnsibleMixin,
"run_kolla_ansible_seed")
def test_seed_host_configure(self, mock_kolla_run, mock_run, mock_dump):
command = commands.SeedHostConfigure(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
mock_dump.return_value = {
"seed": {"kayobe_ansible_user": "stack"}
}
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(mock.ANY, hosts="seed", tags="dump-config")
]
self.assertEqual(expected_calls, mock_dump.call_args_list)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/ip-allocation.yml",
"ansible/ssh-known-host.yml",
"ansible/kayobe-ansible-user.yml",
"ansible/kayobe-target-venv.yml",
"ansible/users.yml",
"ansible/yum.yml",
"ansible/dev-tools.yml",
"ansible/disable-selinux.yml",
"ansible/network.yml",
"ansible/sysctl.yml",
"ansible/ip-routing.yml",
"ansible/snat.yml",
"ansible/disable-glean.yml",
"ansible/ntp.yml",
"ansible/lvm.yml",
],
limit="seed",
),
mock.call(
mock.ANY,
["ansible/kolla-ansible.yml"],
tags="config",
),
mock.call(
mock.ANY,
[
"ansible/kolla-target-venv.yml",
"ansible/kolla-host.yml",
"ansible/docker.yml",
],
limit="seed",
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
expected_calls = [
mock.call(
mock.ANY,
"bootstrap-servers",
extra_vars={"ansible_user": "stack"},
),
]
self.assertEqual(expected_calls, mock_kolla_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_config_dump")
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
@mock.patch.object(commands.KollaAnsibleMixin,
"run_kolla_ansible_seed")
def test_seed_host_configure_kayobe_venv(self, mock_kolla_run, mock_run,
mock_dump):
command = commands.SeedHostConfigure(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
mock_dump.return_value = {
"seed": {
"ansible_python_interpreter": "/kayobe/venv/bin/python",
"kayobe_ansible_user": "stack",
}
}
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
"bootstrap-servers",
extra_vars={
"ansible_python_interpreter": "/kayobe/venv/bin/python",
"ansible_user": "stack",
},
),
]
self.assertEqual(expected_calls, mock_kolla_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_config_dump")
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
@mock.patch.object(commands.KollaAnsibleMixin,
"run_kolla_ansible_seed")
def test_seed_host_configure_kolla_venv(self, mock_kolla_run, mock_run,
mock_dump):
command = commands.SeedHostConfigure(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
mock_dump.return_value = {
"seed": {
"kayobe_ansible_user": "stack",
"kolla_ansible_target_venv": "/kolla/venv/bin/python",
}
}
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
"bootstrap-servers",
extra_vars={
"ansible_python_interpreter": "/usr/bin/python",
"ansible_user": "stack",
"virtualenv": "/kolla/venv/bin/python",
},
),
]
self.assertEqual(expected_calls, mock_kolla_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_config_dump")
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
@mock.patch.object(commands.KollaAnsibleMixin,
"run_kolla_ansible_seed")
def test_seed_host_configure_both_venvs(self, mock_kolla_run, mock_run,
mock_dump):
command = commands.SeedHostConfigure(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
mock_dump.return_value = {
"seed": {
"ansible_python_interpreter": "/kayobe/venv/bin/python",
"kayobe_ansible_user": "stack",
"kolla_ansible_target_venv": "/kolla/venv/bin/python",
}
}
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
"bootstrap-servers",
extra_vars={
"ansible_python_interpreter": "/kayobe/venv/bin/python",
"ansible_user": "stack",
"virtualenv": "/kolla/venv/bin/python",
},
),
]
self.assertEqual(expected_calls, mock_kolla_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_seed_host_upgrade(self, mock_run):
command = commands.SeedHostUpgrade(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/kayobe-target-venv.yml",
"ansible/kolla-target-venv.yml",
],
limit="seed",
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_seed_container_image_build(self, mock_run):
command = commands.SeedContainerImageBuild(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/container-image-builders-check.yml",
"ansible/kolla-build.yml",
"ansible/container-image-build.yml"
],
extra_vars={
"container_image_sets": (
"{{ seed_container_image_sets }}"),
"push_images": False,
}
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_seed_container_image_build_with_regex(self, mock_run):
command = commands.SeedContainerImageBuild(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args(["--push", "^regex1$", "^regex2$"])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/container-image-builders-check.yml",
"ansible/kolla-build.yml",
"ansible/container-image-build.yml"
],
extra_vars={
"container_image_regexes": "'^regex1$ ^regex2$'",
"push_images": True,
}
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
@mock.patch.object(commands.KollaAnsibleMixin,
"run_kolla_ansible_seed")
def test_service_deploy(self, mock_kolla_run, mock_run):
command = commands.SeedServiceDeploy(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
["ansible/kolla-ansible.yml"],
tags="config",
),
mock.call(
mock.ANY,
["ansible/kolla-bifrost.yml"],
),
mock.call(
mock.ANY,
[
"ansible/overcloud-host-image-workaround-resolv.yml",
"ansible/seed-introspection-rules.yml",
"ansible/dell-switch-bmp.yml",
],
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
expected_calls = [
mock.call(
mock.ANY,
"deploy-bifrost",
),
]
self.assertEqual(expected_calls, mock_kolla_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_config_dump")
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
@mock.patch.object(commands.KollaAnsibleMixin,
"run_kolla_ansible_overcloud")
def test_overcloud_host_configure(self, mock_kolla_run, mock_run,
mock_dump):
command = commands.OvercloudHostConfigure(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
mock_dump.return_value = {
"controller0": {"kayobe_ansible_user": "stack"}
}
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(mock.ANY, hosts="overcloud", tags="dump-config")
]
self.assertEqual(expected_calls, mock_dump.call_args_list)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/ip-allocation.yml",
"ansible/ssh-known-host.yml",
"ansible/kayobe-ansible-user.yml",
"ansible/kayobe-target-venv.yml",
"ansible/users.yml",
"ansible/yum.yml",
"ansible/dev-tools.yml",
"ansible/disable-selinux.yml",
"ansible/network.yml",
"ansible/sysctl.yml",
"ansible/disable-glean.yml",
"ansible/disable-cloud-init.yml",
"ansible/ntp.yml",
"ansible/lvm.yml",
],
limit="overcloud",
),
mock.call(
mock.ANY,
["ansible/kolla-ansible.yml"],
tags="config",
),
mock.call(
mock.ANY,
[
"ansible/kolla-target-venv.yml",
"ansible/kolla-host.yml",
"ansible/docker.yml",
"ansible/ceph-block-devices.yml",
],
limit="overcloud",
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
expected_calls = [
mock.call(
mock.ANY,
"bootstrap-servers",
extra_vars={"ansible_user": "stack"},
),
]
self.assertEqual(expected_calls, mock_kolla_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_config_dump")
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
@mock.patch.object(commands.KollaAnsibleMixin,
"run_kolla_ansible_overcloud")
def test_overcloud_host_configure_kayobe_venv(self, mock_kolla_run,
mock_run, mock_dump):
command = commands.OvercloudHostConfigure(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
mock_dump.return_value = {
"controller0": {
"ansible_python_interpreter": "/kayobe/venv/bin/python",
"kayobe_ansible_user": "stack",
}
}
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
"bootstrap-servers",
extra_vars={
"ansible_python_interpreter": "/kayobe/venv/bin/python",
"ansible_user": "stack",
}
),
]
self.assertEqual(expected_calls, mock_kolla_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_config_dump")
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
@mock.patch.object(commands.KollaAnsibleMixin,
"run_kolla_ansible_overcloud")
def test_overcloud_host_configure_kolla_venv(self, mock_kolla_run,
mock_run, mock_dump):
command = commands.OvercloudHostConfigure(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
mock_dump.return_value = {
"controller0": {
"kayobe_ansible_user": "stack",
"kolla_ansible_target_venv": "/kolla/venv/bin/python",
}
}
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
"bootstrap-servers",
extra_vars={
"ansible_python_interpreter": "/usr/bin/python",
"ansible_user": "stack",
"virtualenv": "/kolla/venv/bin/python",
}
),
]
self.assertEqual(expected_calls, mock_kolla_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_config_dump")
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
@mock.patch.object(commands.KollaAnsibleMixin,
"run_kolla_ansible_overcloud")
def test_overcloud_host_configure_both_venvs(self, mock_kolla_run,
mock_run, mock_dump):
command = commands.OvercloudHostConfigure(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
mock_dump.return_value = {
"controller0": {
"ansible_python_interpreter": "/kayobe/venv/bin/python",
"kayobe_ansible_user": "stack",
"kolla_ansible_target_venv": "/kolla/venv/bin/python",
}
}
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
"bootstrap-servers",
extra_vars={
"ansible_python_interpreter": "/kayobe/venv/bin/python",
"ansible_user": "stack",
"virtualenv": "/kolla/venv/bin/python",
}
),
]
self.assertEqual(expected_calls, mock_kolla_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_overcloud_host_upgrade(self, mock_run):
command = commands.OvercloudHostUpgrade(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/kayobe-target-venv.yml",
"ansible/kolla-target-venv.yml",
"ansible/overcloud-docker-sdk-upgrade.yml",
"ansible/overcloud-etc-hosts-fixup.yml",
],
limit="overcloud",
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_overcloud_container_image_build(self, mock_run):
command = commands.OvercloudContainerImageBuild(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/container-image-builders-check.yml",
"ansible/kolla-build.yml",
"ansible/container-image-build.yml"
],
extra_vars={
"container_image_sets": (
"{{ overcloud_container_image_sets }}"),
"push_images": False,
}
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_overcloud_container_image_build_with_regex(self, mock_run):
command = commands.OvercloudContainerImageBuild(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args(["--push", "^regex1$", "^regex2$"])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/container-image-builders-check.yml",
"ansible/kolla-build.yml",
"ansible/container-image-build.yml"
],
extra_vars={
"container_image_regexes": "'^regex1$ ^regex2$'",
"push_images": True,
}
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_overcloud_post_configure(self, mock_run):
command = commands.OvercloudPostConfigure(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
'ansible/overcloud-ipa-images.yml',
'ansible/overcloud-introspection-rules.yml',
'ansible/overcloud-introspection-rules-dell-lldp-workaround.yml', # noqa
'ansible/provision-net.yml',
'ansible/overcloud-grafana-configure.yml'
],
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_baremetal_compute_inspect(self, mock_run):
command = commands.BaremetalComputeInspect(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/baremetal-compute-inspect.yml",
],
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_baremetal_compute_manage(self, mock_run):
command = commands.BaremetalComputeManage(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/baremetal-compute-manage.yml",
],
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_baremetal_compute_provide(self, mock_run):
command = commands.BaremetalComputeProvide(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/baremetal-compute-provide.yml",
],
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
``` |
{
"source": "JovialKnoll/jovial-engine",
"score": 3
} |
#### File: src/jovialengine/fontwrap.py
```python
import pygame
class FontWrap(object):
__slots__ = (
'font',
'line_height',
'_antialias',
)
def __init__(self, font: pygame.font.Font, line_height: int, antialias: bool):
self.font = font
self.line_height = line_height
self._antialias = antialias
def renderTo(self, surf: pygame.Surface, dest, text: str, color, background=None):
surf.blit(self.font.render(text, self._antialias, color, background), dest)
def renderToCentered(self, surf: pygame.Surface, dest, text: str, color, background=None):
text_size = self.font.size(text)
surf.blit(
self.font.render(text, self._antialias, color, background),
(dest[0] - text_size[0] // 2, dest[1] - text_size[1] // 2)
)
def _calculateLinesForWords(self, width: int, words: list[str]):
lines = [words[0].replace('_', ' ')]
for word in words[1:]:
new_word = word.replace('_', ' ')
if self.font.size(lines[-1] + " " + new_word)[0] > width:
lines.append(new_word)
else:
lines[-1] += " " + new_word
return lines
def renderToInside(self, surf: pygame.Surface, dest, width: int, text: str, color, background=None):
# probably more efficient to do once?
part_dest = [dest[0], dest[1]]
for words in [line.split() for line in text.splitlines()]:
if not words:
words = [""]
lines = self._calculateLinesForWords(width, words)
for line in lines:
surf.blit(
self.font.render(line, self._antialias, color, background),
part_dest
)
part_dest[1] += self.line_height
def _renderWordsInside(self, width: int, words: list[str], color, background):
"""Returns a surface of the width with the words drawn on it.
If any word is too long to fit, it will be in its own line, and truncated.
"""
lines = self._calculateLinesForWords(width, words)
result = pygame.Surface((width, self.line_height * len(lines))).convert()
result.fill(background)
for i, line in enumerate(lines):
drawn_line = self.font.render(line, self._antialias, color, background).convert()
result.blit(drawn_line, (0, i * self.line_height))
return result
def renderInside(self, width: int, text: str, color, background):
# probably more efficient if keeping resultant surface and using that to draw over and over?
height = 0
imgs = []
for words in [line.split() for line in text.splitlines()]:
if not words:
words = [""]
imgs.append(self._renderWordsInside(width, words, color, background))
height += imgs[-1].get_height()
result = pygame.Surface((width, height)).convert()
dest = [0, 0]
for img in imgs:
result.blit(img, dest)
dest[1] += img.get_height()
return result
```
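A hypothetical usage sketch (not part of the repository): it assumes pygame is installed and that the FontWrap class above is importable. Because renderInside() converts its surfaces, a display mode must be set before calling it; the font size, wrap width and text are invented.
```python
# Hypothetical sketch of wrapping text with FontWrap.
import pygame

pygame.init()
pygame.display.set_mode((320, 240))      # convert() requires an active display
font = pygame.font.Font(None, 16)        # pygame's bundled default font
wrapper = FontWrap(font, line_height=16, antialias=False)
text = "a fairly long line of text that should wrap onto several lines"
img = wrapper.renderInside(120, text, color=(0, 0, 0), background=(255, 255, 255))
print(img.get_size())                    # width is exactly 120 px; height is a
                                         # multiple of the 16 px line height
pygame.quit()
```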
#### File: src/jovialengine/modebase.py
```python
import abc
import typing
import pygame
import constants
class ModeBase(abc.ABC):
"""This is an abstract object for game modes.
"""
__slots__ = (
'__pressed_mouse_buttons',
'_space',
'_background',
'_all_sprites',
'_camera',
'next_mode',
)
    def __init__(self):
        """If you want a mode's space to have dimensions different from the screen size, call _init yourself."""
self._init(constants.SCREEN_SIZE)
def _init(self, space_size: tuple[int, int]):
self.__pressed_mouse_buttons = dict()
self._space = pygame.Surface(space_size).convert()
self._background = pygame.Surface(space_size).convert()
self._background.fill((255, 255, 255))
self._all_sprites = pygame.sprite.LayeredDirty()
self._camera = pygame.rect.Rect((0, 0), constants.SCREEN_SIZE)
"""All game modes must set the next mode when they are done.
Don't create another mode unless you are immediately assigning it to self.next_mode
"""
self.next_mode = None
def cleanup(self):
self._all_sprites.empty()
def __trackMouseButton(self, event: pygame.event.Event):
if event.type == pygame.MOUSEBUTTONDOWN:
self.__pressed_mouse_buttons[event.button] = event.pos
elif event.type == pygame.MOUSEBUTTONUP:
if event.button in self.__pressed_mouse_buttons:
del self.__pressed_mouse_buttons[event.button]
def _mouseButtonStatus(self, button: int):
if button not in self.__pressed_mouse_buttons:
return False
return self.__pressed_mouse_buttons[button]
@abc.abstractmethod
def _input(self, event: pygame.event.Event):
raise NotImplementedError(
type(self).__name__ + "._input(self, event)"
)
def _postInput(self):
"""Handle any input that's checked directly, like joysticks etc."""
pass
@typing.final
def inputEvents(self, events: typing.Iterable[pygame.event.Event]):
"""All game modes can take in events."""
for event in events:
self._input(event)
self.__trackMouseButton(event)
self._postInput()
def _update(self, dt: int):
pass
@typing.final
def update(self, dt: int):
"""All game modes can update."""
self._update(dt)
self._all_sprites.update(dt)
def _updatePreDraw(self, screen: pygame.surface.Surface):
pass
def _drawPreSprites(self, screen: pygame.surface.Surface):
pass
def _drawPostSprites(self, screen: pygame.surface.Surface):
pass
def _drawPostCamera(self, screen: pygame.surface.Surface):
pass
@typing.final
def draw(self, screen: pygame.surface.Surface):
"""All game modes can draw to the screen"""
self._updatePreDraw(screen)
self._space.blit(self._background, (0, 0))
self._drawPreSprites(self._space)
self._all_sprites.draw(self._space)
self._drawPostSprites(self._space)
screen.blit(self._space, (0, 0), self._camera)
self._drawPostCamera(screen)
@staticmethod
def _stopMixer():
pygame.mixer.music.stop()
pygame.mixer.music.unload()
pygame.mixer.stop()
``` |
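A hypothetical sketch (not part of the repository) of the lifecycle ModeBase expects from its subclasses: implement _input(), then drive the mode each frame with inputEvents(), update() and draw(). It assumes pygame and the repo-local constants module are importable and that a display has been created.
```python
# Hypothetical minimal mode and one frame of the main loop driving it.
class ModePause(ModeBase):
    def _input(self, event):
        if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
            # Hand control to whatever mode should come next (invented here).
            self.next_mode = ModePause()

pygame.init()
screen = pygame.display.set_mode(constants.SCREEN_SIZE)
mode = ModePause()
mode.inputEvents(pygame.event.get())
mode.update(16)            # dt in milliseconds, roughly one 60 FPS frame
mode.draw(screen)
pygame.display.flip()
```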
{
"source": "JovialKnoll/monsters",
"score": 3
} |
#### File: monsters/src/anim.py
```python
import pygame.mixer
from vec2d import Vec2d
from saveable import Saveable
class Anim(Saveable):
__slots__ = (
'func',
'time',
'pos',
'sound',
'positional_sound',
)
def __init__(self, func: str, time: int, x_or_pair, y=None,
sound: pygame.mixer.Sound = None, positional_sound: bool = False):
self.func = func
self.time = time
self.pos = Vec2d(x_or_pair, y)
self.sound = sound
self.positional_sound = positional_sound
def save(self):
# no sound right now, sorry
# if we need it, either start passing sounds as paths
# or don't save when there are pending Anims
return self.func, self.time, self.pos
@classmethod
def load(cls, save_data):
return cls(*save_data)
```
#### File: monsters/src/animsprite.py
```python
from collections import deque
import pygame
import constants
import utility
from anim import Anim
from vec2d import Vec2d
from saveable import Saveable
class AnimSprite(pygame.sprite.DirtySprite, Saveable):
Binary = 'Binary'
Lerp = 'LERP'
IncSpeed = 'INC'
DecSpeed = 'DEC'
IncDecSpeed = 'INC_DEC'
DecIncSpeed = 'DEC_INC'
funcDict = {
Binary: utility.binary,
Lerp: utility.lerp,
IncSpeed: utility.incSpeedLerp,
DecSpeed: utility.decSpeedLerp,
IncDecSpeed: utility.incDecSpeedLerp,
DecIncSpeed: utility.decIncSpeedLerp,
}
@classmethod
def toFunc(cls, func):
return cls.funcDict.get(func, utility.lerp)
__slots__ = (
'anims',
'last_pos',
'time',
'positional_sound',
'sound_channel',
)
def __init__(self):
super().__init__()
# dirty = 2 : always draw
self.dirty = 2
self.anims = deque()
self.last_pos = None
self.time = 0
self.positional_sound = False
self.sound_channel = None
def save(self):
return {
'rect_topleft': self.rect.topleft,
'anims': self.anims,
'last_pos': self.last_pos,
'time': self.time,
}
@classmethod
def load(cls, save_data):
new_obj = cls()
new_obj.rect.topleft = save_data['rect_topleft']
new_obj.anims = save_data['anims']
new_obj.last_pos = save_data['last_pos']
new_obj.time = save_data['time']
return new_obj
def stillAnimating(self):
if self.anims:
return True
return False
def update(self, *args):
if self.last_pos is None:
self.last_pos = self.rect.center
# adding dt
self.time += args[0]
while self.anims and self.time >= self.anims[0].time:
done_anim = self.anims.popleft()
self.time -= done_anim.time
self.rect.center = done_anim.pos
self.last_pos = self.rect.center
if done_anim.sound:
self.positional_sound = done_anim.positional_sound
channel = done_anim.sound.play()
if self.positional_sound:
self.sound_channel = channel
if self.anims:
current_anim = self.anims[0]
func = self.toFunc(current_anim.func)
self.rect.center = func(
self.last_pos,
current_anim.pos,
self.time / current_anim.time
)
else:
self.last_pos = None
self.time = 0
if self.positional_sound:
if self.sound_channel.get_busy():
pos = min(max(self.rect.centerx / constants.SCREEN_SIZE[0], 0), 1)
channel_l = self.boundChannelVolume(utility.cos_curve(pos))
channel_r = self.boundChannelVolume(utility.sin_curve(pos))
self.sound_channel.set_volume(channel_l, channel_r)
else:
self.positional_sound = False
self.sound_channel = None
@staticmethod
def boundChannelVolume(volume):
return .2 + (volume * .8)
def addPosAbs(self, func, time, x_or_pair, y=None, sound=None, positional_sound=False):
self.anims.append(
Anim(func, time, x_or_pair, y, sound, positional_sound)
)
def addPosRel(self, func, time, x_or_pair, y=None, sound=None, positional_sound=False):
newPos = Vec2d(x_or_pair, y)
if self.anims:
newPos += self.anims[-1].pos
else:
newPos += self.rect.center
self.addPosAbs(func, time, newPos, sound=sound, positional_sound=positional_sound)
def addWait(self, time, sound=None, positional_sound=False):
self.addPosRel(AnimSprite.Binary, time, 0, 0, sound, positional_sound)
```
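A hypothetical usage sketch (not part of the repository): AnimSprite leaves image and rect to its subclasses, so the sketch defines a tiny concrete sprite, queues a few movements and steps the animation manually. The class name, sizes and timings are invented; it assumes pygame and the repo-local modules imported above are available.
```python
# Hypothetical sketch of queuing and running animations on an AnimSprite.
import pygame

class Mover(AnimSprite):
    def __init__(self):
        super().__init__()
        self.image = pygame.Surface((8, 8))
        self.rect = self.image.get_rect(center=(0, 0))

mover = Mover()
mover.addPosAbs(AnimSprite.Lerp, 500, 100, 50)    # reach (100, 50) after 500 ms
mover.addWait(250)                                # hold that position for 250 ms
mover.addPosRel(AnimSprite.DecSpeed, 500, 0, 25)  # then ease 25 px downwards
while mover.stillAnimating():
    mover.update(16)                              # advance roughly one frame
print(mover.rect.center)                          # (100, 75)
```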
#### File: monsters/src/convopart.py
```python
import csv
import mode
class ConvoChoice(object):
__slots__ = (
'text',
'key',
)
def __init__(self, text: str, key: str):
self.text = text
self.key = key
def getNextMode(self):
if hasattr(mode, self.key):
mode_cls = getattr(mode, self.key)
# currently only handling modes that can be created with no arguments
return mode_cls
return None
class ConvoPart(object):
__slots__ = (
'style',
'text',
'choices',
)
def __init__(self, style: set[str], text: str, choices: list[ConvoChoice]):
self.style = style
self.text = text
self.choices = choices
@staticmethod
def getConvoDict(convo_file: str):
convo_dict = {}
with open(convo_file) as convo_data:
convo_reader = csv.reader(convo_data)
for row in convo_reader:
row_iter = iter(row)
key = next(row_iter)
if key in convo_dict:
raise ValueError(f"The convo file {convo_file} has a duplicate row key {key}.")
style = {tag.strip() for tag in next(row_iter).upper().split('|')}
text = next(row_iter)
choices = []
try:
for i in range(len(mode.ModeConvo.buttons)):
choice_text = next(row_iter)
choice_key = next(row_iter)
if choice_key:
choice = ConvoChoice(choice_text, choice_key)
choices.append(choice)
except StopIteration:
pass
if not choices:
raise ValueError(f"The convo file {convo_file} has no choices in the row with key {key}.")
convo_part = ConvoPart(style, text, choices)
convo_dict[key] = convo_part
return convo_dict
```
#### File: src/mode/modegamemenu.py
```python
import abc
import pygame
import constants
import shared
from save import Save
from saveable import Saveable
from .mode import Mode
class ModeGameMenu(Mode, abc.ABC):
MENU_CHAR_WIDTH = 20
MENU_WIDTH = MENU_CHAR_WIDTH * constants.FONT_SIZE
SHARED_DISP_TEXT = "Options:\nESC) Go Back\n"
__slots__ = (
'_previous_mode',
'_old_screen',
)
def __init__(self, previous_mode, old_screen=None):
super().__init__()
self._previous_mode = previous_mode
if old_screen is None:
old_screen = self._getOldScreen()
self._old_screen = old_screen
def _getOldScreen(self):
old_screen = pygame.Surface(constants.SCREEN_SIZE).convert(shared.display.screen)
self._previous_mode.draw(old_screen)
old_screen = pygame.transform.smoothscale(
pygame.transform.smoothscale(
old_screen,
(constants.SCREEN_SIZE[0] * 4 // 5, constants.SCREEN_SIZE[1] * 4 // 5)
),
constants.SCREEN_SIZE
)
return old_screen
def _drawScreen(self, screen):
screen.blit(self._old_screen, (0, 0))
@classmethod
def _drawText(cls, screen, disp_text):
shared.font_wrap.renderToInside(
screen,
(0, 0),
cls.MENU_WIDTH,
disp_text,
False,
constants.WHITE,
constants.BLACK
)
class ModeGameMenuTop(ModeGameMenu):
def _input(self, event):
if event.type == pygame.QUIT:
shared.game_running = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.next_mode = self._previous_mode
elif event.key == pygame.K_1:
self.next_mode = ModeGameMenuSave(self._previous_mode, self._old_screen)
elif event.key == pygame.K_2:
self.next_mode = ModeGameMenuLoad(self._previous_mode, self._old_screen)
elif event.key == pygame.K_3:
self.next_mode = ModeGameMenuOptions(self._previous_mode, self._old_screen)
elif event.key == pygame.K_4:
shared.game_running = False
def _drawScreen(self, screen):
super()._drawScreen(screen)
disp_text = self.SHARED_DISP_TEXT
disp_text += "1) Save\n2) Load\n3) Options\n4) Quit"
self._drawText(screen, disp_text)
class ModeGameMenuSave(ModeGameMenu):
FILE_EXT = '.sav'
__slots__ = (
'_save_name',
'_cursor_position',
'_confirm_overwrite',
'_save_success',
'_cursor_switch',
'_cursor_timer',
)
def __init__(self, previous_mode, old_screen=None):
super().__init__(previous_mode, old_screen)
self._save_name = ''
self._resetCursorBlink()
self._cursor_position = 0
self._confirm_overwrite = False
self._save_success = None
def _resetCursorBlink(self):
self._cursor_switch = True
self._cursor_timer = 0
def _input(self, event):
if event.type == pygame.QUIT:
self.next_mode = ModeGameMenuTop(self._previous_mode, self._old_screen)
elif event.type == pygame.KEYDOWN:
char = event.unicode
length = len(self._save_name)
if self._save_success:
self.next_mode = ModeGameMenuTop(self._previous_mode, self._old_screen)
elif event.key == pygame.K_ESCAPE:
if self._confirm_overwrite:
self._confirm_overwrite = False
self._save_success = None
else:
self.next_mode = ModeGameMenuTop(self._previous_mode, self._old_screen)
elif event.key == pygame.K_RETURN:
if self._save_name and isinstance(self._previous_mode, Saveable):
if Save.willOverwrite(self._save_name + self.FILE_EXT) and not self._confirm_overwrite:
self._confirm_overwrite = True
elif not self._save_success:
new_save = Save.getFromMode(self._save_name + self.FILE_EXT, self._previous_mode)
self._save_success = new_save.save()
elif event.key == pygame.K_LEFT:
self._cursor_position = max(self._cursor_position - 1, 0)
self._resetCursorBlink()
elif event.key == pygame.K_RIGHT:
self._cursor_position = min(self._cursor_position + 1, length)
self._resetCursorBlink()
elif event.key in (pygame.K_UP, pygame.K_HOME):
self._cursor_position = 0
self._resetCursorBlink()
elif event.key in (pygame.K_DOWN, pygame.K_END):
self._cursor_position = length
self._resetCursorBlink()
elif event.key == pygame.K_DELETE:
self._save_name = self._save_name[:self._cursor_position] + self._save_name[self._cursor_position + 1:]
self._resetCursorBlink()
elif event.key == pygame.K_BACKSPACE:
if self._cursor_position > 0:
self._save_name = self._save_name[:self._cursor_position - 1] \
+ self._save_name[self._cursor_position:]
self._cursor_position -= 1
self._resetCursorBlink()
elif (
length < (self.MENU_CHAR_WIDTH - len(self.FILE_EXT) - 1)
and (
# numbers
('0' <= char <= '9')
# or letters
or (96 < event.key < 123)
)
):
self._save_name = self._save_name[:self._cursor_position] \
+ char \
+ self._save_name[self._cursor_position:]
self._cursor_position += 1
self._resetCursorBlink()
def _update(self, dt):
self._cursor_timer += dt
if self._cursor_timer >= constants.CURSOR_TIME:
self._cursor_switch = not self._cursor_switch
self._cursor_timer -= constants.CURSOR_TIME
def _drawScreen(self, screen):
super()._drawScreen(screen)
disp_text = self.SHARED_DISP_TEXT
if not isinstance(self._previous_mode, Saveable):
disp_text += "\nYou can't save now."
elif not self._save_success:
disp_text += "ENTER) Save\nType a file name:\n>"
if self._save_name:
disp_text += self._save_name
disp_text += self.FILE_EXT
if self._confirm_overwrite and self._save_success is None:
disp_text += "\nThis will overwrite an existing save file." \
+ "\nPress ENTER again to confirm, or ESC to go back."
elif self._save_success is False:
disp_text += "\nSave failed.\nPress ENTER to try again, or ESC to go back."
else:
disp_text += "\nSaved successfully.\nPress any key to go back."
self._drawText(screen, disp_text)
if self._cursor_switch and not self._confirm_overwrite and self._save_success is None:
screen.fill(
constants.WHITE,
(
((self._cursor_position + 1) * constants.FONT_SIZE, 4 * constants.FONT_HEIGHT),
(1, constants.FONT_HEIGHT)
)
)
class ModeGameMenuLoad(ModeGameMenu):
__slots__ = (
'_saves',
'_save_index',
'_loaded_save',
)
def __init__(self, previous_mode, old_screen=None):
super().__init__(previous_mode, old_screen)
self._saves = Save.getAllFromFiles()
self._save_index = 0
self._loaded_save = False
def _input(self, event):
if event.type == pygame.QUIT:
self.next_mode = ModeGameMenuTop(self._previous_mode, self._old_screen)
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE or self._loaded_save:
self.next_mode = ModeGameMenuTop(self._previous_mode, self._old_screen)
elif len(self._saves) > 0:
if event.key in (pygame.K_UP, pygame.K_LEFT):
self._save_index = max(self._save_index - 1, 0)
elif event.key in (pygame.K_DOWN, pygame.K_RIGHT):
self._save_index = min(self._save_index + 1, len(self._saves) - 1)
elif event.key == pygame.K_RETURN:
self._stopMixer()
self._previous_mode = self._saves[self._save_index].load()
pygame.mixer.music.pause()
pygame.mixer.pause()
self._old_screen = self._getOldScreen()
self._loaded_save = True
def _drawScreen(self, screen):
super()._drawScreen(screen)
disp_text = self.SHARED_DISP_TEXT
if len(self._saves) == 0:
disp_text += "\nThere are no save files to select from."
elif self._loaded_save:
disp_text += "\nLoaded successfully.\nPress any key to go back."
else:
disp_text += "ENTER) Load\nARROW KEYS) Select a file:"
for i in range(-1, 2):
disp_text += "\n"
this_index = self._save_index + i
if i == 0:
disp_text += ">"
else:
disp_text += "_"
if 0 <= this_index < len(self._saves):
disp_text += self._saves[this_index].file_name
self._drawText(screen, disp_text)
class ModeGameMenuOptions(ModeGameMenu):
def _input(self, event):
if event.type == pygame.QUIT:
self.next_mode = ModeGameMenuTop(self._previous_mode, self._old_screen)
elif event.type == pygame.KEYUP:
if event.key in (
pygame.K_DOWN, pygame.K_s,
pygame.K_LEFT, pygame.K_a,
pygame.K_PAGEDOWN, pygame.K_MINUS,
):
shared.display.changeScale(-1)
elif event.key in (
pygame.K_UP, pygame.K_w,
pygame.K_RIGHT, pygame.K_d,
pygame.K_PAGEUP, pygame.K_EQUALS,
):
shared.display.changeScale(1)
elif event.key in (pygame.K_f, pygame.K_F11,):
shared.display.toggleFullscreen()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.next_mode = ModeGameMenuTop(self._previous_mode, self._old_screen)
elif '1' <= event.unicode <= '9':
target_scale = int(event.unicode)
shared.display.setScale(target_scale)
def _drawScreen(self, screen):
super()._drawScreen(screen)
disp_text = self.SHARED_DISP_TEXT
disp_text += f"ARROWS) Upscaling: {shared.display.upscale}" \
f"\nF) Fullscreen: {self.getTickBox(shared.display.is_fullscreen)}"
self._drawText(screen, disp_text)
@staticmethod
def getTickBox(value: bool):
inside = "X" if value else "_"
return f"[{inside}]"
```
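The backdrop these menu modes draw comes from `_getOldScreen` above: the previous mode's frame is captured, smoothscaled down to 4/5 size, then smoothscaled back up, which cheaply blurs it. A minimal standalone sketch of that trick, assuming only pygame (the helper name and scale factor are illustrative):
```python
import pygame


def blurred_copy(surface: pygame.Surface, factor: float = 0.8) -> pygame.Surface:
    """Blur a surface by downscaling it and scaling it back up."""
    width, height = surface.get_size()
    small = pygame.transform.smoothscale(
        surface, (int(width * factor), int(height * factor)))
    return pygame.transform.smoothscale(small, (width, height))
```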
#### File: src/mode/modemonconvotest.py
```python
import shared
from monster import Monster
from .modefight import ModeFight
from .modeconvo import ModeConvo
class ModeMonConvoTest(ModeConvo):
def _handleButton(self, prev_convo_key: str, index: int):
if index == 1:
print("Really anything can happen here.")
self._stopMixer()
self.next_mode = ModeFight(
shared.state.protag_mon,
Monster.atLevel(0),
lambda: ModeMonConvoTest(),
)
return True
elif index == 3:
print("The other main thing would be to have pressing a button change the mode.\n"
+ "It could set variables and then change the mode.")
return False
```
#### File: src/mode/modeopening3.py
```python
import random
from collections import deque
import pygame
import constants
import shared
from monster import Monster
from .modeintroduction0 import ModeIntroduction0
from .modeopening import ModeOpening
class ModeOpening3(ModeOpening):
GROUND_LEVEL = constants.SCREEN_SIZE[1] - 8
CENTER_TIME = 2500
TRANSITION_TIME = 750
EMPTY_TIME = 250
FULL_MONSTER_WAIT_TIME = EMPTY_TIME + TRANSITION_TIME + CENTER_TIME + TRANSITION_TIME
__slots__ = (
'monsters',
'wait_time',
'last_level',
'background',
'initial_wait_time',
)
def __init__(self):
super().__init__()
# static elements setup
self.background = pygame.Surface(constants.SCREEN_SIZE).convert(shared.display.screen)
self.background.fill(constants.WHITE)
shared.font_wrap.renderToCentered(
self.background,
(constants.SCREEN_SIZE[0] // 2, constants.SCREEN_SIZE[1] // 2 + 4),
"press any key to start",
False,
constants.BLACK
)
logo = pygame.image.load(constants.CHIKKAI_LOGO).convert(shared.display.screen)
self.background.blit(
logo,
(
constants.SCREEN_SIZE[0] // 2 - logo.get_width() // 2,
constants.SCREEN_SIZE[1] // 4 - logo.get_height() // 2,
)
)
# monster loop setup
self.last_level = 3
self.monsters = deque((), 3)
monster = self._getMonster(0, 3)
# start the first one in the center
monster.rect.midbottom = (constants.SCREEN_SIZE[0] // 2, self.GROUND_LEVEL)
monster.anims.popleft()
monster.anims.popleft()
self.monsters.append(monster)
self.wait_time = self.CENTER_TIME + self.TRANSITION_TIME
self.monsters.append(self._getMonster(self.wait_time))
self.wait_time += self.FULL_MONSTER_WAIT_TIME
self.monsters.append(self._getMonster(self.wait_time))
self.wait_time += self.FULL_MONSTER_WAIT_TIME
self.initial_wait_time = self.wait_time
def _getMonster(self, wait_time, level=None):
# wait_time is how much time until the previous mon is off the screen
if level is None:
level = random.choice(
[i for i in range(1, 4) if i != self.last_level]
)
monster = Monster.atLevel(level)
self.last_level = level
self.all_sprites.add(monster)
monster.rect.midbottom = (
constants.SCREEN_SIZE[0] + monster.rect.width // 2,
self.GROUND_LEVEL
)
monster.addWait(wait_time + self.EMPTY_TIME)
monster.addPosAbs(
Monster.Lerp,
self.TRANSITION_TIME,
constants.SCREEN_SIZE[0] // 2,
self.GROUND_LEVEL - monster.rect.height // 2
)
monster.addWait(self.CENTER_TIME)
monster.addPosAbs(
Monster.Lerp,
self.TRANSITION_TIME,
monster.rect.width // -2,
self.GROUND_LEVEL - monster.rect.height // 2
)
return monster
def _switchMode(self):
self.next_mode = ModeIntroduction0()
def _update(self, dt):
self.wait_time -= dt
# every so often, set up additional looping monsters here, so we don't run out
if self.wait_time < self.initial_wait_time - self.FULL_MONSTER_WAIT_TIME:
monster = self._getMonster(self.wait_time)
self.monsters[0].kill()
self.monsters.append(monster)
self.wait_time += self.FULL_MONSTER_WAIT_TIME
def _drawScreen(self, screen):
screen.blit(self.background, (0, 0))
```
#### File: monsters/src/saveable.py
```python
import sys
import abc
import json
from collections import deque
class Saveable(abc.ABC):
@abc.abstractmethod
def save(self):
"""Return a serializable object representing all the information that should be saved from this object."""
raise NotImplementedError(
            type(self).__name__ + ".save(self)"
)
@classmethod
@abc.abstractmethod
def load(cls, save_data):
"""Take in an object (the result of a call to save()), and return an instance of this object."""
raise NotImplementedError(
cls.__name__ + ".loadMode(cls, saveData)"
)
KEY_COLLECTION = 'COLLECTION'
COLLECTION_DEQUE = 'DEQUE'
COLLECTION_SET = 'SET'
KEY_ELEMENTS = 'ELEMENTS'
KEY_MAXLEN = 'MAXLEN'
KEY_MODULE = 'MODULE'
KEY_CLASS = 'CLASS'
KEY_SAVEABLE = 'SAVEABLE'
class SaveableJSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, deque):
return {
KEY_COLLECTION: COLLECTION_DEQUE,
KEY_ELEMENTS: list(o),
KEY_MAXLEN: o.maxlen,
}
elif isinstance(o, set):
return {
KEY_COLLECTION: COLLECTION_SET,
KEY_ELEMENTS: list(o),
}
elif isinstance(o, type):
return {
KEY_MODULE: o.__module__,
KEY_CLASS: o.__qualname__,
}
elif isinstance(o, Saveable):
return {
KEY_MODULE: type(o).__module__,
KEY_CLASS: type(o).__qualname__,
KEY_SAVEABLE: o.save(),
}
return super().default(o)
def _getClass(dct: dict):
attr = sys.modules[dct[KEY_MODULE]]
for name in dct[KEY_CLASS].split('.'):
attr = getattr(attr, name)
return attr
def decodeSaveable(dct: dict):
if KEY_COLLECTION in dct:
if dct[KEY_COLLECTION] == COLLECTION_DEQUE:
return deque(dct[KEY_ELEMENTS], dct[KEY_MAXLEN])
elif dct[KEY_COLLECTION] == COLLECTION_SET:
return set(dct[KEY_ELEMENTS])
elif {KEY_MODULE, KEY_CLASS} == dct.keys():
return _getClass(dct)
elif {KEY_MODULE, KEY_CLASS, KEY_SAVEABLE} == dct.keys():
saveable_class = _getClass(dct)
return saveable_class.load(dct[KEY_SAVEABLE])
return dct
```
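The encoder/decoder pair above round-trips deques, sets, classes, and `Saveable` instances through plain JSON. A quick sketch of that round trip, assuming the module is importable as `saveable` (the sample data is made up):
```python
import json
from collections import deque

import saveable

state = {'recent': deque([1, 2, 3], 3), 'flags': {'met_npc', 'won_fight'}}
encoded = json.dumps(state, cls=saveable.SaveableJSONEncoder)
decoded = json.loads(encoded, object_hook=saveable.decodeSaveable)

assert isinstance(decoded['recent'], deque) and decoded['recent'].maxlen == 3
assert decoded['flags'] == {'met_npc', 'won_fight'}
```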
#### File: monsters/src/save.py
```python
import os
import json
import constants
import shared
import saveable
import mode
from state import State
class Save(object):
__slots__ = (
'file_name',
'_mode_name',
'_mode_data',
'_shared_data',
)
def __init__(self, file_name: str, mode_name: str, mode_data, shared_data):
self.file_name = file_name
self._mode_name = mode_name
self._mode_data = mode_data
self._shared_data = shared_data
@staticmethod
def willOverwrite(file_name: str):
return os.path.exists(
os.path.join(constants.SAVE_DIRECTORY, file_name)
)
@staticmethod
def _getSaveFiles():
if not os.path.isdir(constants.SAVE_DIRECTORY):
return ()
return (
file_name
for file_name
in os.listdir(constants.SAVE_DIRECTORY)
if os.path.isfile(
os.path.join(constants.SAVE_DIRECTORY, file_name)
)
)
@classmethod
def getAllFromFiles(cls):
return tuple(
sorted(
(
save
for save
in (
cls.getFromFile(file)
for file
in cls._getSaveFiles()
)
if save
),
key=lambda s: (s.file_name.lower(), s.file_name)
)
)
@classmethod
def getFromFile(cls, file_name: str):
file_path = os.path.join(constants.SAVE_DIRECTORY, file_name)
try:
with open(file_path, 'r') as file:
save_object = json.load(file, object_hook=saveable.decodeSaveable)
return cls(file_name, save_object['mode_name'], save_object['mode_data'], save_object['shared_data'])
except (IOError, json.decoder.JSONDecodeError):
return False
@classmethod
def getFromMode(cls, file_name: str, from_mode: saveable.Saveable):
return cls(file_name, type(from_mode).__name__, from_mode.save(), shared.state.save())
def save(self):
try:
os.mkdir(constants.SAVE_DIRECTORY)
except FileExistsError:
pass
save_object = {
'mode_name': self._mode_name,
'mode_data': self._mode_data,
'shared_data': self._shared_data,
}
file_path = os.path.join(constants.SAVE_DIRECTORY, self.file_name)
try:
with open(file_path, 'w') as file:
json.dump(save_object, file, cls=saveable.SaveableJSONEncoder)
return True
except IOError:
return False
def load(self):
shared.state = State.load(self._shared_data)
mode_cls = getattr(mode, self._mode_name)
new_mode = mode_cls.load(self._mode_data)
return new_mode
``` |
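Roughly how the menu modes earlier in this document tie into `Save`: the save menu builds a `Save` from the current mode and writes it, and the load menu lists existing files and restores one. A hedged sketch of that flow, where `current_mode` stands in for any mode implementing `Saveable`:
```python
# Saving: serialize the current mode plus shared state to disk.
new_save = Save.getFromMode('slot1.sav', current_mode)
saved_ok = new_save.save()        # True on success, False on IOError

# Loading: list save files and rebuild the mode one was taken from.
saves = Save.getAllFromFiles()
if saves:
    restored_mode = saves[0].load()   # also replaces shared.state
```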
{
"source": "jovial/netmiko",
"score": 2
} |
#### File: netmiko/juniper/juniper.py
```python
from __future__ import unicode_literals
import re
import time
from netmiko.base_connection import BaseConnection
from netmiko.scp_handler import BaseFileTransfer
from netmiko.py23_compat import text_type
class JuniperBase(BaseConnection):
"""
Implement methods for interacting with Juniper Networks devices.
Disables `enable()` and `check_enable_mode()`
methods. Overrides several methods for Juniper-specific compatibility.
"""
def session_preparation(self):
"""
Prepare the session after the connection has been established.
Disable paging (the '--more--' prompts).
Set the base prompt for interaction ('>').
"""
self._test_channel_read()
self.enter_cli_mode()
self.set_base_prompt()
self.disable_paging(command="set cli screen-length 0")
self.set_terminal_width(command='set cli screen-width 511')
# Clear the read buffer
time.sleep(.3 * self.global_delay_factor)
self.clear_buffer()
def _enter_shell(self):
"""Enter the Bourne Shell."""
return self.send_command('start shell sh', expect_string=r"[\$#]")
def _return_cli(self):
"""Return to the Juniper CLI."""
return self.send_command('exit', expect_string=r"[#>]")
def enter_cli_mode(self):
"""Check if at shell prompt root@ and go into CLI."""
delay_factor = self.select_delay_factor(delay_factor=0)
count = 0
cur_prompt = ''
while count < 50:
self.write_channel(self.RETURN)
time.sleep(.1 * delay_factor)
cur_prompt = self.read_channel()
if re.search(r'root@', cur_prompt) or re.search(r"^%$", cur_prompt.strip()):
self.write_channel("cli" + self.RETURN)
time.sleep(.3 * delay_factor)
self.clear_buffer()
break
elif '>' in cur_prompt or '#' in cur_prompt:
break
count += 1
def check_enable_mode(self, *args, **kwargs):
"""No enable mode on Juniper."""
pass
def enable(self, *args, **kwargs):
"""No enable mode on Juniper."""
pass
def exit_enable_mode(self, *args, **kwargs):
"""No enable mode on Juniper."""
pass
def check_config_mode(self, check_string=']'):
"""Checks if the device is in configuration mode or not."""
return super(JuniperBase, self).check_config_mode(check_string=check_string)
def config_mode(self, config_command='configure'):
"""Enter configuration mode."""
return super(JuniperBase, self).config_mode(config_command=config_command)
def exit_config_mode(self, exit_config='exit configuration-mode'):
"""Exit configuration mode."""
output = ""
if self.check_config_mode():
output = self.send_command_timing(exit_config, strip_prompt=False, strip_command=False)
if 'Exit with uncommitted changes?' in output:
output += self.send_command_timing('yes', strip_prompt=False, strip_command=False)
if self.check_config_mode():
raise ValueError("Failed to exit configuration mode")
return output
def commit(self, confirm=False, confirm_delay=None, check=False, comment='',
and_quit=False, delay_factor=1):
"""
Commit the candidate configuration.
Commit the entered configuration. Raise an error and return the failure
if the commit fails.
Automatically enters configuration mode
default:
command_string = commit
        check and (confirm or confirm_delay or comment):
Exception
confirm_delay and no confirm:
Exception
confirm:
confirm_delay option
comment option
command_string = commit confirmed or commit confirmed <confirm_delay>
check:
command_string = commit check
"""
delay_factor = self.select_delay_factor(delay_factor)
if check and (confirm or confirm_delay or comment):
raise ValueError("Invalid arguments supplied with commit check")
if confirm_delay and not confirm:
raise ValueError("Invalid arguments supplied to commit method both confirm and check")
# Select proper command string based on arguments provided
command_string = 'commit'
commit_marker = 'commit complete'
if check:
command_string = 'commit check'
commit_marker = 'configuration check succeeds'
elif confirm:
if confirm_delay:
command_string = 'commit confirmed ' + text_type(confirm_delay)
else:
command_string = 'commit confirmed'
commit_marker = 'commit confirmed will be automatically rolled back in'
# wrap the comment in quotes
if comment:
if '"' in comment:
raise ValueError("Invalid comment contains double quote")
comment = '"{0}"'.format(comment)
command_string += ' comment ' + comment
if and_quit:
command_string += ' and-quit'
# Enter config mode (if necessary)
output = self.config_mode()
# and_quit will get out of config mode on commit
if and_quit:
prompt = self.base_prompt
output += self.send_command_expect(command_string, expect_string=prompt,
strip_prompt=False,
strip_command=False, delay_factor=delay_factor)
else:
output += self.send_command_expect(command_string, strip_prompt=False,
strip_command=False, delay_factor=delay_factor)
if commit_marker not in output:
raise ValueError("Commit failed with the following errors:\n\n{0}"
.format(output))
return output
def strip_prompt(self, *args, **kwargs):
"""Strip the trailing router prompt from the output."""
a_string = super(JuniperBase, self).strip_prompt(*args, **kwargs)
return self.strip_context_items(a_string)
def strip_context_items(self, a_string):
"""Strip Juniper-specific output.
Juniper will also put a configuration context:
[edit]
and various chassis contexts:
{master:0}, {backup:1}
This method removes those lines.
"""
strings_to_strip = [
r'\[edit.*\]',
r'\{master:.*\}',
r'\{backup:.*\}',
r'\{line.*\}',
r'\{primary.*\}',
r'\{secondary.*\}',
]
response_list = a_string.split(self.RESPONSE_RETURN)
last_line = response_list[-1]
for pattern in strings_to_strip:
if re.search(pattern, last_line):
return self.RESPONSE_RETURN.join(response_list[:-1])
return a_string
class JuniperSSH(JuniperBase):
pass
class JuniperTelnet(JuniperBase):
def __init__(self, *args, **kwargs):
default_enter = kwargs.get('default_enter')
kwargs['default_enter'] = '\r\n' if default_enter is None else default_enter
super(JuniperTelnet, self).__init__(*args, **kwargs)
class JuniperFileTransfer(BaseFileTransfer):
"""Juniper SCP File Transfer driver."""
def __init__(self, ssh_conn, source_file, dest_file, file_system="/var/tmp", direction='put'):
return super(JuniperFileTransfer, self).__init__(ssh_conn=ssh_conn,
source_file=source_file,
dest_file=dest_file,
file_system=file_system,
direction=direction)
def remote_space_available(self, search_pattern=""):
"""Return space available on remote device."""
return self._remote_space_available_unix(search_pattern=search_pattern)
def check_file_exists(self, remote_cmd=""):
"""Check if the dest_file already exists on the file system (return boolean)."""
return self._check_file_exists_unix(remote_cmd=remote_cmd)
def remote_file_size(self, remote_cmd="", remote_file=None):
"""Get the file size of the remote file."""
return self._remote_file_size_unix(remote_cmd=remote_cmd, remote_file=remote_file)
def remote_md5(self, base_cmd='file checksum md5', remote_file=None):
return super(JuniperFileTransfer, self).remote_md5(base_cmd=base_cmd,
remote_file=remote_file)
def enable_scp(self, cmd=None):
raise NotImplementedError
def disable_scp(self, cmd=None):
raise NotImplementedError
```
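A hedged usage sketch for the commit workflow implemented above; the host, credentials, and configuration line are placeholders, and `send_config_set` comes from netmiko's base connection class rather than this file:
```python
from netmiko import ConnectHandler

conn = ConnectHandler(device_type='juniper', host='192.0.2.10',
                      username='admin', password='secret')
conn.send_config_set(['set system host-name lab-router'])
conn.commit(check=True)                              # 'commit check' only validates
conn.commit(comment='netmiko edit', and_quit=True)   # apply and leave config mode
conn.disconnect()
```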
#### File: netmiko/tests/test_netmiko_sesssion_log.py
```python
from __future__ import print_function
from __future__ import unicode_literals
import time
import hashlib
import io
from netmiko import ConnectHandler
def calc_md5(file_name=None, contents=None):
"""Compute MD5 hash of file."""
if contents is not None:
pass
elif file_name:
with open(file_name, "rb") as f:
contents = f.read()
else:
raise ValueError("Most specify either file_name or contents")
return hashlib.md5(contents).hexdigest()
def read_session_log(session_file, append=False):
"""Leading white-space can vary. Strip off leading white-space."""
with open(session_file, "rb") as f:
if append is True:
line = f.readline().decode()
assert 'Initial file contents' in line
log_content = f.read().lstrip()
return log_content
def session_action(my_connect, command):
"""Common actions in the netmiko session to generate the session log."""
time.sleep(1)
my_connect.clear_buffer()
output = my_connect.send_command(command)
my_connect.disconnect()
return output
def session_log_md5(session_file, compare_file):
"""Compare the session_log MD5 to the compare_file MD5"""
compare_log_md5 = calc_md5(file_name=compare_file)
log_content = read_session_log(session_file)
session_log_md5 = calc_md5(contents=log_content)
assert session_log_md5 == compare_log_md5
def session_log_md5_append(session_file, compare_file):
"""Compare the session_log MD5 to the compare_file MD5"""
compare_log_md5 = calc_md5(file_name=compare_file)
log_content = read_session_log(session_file, append=True)
session_log_md5 = calc_md5(contents=log_content)
assert session_log_md5 == compare_log_md5
def test_session_log(net_connect, commands, expected_responses):
"""Verify session_log matches expected content."""
command = commands["basic"]
session_action(net_connect, command)
compare_file = expected_responses['compare_log']
session_file = expected_responses['session_log']
session_log_md5(session_file, compare_file)
def test_session_log_write(net_connect_slog_wr, commands, expected_responses):
"""Verify session_log matches expected content, but when channel writes are also logged."""
command = commands["basic"]
session_action(net_connect_slog_wr, command)
compare_file = expected_responses['compare_log_wr']
session_file = expected_responses['session_log']
session_log_md5(session_file, compare_file)
def test_session_log_append(device_slog, commands, expected_responses):
"""Verify session_log matches expected content, but when channel writes are also logged."""
session_file = expected_responses['session_log_append']
# Create a starting file
with open(session_file, "wb") as f:
f.write(b"Initial file contents\n\n")
# The netmiko connection has not been established yet.
device_slog['session_log'] = session_file
conn = ConnectHandler(**device_slog)
command = commands["basic"]
session_action(conn, command)
compare_file = expected_responses['compare_log_append']
session_log_md5_append(session_file, compare_file)
def test_session_log_bytesio(device_slog, commands, expected_responses):
"""Verify session_log matches expected content, but when channel writes are also logged."""
s_log = io.BytesIO()
# The netmiko connection has not been established yet.
device_slog['session_log'] = s_log
device_slog['session_log_file_mode'] = 'write'
conn = ConnectHandler(**device_slog)
command = commands["basic"]
session_action(conn, command)
compare_file = expected_responses['compare_log']
compare_log_md5 = calc_md5(file_name=compare_file)
log_content = s_log.getvalue()
session_log_md5 = calc_md5(contents=log_content)
assert session_log_md5 == compare_log_md5
``` |
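For context, the connection options these tests exercise look roughly like this; the device type, host, and credentials are placeholders:
```python
from netmiko import ConnectHandler

device = {
    'device_type': 'cisco_ios',
    'host': '192.0.2.20',
    'username': 'admin',
    'password': 'secret',
    'session_log': 'output.log',        # file path or a file-like object such as io.BytesIO
    'session_log_file_mode': 'append',  # 'write' (default) or 'append'
}
conn = ConnectHandler(**device)
conn.send_command('show version')
conn.disconnect()
```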
{
"source": "jovial/nova",
"score": 2
} |
#### File: placement/handlers/resource_provider.py
```python
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import webob
from nova.api.openstack.placement import errors
from nova.api.openstack.placement import exception
from nova.api.openstack.placement import microversion
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova.api.openstack.placement.policies import resource_provider as policies
from nova.api.openstack.placement.schemas import resource_provider as rp_schema
from nova.api.openstack.placement import util
from nova.api.openstack.placement import wsgi_wrapper
from nova.i18n import _
def _serialize_links(environ, resource_provider):
url = util.resource_provider_url(environ, resource_provider)
links = [{'rel': 'self', 'href': url}]
rel_types = ['inventories', 'usages']
want_version = environ[microversion.MICROVERSION_ENVIRON]
if want_version >= (1, 1):
rel_types.append('aggregates')
if want_version >= (1, 6):
rel_types.append('traits')
if want_version >= (1, 11):
rel_types.append('allocations')
for rel in rel_types:
links.append({'rel': rel, 'href': '%s/%s' % (url, rel)})
return links
def _serialize_provider(environ, resource_provider, want_version):
data = {
'uuid': resource_provider.uuid,
'name': resource_provider.name,
'generation': resource_provider.generation,
'links': _serialize_links(environ, resource_provider)
}
if want_version.matches((1, 14)):
data['parent_provider_uuid'] = resource_provider.parent_provider_uuid
data['root_provider_uuid'] = resource_provider.root_provider_uuid
return data
def _serialize_providers(environ, resource_providers, want_version):
output = []
last_modified = None
get_last_modified = want_version.matches((1, 15))
for provider in resource_providers:
if get_last_modified:
last_modified = util.pick_last_modified(last_modified, provider)
provider_data = _serialize_provider(environ, provider, want_version)
output.append(provider_data)
last_modified = last_modified or timeutils.utcnow(with_timezone=True)
return ({"resource_providers": output}, last_modified)
@wsgi_wrapper.PlacementWsgify
@util.require_content('application/json')
def create_resource_provider(req):
"""POST to create a resource provider.
On success return a 201 response with an empty body and a location
header pointing to the newly created resource provider.
"""
context = req.environ['placement.context']
context.can(policies.CREATE)
schema = rp_schema.POST_RESOURCE_PROVIDER_SCHEMA
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
if want_version.matches((1, 14)):
schema = rp_schema.POST_RP_SCHEMA_V1_14
data = util.extract_json(req.body, schema)
try:
uuid = data.setdefault('uuid', uuidutils.generate_uuid())
resource_provider = rp_obj.ResourceProvider(context, **data)
resource_provider.create()
except db_exc.DBDuplicateEntry as exc:
# Whether exc.columns has one or two entries (in the event
# of both fields being duplicates) appears to be database
# dependent, so going with the complete solution here.
duplicate = ', '.join(['%s: %s' % (column, data[column])
for column in exc.columns])
raise webob.exc.HTTPConflict(
_('Conflicting resource provider %(duplicate)s already exists.') %
{'duplicate': duplicate},
comment=errors.DUPLICATE_NAME)
except exception.ObjectActionError as exc:
raise webob.exc.HTTPBadRequest(
_('Unable to create resource provider "%(name)s", %(rp_uuid)s: '
'%(error)s') %
{'name': data['name'], 'rp_uuid': uuid, 'error': exc})
req.response.location = util.resource_provider_url(
req.environ, resource_provider)
if want_version.matches(min_version=(1, 20)):
req.response.body = encodeutils.to_utf8(jsonutils.dumps(
_serialize_provider(req.environ, resource_provider, want_version)))
req.response.content_type = 'application/json'
modified = util.pick_last_modified(None, resource_provider)
req.response.last_modified = modified
req.response.cache_control = 'no-cache'
else:
req.response.status = 201
req.response.content_type = None
return req.response
@wsgi_wrapper.PlacementWsgify
def delete_resource_provider(req):
"""DELETE to destroy a single resource provider.
On success return a 204 and an empty body.
"""
uuid = util.wsgi_path_item(req.environ, 'uuid')
context = req.environ['placement.context']
context.can(policies.DELETE)
# The containing application will catch a not found here.
try:
resource_provider = rp_obj.ResourceProvider.get_by_uuid(
context, uuid)
resource_provider.destroy()
except exception.ResourceProviderInUse as exc:
raise webob.exc.HTTPConflict(
_('Unable to delete resource provider %(rp_uuid)s: %(error)s') %
{'rp_uuid': uuid, 'error': exc},
comment=errors.PROVIDER_IN_USE)
except exception.NotFound as exc:
raise webob.exc.HTTPNotFound(
_("No resource provider with uuid %s found for delete") % uuid)
except exception.CannotDeleteParentResourceProvider as exc:
raise webob.exc.HTTPConflict(
_("Unable to delete parent resource provider %(rp_uuid)s: "
"It has child resource providers.") % {'rp_uuid': uuid},
comment=errors.PROVIDER_CANNOT_DELETE_PARENT)
req.response.status = 204
req.response.content_type = None
return req.response
@wsgi_wrapper.PlacementWsgify
@util.check_accept('application/json')
def get_resource_provider(req):
"""Get a single resource provider.
On success return a 200 with an application/json body representing
the resource provider.
"""
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
uuid = util.wsgi_path_item(req.environ, 'uuid')
context = req.environ['placement.context']
context.can(policies.SHOW)
# The containing application will catch a not found here.
resource_provider = rp_obj.ResourceProvider.get_by_uuid(
context, uuid)
response = req.response
response.body = encodeutils.to_utf8(jsonutils.dumps(
_serialize_provider(req.environ, resource_provider, want_version)))
response.content_type = 'application/json'
if want_version.matches((1, 15)):
modified = util.pick_last_modified(None, resource_provider)
response.last_modified = modified
response.cache_control = 'no-cache'
return response
@wsgi_wrapper.PlacementWsgify
@util.check_accept('application/json')
def list_resource_providers(req):
"""GET a list of resource providers.
On success return a 200 and an application/json body representing
a collection of resource providers.
"""
context = req.environ['placement.context']
context.can(policies.LIST)
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
schema = rp_schema.GET_RPS_SCHEMA_1_0
if want_version.matches((1, 18)):
schema = rp_schema.GET_RPS_SCHEMA_1_18
elif want_version.matches((1, 14)):
schema = rp_schema.GET_RPS_SCHEMA_1_14
elif want_version.matches((1, 4)):
schema = rp_schema.GET_RPS_SCHEMA_1_4
elif want_version.matches((1, 3)):
schema = rp_schema.GET_RPS_SCHEMA_1_3
allow_forbidden = want_version.matches((1, 22))
util.validate_query_params(req, schema)
filters = {}
# special handling of member_of qparam since we allow multiple member_of
# params at microversion 1.24.
if 'member_of' in req.GET:
filters['member_of'] = util.normalize_member_of_qs_params(req)
qpkeys = ('uuid', 'name', 'in_tree', 'resources', 'required')
for attr in qpkeys:
if attr in req.GET:
value = req.GET[attr]
if attr == 'resources':
value = util.normalize_resources_qs_param(value)
elif attr == 'required':
value = util.normalize_traits_qs_param(
value, allow_forbidden=allow_forbidden)
filters[attr] = value
try:
resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
context, filters)
except exception.ResourceClassNotFound as exc:
raise webob.exc.HTTPBadRequest(
_('Invalid resource class in resources parameter: %(error)s') %
{'error': exc})
except exception.TraitNotFound as exc:
raise webob.exc.HTTPBadRequest(
_('Invalid trait(s) in "required" parameter: %(error)s') %
{'error': exc})
response = req.response
output, last_modified = _serialize_providers(
req.environ, resource_providers, want_version)
response.body = encodeutils.to_utf8(jsonutils.dumps(output))
response.content_type = 'application/json'
if want_version.matches((1, 15)):
response.last_modified = last_modified
response.cache_control = 'no-cache'
return response
@wsgi_wrapper.PlacementWsgify
@util.require_content('application/json')
def update_resource_provider(req):
"""PUT to update a single resource provider.
On success return a 200 response with a representation of the updated
resource provider.
"""
uuid = util.wsgi_path_item(req.environ, 'uuid')
context = req.environ['placement.context']
context.can(policies.UPDATE)
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
# The containing application will catch a not found here.
resource_provider = rp_obj.ResourceProvider.get_by_uuid(
context, uuid)
schema = rp_schema.PUT_RESOURCE_PROVIDER_SCHEMA
if want_version.matches((1, 14)):
schema = rp_schema.PUT_RP_SCHEMA_V1_14
data = util.extract_json(req.body, schema)
for field in rp_obj.ResourceProvider.SETTABLE_FIELDS:
if field in data:
setattr(resource_provider, field, data[field])
try:
resource_provider.save()
except db_exc.DBDuplicateEntry as exc:
raise webob.exc.HTTPConflict(
_('Conflicting resource provider %(name)s already exists.') %
{'name': data['name']},
comment=errors.DUPLICATE_NAME)
except exception.ObjectActionError as exc:
raise webob.exc.HTTPBadRequest(
_('Unable to save resource provider %(rp_uuid)s: %(error)s') %
{'rp_uuid': uuid, 'error': exc})
response = req.response
response.status = 200
response.body = encodeutils.to_utf8(jsonutils.dumps(
_serialize_provider(req.environ, resource_provider, want_version)))
response.content_type = 'application/json'
if want_version.matches((1, 15)):
response.last_modified = resource_provider.updated_at
response.cache_control = 'no-cache'
return response
```
#### File: openstack/placement/test_direct.py
```python
from oslo_config import cfg
from nova.api.openstack.placement import direct
from nova.api.openstack.placement.objects import resource_provider
from nova import context
from nova import test
from nova.tests import fixtures
from nova.tests import uuidsentinel
CONF = cfg.CONF
# FIXME(cdent): some dupes with db/test_base.py
class TestDirect(test.NoDBTestCase):
USES_DB_SELF = True
def setUp(self):
super(TestDirect, self).setUp()
self.api_db = self.useFixture(fixtures.Database(database='placement'))
self._reset_traits_synced()
self.context = context.get_admin_context()
self.addCleanup(self._reset_traits_synced)
@staticmethod
def _reset_traits_synced():
"""Reset the _TRAITS_SYNCED boolean to base state."""
resource_provider._TRAITS_SYNCED = False
def test_direct_is_there(self):
with direct.PlacementDirect(CONF) as client:
resp = client.get('/')
self.assertTrue(resp)
data = resp.json()
self.assertEqual('v1.0', data['versions'][0]['id'])
def test_get_resource_providers(self):
with direct.PlacementDirect(CONF) as client:
resp = client.get('/resource_providers')
self.assertTrue(resp)
data = resp.json()
self.assertEqual([], data['resource_providers'])
def test_create_resource_provider(self):
data = {'name': 'fake'}
with direct.PlacementDirect(CONF) as client:
resp = client.post('/resource_providers', json=data)
self.assertTrue(resp)
resp = client.get('/resource_providers')
self.assertTrue(resp)
data = resp.json()
self.assertEqual(1, len(data['resource_providers']))
def test_json_validation_happens(self):
data = {'name': 'fake', 'cowsay': 'moo'}
with direct.PlacementDirect(CONF) as client:
resp = client.post('/resource_providers', json=data)
self.assertFalse(resp)
self.assertEqual(400, resp.status_code)
def test_microversion_handling(self):
with direct.PlacementDirect(CONF) as client:
# create parent
parent_data = {'name': uuidsentinel.p_rp,
'uuid': uuidsentinel.p_rp}
resp = client.post('/resource_providers', json=parent_data)
self.assertTrue(resp, resp.text)
# attempt to create child
data = {'name': 'child', 'parent_provider_uuid': uuidsentinel.p_rp}
# no microversion, 400
resp = client.post('/resource_providers', json=data)
self.assertFalse(resp)
self.assertEqual(400, resp.status_code)
# low microversion, 400
resp = client.post('/resource_providers', json=data,
microversion='1.13')
self.assertFalse(resp)
self.assertEqual(400, resp.status_code)
resp = client.post('/resource_providers', json=data,
microversion='1.14')
self.assertTrue(resp, resp.text)
``` |
{
"source": "jovial/PerfKitBenchmarker",
"score": 2
} |
#### File: PerfKitBenchmarker/perfkitbenchmarker/messaging_service.py
```python
import abc
import os
from typing import Any, Dict
from perfkitbenchmarker import resource
MESSAGING_SERVICE_DATA_DIR = 'messaging_service'
MESSAGING_SERVICE_CLIENT_PY = 'messaging_service_client.py'
def GetMessagingServiceClass(cloud, delivery):
"""Gets the underlying Messaging Service class."""
return resource.GetResourceClass(BaseMessagingService, CLOUD=cloud,
DELIVERY=delivery)
class BaseMessagingService(resource.BaseResource):
"""Common interface of a messaging service resource.
Attributes:
client: The client virtual machine that runs the benchmark.
"""
REQUIRED_ATTRS = ['CLOUD', 'DELIVERY']
RESOURCE_TYPE = 'BaseMessagingService'
# TODO(odiego): Move DELIVERY down to child classes when adding more options
DELIVERY = 'pull'
@classmethod
def FromSpec(cls, messaging_service_spec):
return cls()
def setVms(self, vm_groups):
self.client_vm = vm_groups['clients' if 'clients' in
vm_groups else 'default'][0]
def PrepareClientVm(self):
self._InstallCommonClientPackages()
self._InstallCloudClients()
def _InstallCommonClientPackages(self):
"""Installs common software for running benchmarks on the client VM."""
    # Install common packages
self.client_vm.Install('python3')
self.client_vm.Install('pip3')
self.client_vm.RemoteCommand('sudo pip3 install absl-py numpy')
# Upload Common Client Interface
self.client_vm.PushDataFile(
os.path.join(MESSAGING_SERVICE_DATA_DIR, MESSAGING_SERVICE_CLIENT_PY))
@abc.abstractmethod
def _InstallCloudClients(self):
"""Installs software for running benchmarks on the client VM.
    This method should be overridden by subclasses to install software specific
to the flavor of MessagingService they provide.
"""
raise NotImplementedError
@abc.abstractmethod
def Run(self, benchmark_scenario: str, number_of_messages: str,
message_size: str) -> Dict[str, Any]:
"""Runs remote commands on client VM - benchmark's run phase.
Runs a benchmark that consists of first publishing messages and then
    pulling messages from the messaging service, based on the configuration
specified through the FLAGS: benchmark_scenario, number_of_messages, and
message_size. Specific implementations should override this method.
    Different providers need different info to run the benchmark - for GCP we
need 'topic_name' and 'subscription_name', while for AWS 'queue_name'
suffices.
Args:
benchmark_scenario: Specifies which benchmark scenario to run.
number_of_messages: Number of messages to use on the benchmark.
message_size: Size of the messages that will be used on the benchmark. It
specifies the number of characters in those messages.
Returns:
Dictionary with metric_name (mean_latency, p50_latency...) as key and the
results from the benchmark as the value:
results = {
'mean_latency': 0.3423443...
...
}
"""
raise NotImplementedError
```
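A minimal sketch of a concrete provider, showing how the `CLOUD`/`DELIVERY` class attributes feed `GetMessagingServiceClass` through the resource registry. The cloud name, SDK package, command line, and result parsing are hypothetical, and the `resource.BaseResource` lifecycle methods (`_Create`/`_Delete`) are omitted:
```python
class ExampleCloudPullService(BaseMessagingService):
  """Hypothetical provider; not part of PerfKitBenchmarker."""

  CLOUD = 'ExampleCloud'
  DELIVERY = 'pull'

  def _InstallCloudClients(self):
    # Made-up SDK package for the client VM.
    self.client_vm.RemoteCommand('sudo pip3 install examplecloud-sdk')

  def Run(self, benchmark_scenario, number_of_messages, message_size):
    # Assumed flags for the pushed client script; parse a single latency value.
    cmd = ('python3 {} --benchmark_scenario={} --number_of_messages={} '
           '--message_size={}').format(MESSAGING_SERVICE_CLIENT_PY,
                                       benchmark_scenario, number_of_messages,
                                       message_size)
    stdout, _ = self.client_vm.RemoteCommand(cmd)
    return {'mean_latency': float(stdout.strip())}
```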
#### File: providers/aws/aws_container_service.py
```python
import json
import os
import uuid
from absl import flags
from perfkitbenchmarker import container_service
from perfkitbenchmarker import context
from perfkitbenchmarker import errors
from perfkitbenchmarker import resource
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers import aws
from perfkitbenchmarker.providers.aws import aws_load_balancer
from perfkitbenchmarker.providers.aws import aws_logs
from perfkitbenchmarker.providers.aws import aws_network
from perfkitbenchmarker.providers.aws import s3
from perfkitbenchmarker.providers.aws import util
import requests
import six
import yaml
FLAGS = flags.FLAGS
_ECS_NOT_READY = frozenset(['PROVISIONING', 'PENDING'])
class EcrRepository(resource.BaseResource):
"""Class representing an Elastic Container Registry image repository."""
def __init__(self, name, region):
super(EcrRepository, self).__init__()
self.name = name
self.region = region
def _Create(self):
"""Creates the image repository."""
if self._Exists():
self.user_managed = True
return
create_cmd = util.AWS_PREFIX + [
'ecr', 'create-repository', '--region', self.region,
'--repository-name', self.name
]
_, stderr, retcode = vm_util.IssueCommand(
create_cmd, raise_on_failure=False)
if retcode:
if 'InsufficientInstanceCapacity' in stderr:
raise errors.Benchmarks.InsufficientCapacityCloudFailure(stderr)
if 'InstanceLimitExceeded' in stderr or 'VpcLimitExceeded' in stderr:
raise errors.Benchmarks.QuotaFailure(stderr)
raise errors.Resource.CreationError(
          'Failed to create ECR repository (return code {}): {}'.format(
              retcode, stderr))
def _Exists(self):
"""Returns True if the repository exists."""
describe_cmd = util.AWS_PREFIX + [
'ecr', 'describe-repositories', '--region', self.region,
'--repository-names', self.name
]
stdout, _, _ = vm_util.IssueCommand(
describe_cmd, suppress_warning=True, raise_on_failure=False)
if not stdout or not json.loads(stdout)['repositories']:
return False
return True
def _Delete(self):
"""Deletes the repository."""
delete_cmd = util.AWS_PREFIX + [
'ecr', 'delete-repository', '--region', self.region,
'--repository-name', self.name, '--force'
]
vm_util.IssueCommand(delete_cmd, raise_on_failure=False)
class ElasticContainerRegistry(container_service.BaseContainerRegistry):
"""Class for building and storing container images on AWS."""
CLOUD = aws.CLOUD
def __init__(self, registry_spec):
super(ElasticContainerRegistry, self).__init__(registry_spec)
self.account = self.project or util.GetAccount()
self.region = util.GetRegionFromZone(self.zone.split(',')[0])
self.repositories = []
def _Delete(self):
"""Deletes the repositories."""
for repository in self.repositories:
repository.Delete()
def Push(self, image):
"""Push a locally built image to the registry."""
repository_name = '{namespace}/{name}'.format(
namespace=self.name, name=image.name)
repository = EcrRepository(repository_name, self.region)
self.repositories.append(repository)
repository.Create()
super(ElasticContainerRegistry, self).Push(image)
def GetFullRegistryTag(self, image):
"""Gets the full tag of the image."""
tag = '{account}.dkr.ecr.{region}.amazonaws.com/{namespace}/{name}'.format(
account=self.account,
region=self.region,
namespace=self.name,
name=image)
return tag
def Login(self):
"""Logs in to the registry."""
get_login_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecr', 'get-login', '--no-include-email'
]
stdout, _, _ = vm_util.IssueCommand(get_login_cmd)
login_cmd = stdout.split()
vm_util.IssueCommand(login_cmd)
def RemoteBuild(self, image):
"""Build the image remotely."""
# TODO(ehankland) use AWS codebuild to build the image.
raise NotImplementedError()
class TaskDefinition(resource.BaseResource):
"""Class representing an AWS task definition."""
def __init__(self, name, container_spec, cluster):
super(TaskDefinition, self).__init__()
self.name = name
self.cpus = container_spec.cpus
self.memory = container_spec.memory
self.image = container_spec.image
self.container_port = container_spec.container_port
self.region = cluster.region
self.arn = None
self.log_group = aws_logs.LogGroup(self.region, 'pkb')
def _CreateDependencies(self):
"""Create the log group if it doesn't exist."""
if not self.log_group.Exists():
self.log_group.Create()
def _Create(self):
"""Create the task definition."""
register_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecs', 'register-task-definition', '--family',
self.name, '--execution-role-arn', 'ecsTaskExecutionRole',
'--network-mode', 'awsvpc', '--requires-compatibilities=FARGATE',
'--cpu',
str(int(1024 * self.cpus)), '--memory',
str(self.memory), '--container-definitions',
self._GetContainerDefinitions()
]
stdout, _, _ = vm_util.IssueCommand(register_cmd)
response = json.loads(stdout)
self.arn = response['taskDefinition']['taskDefinitionArn']
def _Delete(self):
"""Deregister the task definition."""
if self.arn is None:
return
deregister_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecs', 'deregister-task-definition',
'--task-definition', self.arn
]
vm_util.IssueCommand(deregister_cmd)
def _GetContainerDefinitions(self):
"""Returns a JSON representation of the container definitions."""
definitions = [{
'name': self.name,
'image': self.image,
'essential': True,
'portMappings': [{
'containerPort': self.container_port,
'protocol': 'TCP'
}],
'logConfiguration': {
'logDriver': 'awslogs',
'options': {
'awslogs-group': 'pkb',
'awslogs-region': self.region,
'awslogs-stream-prefix': 'pkb'
}
}
}]
return json.dumps(definitions)
class EcsTask(container_service.BaseContainer):
"""Class representing an ECS/Fargate task."""
def __init__(self, name, container_spec, cluster):
super(EcsTask, self).__init__(container_spec)
self.name = name
self.task_def = cluster.task_defs[name]
self.arn = None
self.region = cluster.region
self.cluster_name = cluster.name
self.subnet_id = cluster.network.subnet.id
self.ip_address = None
self.security_group_id = (
cluster.network.regional_network.vpc.default_security_group_id)
def _GetNetworkConfig(self):
network_config = {
'awsvpcConfiguration': {
'subnets': [self.subnet_id],
'securityGroups': [self.security_group_id],
'assignPublicIp': 'ENABLED',
}
}
return json.dumps(network_config)
def _GetOverrides(self):
"""Returns a JSON representaion of task overrides.
While the container level resources can be overridden, they have no
effect on task level resources for Fargate tasks. This means
that modifying a container spec will only affect the command of any
new containers launched from it and not cpu/memory.
"""
overrides = {
'containerOverrides': [{
'name': self.name,
}]
}
if self.command:
overrides['containerOverrides'][0]['command'] = self.command
return json.dumps(overrides)
def _Create(self):
"""Creates the task."""
run_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecs', 'run-task', '--cluster',
self.cluster_name, '--task-definition', self.task_def.arn,
'--launch-type', 'FARGATE', '--network-configuration',
self._GetNetworkConfig(), '--overrides',
self._GetOverrides()
]
stdout, _, _ = vm_util.IssueCommand(run_cmd)
response = json.loads(stdout)
self.arn = response['tasks'][0]['taskArn']
def _PostCreate(self):
"""Gets the tasks IP address."""
container = self._GetTask()['containers'][0]
self.ip_address = container['networkInterfaces'][0]['privateIpv4Address']
def _DeleteDependencies(self):
"""Delete the task def."""
self.task_def.Delete()
def _Delete(self):
"""Deletes the task."""
if self.arn is None:
return
stop_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecs', 'stop-task', '--cluster',
self.cluster_name, '--task', self.arn
]
vm_util.IssueCommand(stop_cmd)
def _GetTask(self):
"""Returns a dictionary representation of the task."""
describe_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecs', 'describe-tasks', '--cluster',
self.cluster_name, '--tasks', self.arn
]
stdout, _, _ = vm_util.IssueCommand(describe_cmd)
response = json.loads(stdout)
return response['tasks'][0]
def _IsReady(self):
"""Returns true if the task has stopped pending."""
return self._GetTask()['lastStatus'] not in _ECS_NOT_READY
def WaitForExit(self, timeout=None):
"""Waits until the task has finished running."""
@vm_util.Retry(
timeout=timeout,
retryable_exceptions=(container_service.RetriableContainerException,))
def _WaitForExit():
task = self._GetTask()
if task['lastStatus'] != 'STOPPED':
raise container_service.RetriableContainerException(
'Task is not STOPPED.')
return task
return _WaitForExit()
def GetLogs(self):
"""Returns the logs from the container."""
task_id = self.arn.split('/')[-1]
log_stream = 'pkb/{name}/{task_id}'.format(name=self.name, task_id=task_id)
return six.text_type(
aws_logs.GetLogStreamAsString(self.region, log_stream, 'pkb'))
class EcsService(container_service.BaseContainerService):
"""Class representing an ECS/Fargate service."""
def __init__(self, name, container_spec, cluster):
super(EcsService, self).__init__(container_spec)
self.client_token = str(uuid.uuid4())[:32]
self.name = name
self.task_def = cluster.task_defs[name]
self.arn = None
self.region = cluster.region
self.cluster_name = cluster.name
self.subnet_id = cluster.network.subnet.id
self.security_group_id = (
cluster.network.regional_network.vpc.default_security_group_id)
self.load_balancer = aws_load_balancer.LoadBalancer(
[cluster.network.subnet])
self.target_group = aws_load_balancer.TargetGroup(
cluster.network.regional_network.vpc, self.container_port)
self.port = 80
def _CreateDependencies(self):
"""Creates the load balancer for the service."""
self.load_balancer.Create()
self.target_group.Create()
listener = aws_load_balancer.Listener(self.load_balancer, self.target_group,
self.port)
listener.Create()
self.ip_address = self.load_balancer.dns_name
def _DeleteDependencies(self):
"""Deletes the service's load balancer."""
self.task_def.Delete()
self.load_balancer.Delete()
self.target_group.Delete()
# TODO(ferneyhough): Consider supporting the flag container_cluster_version.
def _Create(self):
"""Creates the service."""
create_cmd = util.AWS_PREFIX + [
'--region',
self.region,
'ecs',
'create-service',
'--desired-count',
'1',
'--client-token',
self.client_token,
'--cluster',
self.cluster_name,
'--service-name',
self.name,
'--task-definition',
self.task_def.arn,
'--launch-type',
'FARGATE',
'--network-configuration',
self._GetNetworkConfig(),
'--load-balancers',
self._GetLoadBalancerConfig(),
]
vm_util.IssueCommand(create_cmd)
def _Delete(self):
"""Deletes the service."""
update_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecs', 'update-service', '--cluster',
self.cluster_name, '--service', self.name, '--desired-count', '0'
]
vm_util.IssueCommand(update_cmd)
delete_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecs', 'delete-service', '--cluster',
self.cluster_name, '--service', self.name
]
vm_util.IssueCommand(delete_cmd, raise_on_failure=False)
def _GetNetworkConfig(self):
network_config = {
'awsvpcConfiguration': {
'subnets': [self.subnet_id],
'securityGroups': [self.security_group_id],
'assignPublicIp': 'ENABLED',
}
}
return json.dumps(network_config)
def _GetLoadBalancerConfig(self):
"""Returns the JSON representation of the service load balancers."""
load_balancer_config = [{
'targetGroupArn': self.target_group.arn,
'containerName': self.name,
'containerPort': self.container_port,
}]
return json.dumps(load_balancer_config)
def _IsReady(self):
"""Returns True if the Service is ready."""
url = 'http://%s' % self.ip_address
try:
r = requests.get(url)
except requests.ConnectionError:
return False
if r.status_code == 200:
return True
return False
class FargateCluster(container_service.BaseContainerCluster):
"""Class representing an AWS Fargate cluster."""
CLOUD = aws.CLOUD
CLUSTER_TYPE = 'Fargate'
def __init__(self, cluster_spec):
super(FargateCluster, self).__init__(cluster_spec)
self.region = util.GetRegionFromZone(self.zone)
self.network = aws_network.AwsNetwork.GetNetwork(self)
self.firewall = aws_network.AwsFirewall.GetFirewall()
self.name = 'pkb-%s' % FLAGS.run_uri
self.task_defs = {}
self.arn = None
def _Create(self):
"""Creates the cluster."""
create_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecs', 'create-cluster', '--cluster-name',
self.name
]
stdout, _, _ = vm_util.IssueCommand(create_cmd)
response = json.loads(stdout)
self.arn = response['cluster']['clusterArn']
def _Exists(self):
"""Returns True if the cluster exists."""
if not self.arn:
return False
describe_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecs', 'describe-clusters', '--clusters',
self.arn
]
stdout, _, _ = vm_util.IssueCommand(describe_cmd)
response = json.loads(stdout)
clusters = response['clusters']
if not clusters or clusters[0]['status'] == 'INACTIVE':
return False
return True
def _Delete(self):
"""Deletes the cluster."""
delete_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecs', 'delete-cluster', '--cluster', self.name
]
vm_util.IssueCommand(delete_cmd, raise_on_failure=False)
def DeployContainer(self, name, container_spec):
"""Deploys the container according to the spec."""
if name not in self.task_defs:
task_def = TaskDefinition(name, container_spec, self)
self.task_defs[name] = task_def
task_def.Create()
task = EcsTask(name, container_spec, self)
self.containers[name].append(task)
task.Create()
def DeployContainerService(self, name, container_spec):
"""Deploys the container service according to the spec."""
if name not in self.task_defs:
task_def = TaskDefinition(name, container_spec, self)
self.task_defs[name] = task_def
task_def.Create()
service = EcsService(name, container_spec, self)
self.services[name] = service
self.firewall.AllowPortInSecurityGroup(service.region,
service.security_group_id,
service.container_port)
service.Create()
class AwsKopsCluster(container_service.KubernetesCluster):
"""Class representing a kops based Kubernetes cluster."""
CLOUD = aws.CLOUD
CLUSTER_TYPE = 'kops'
def __init__(self, spec):
super(AwsKopsCluster, self).__init__(spec)
self.name += '.k8s.local'
self.config_bucket = 'kops-%s-%s' % (FLAGS.run_uri, str(uuid.uuid4()))
self.region = util.GetRegionFromZone(self.zone)
self.s3_service = s3.S3Service()
self.s3_service.PrepareService(self.region)
def _CreateDependencies(self):
"""Create the bucket to store cluster config."""
self.s3_service.MakeBucket(self.config_bucket)
def _DeleteDependencies(self):
"""Delete the bucket that stores cluster config."""
self.s3_service.DeleteBucket(self.config_bucket)
def _Create(self):
"""Creates the cluster."""
# Create the cluster spec but don't provision any resources.
create_cmd = [
FLAGS.kops, 'create', 'cluster',
'--name=%s' % self.name,
'--zones=%s' % self.zone,
'--node-count=%s' % self.num_nodes,
'--node-size=%s' % self.machine_type
]
env = os.environ.copy()
env['KUBECONFIG'] = FLAGS.kubeconfig
env['KOPS_STATE_STORE'] = 's3://%s' % self.config_bucket
vm_util.IssueCommand(create_cmd, env=env)
# Download the cluster spec and modify it.
get_cmd = [FLAGS.kops, 'get', 'cluster', self.name, '--output=yaml']
stdout, _, _ = vm_util.IssueCommand(get_cmd, env=env)
spec = yaml.safe_load(stdout)
spec['metadata']['creationTimestamp'] = None
spec['spec']['api']['loadBalancer']['idleTimeoutSeconds'] = 3600
benchmark_spec = context.GetThreadBenchmarkSpec()
spec['spec']['cloudLabels'] = {
'owner': FLAGS.owner,
'perfkitbenchmarker-run': FLAGS.run_uri,
'benchmark': benchmark_spec.name,
'perfkit_uuid': benchmark_spec.uuid,
'benchmark_uid': benchmark_spec.uid
}
# Replace the cluster spec.
with vm_util.NamedTemporaryFile() as tf:
yaml.dump(spec, tf)
tf.close()
replace_cmd = [FLAGS.kops, 'replace', '--filename=%s' % tf.name]
vm_util.IssueCommand(replace_cmd, env=env)
# Create the actual cluster.
update_cmd = [FLAGS.kops, 'update', 'cluster', self.name, '--yes']
vm_util.IssueCommand(update_cmd, env=env)
def _Delete(self):
"""Deletes the cluster."""
delete_cmd = [
FLAGS.kops, 'delete', 'cluster',
'--name=%s' % self.name,
'--state=s3://%s' % self.config_bucket, '--yes'
]
vm_util.IssueCommand(delete_cmd, raise_on_failure=False)
def _IsReady(self):
"""Returns True if the cluster is ready, else False."""
validate_cmd = [
FLAGS.kops, 'validate', 'cluster',
'--name=%s' % self.name,
'--state=s3://%s' % self.config_bucket
]
env = os.environ.copy()
env['KUBECONFIG'] = FLAGS.kubeconfig
_, _, retcode = vm_util.IssueCommand(
validate_cmd, env=env, suppress_warning=True, raise_on_failure=False)
return not retcode
``` |
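A rough sketch of how a benchmark might drive `FargateCluster` for a one-shot task; `cluster_spec` and `container_spec` are placeholders normally built by PKB's config machinery:
```python
cluster = FargateCluster(cluster_spec)
cluster.Create()
try:
  # Registers a TaskDefinition on first use of the name, then runs an EcsTask.
  cluster.DeployContainer('workload', container_spec)
  task = cluster.containers['workload'][0]
  task.WaitForExit(timeout=1200)
  print(task.GetLogs())
finally:
  cluster.Delete()
```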
{
"source": "jovial/refstack",
"score": 2
} |
#### File: api/controllers/results.py
```python
import functools
from oslo_config import cfg
from oslo_log import log
import pecan
from pecan import rest
from six.moves.urllib import parse
from refstack import db
from refstack.api import constants as const
from refstack.api import utils as api_utils
from refstack.api import validators
from refstack.api.controllers import validation
LOG = log.getLogger(__name__)
CONF = cfg.CONF
class MetadataController(rest.RestController):
"""/v1/results/<test_id>/meta handler."""
rw_access_keys = ('shared', 'guideline', 'target',)
def _check_key(func):
"""Decorator to check that a specific key has write access."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
meta_key = args[2]
if meta_key not in args[0].rw_access_keys:
pecan.abort(403)
return func(*args, **kwargs)
return wrapper
@pecan.expose('json')
def get(self, test_id):
"""Get test run metadata."""
test_info = db.get_test_result(test_id)
role = api_utils.get_user_role(test_id)
if role in (const.ROLE_FOUNDATION, const.ROLE_OWNER):
return test_info['meta']
        elif role in (const.ROLE_USER,):
return {k: v for k, v in test_info['meta'].items()
if k in self.rw_access_keys}
pecan.abort(403)
@pecan.expose('json')
def get_one(self, test_id, key):
"""Get value for key from test run metadata."""
role = api_utils.get_user_role(test_id)
if role in (const.ROLE_FOUNDATION, const.ROLE_OWNER):
return db.get_test_result_meta_key(test_id, key)
        elif role in (const.ROLE_USER,) and key in self.rw_access_keys:
return db.get_test_result_meta_key(test_id, key)
pecan.abort(403)
@_check_key
@api_utils.check_permissions(level=const.ROLE_OWNER)
@pecan.expose('json')
def post(self, test_id, key):
"""Save value for key in test run metadata."""
test = db.get_test_result(test_id)
if test['verification_status'] == const.TEST_VERIFIED:
pecan.abort(403, 'Can not add/alter a new metadata key for a '
'verified test run.')
db.save_test_result_meta_item(test_id, key, pecan.request.body)
pecan.response.status = 201
@_check_key
@api_utils.check_permissions(level=const.ROLE_OWNER)
@pecan.expose('json')
def delete(self, test_id, key):
"""Delete key from test run metadata."""
test = db.get_test_result(test_id)
if test['verification_status'] == const.TEST_VERIFIED:
pecan.abort(403, 'Can not delete a metadata key for a '
'verified test run.')
db.delete_test_result_meta_item(test_id, key)
pecan.response.status = 204
class ResultsController(validation.BaseRestControllerWithValidation):
"""/v1/results handler."""
__validator__ = validators.TestResultValidator
meta = MetadataController()
def _check_authentication(self):
x_public_key = pecan.request.headers.get('X-Public-Key')
if x_public_key:
public_key = x_public_key.strip().split()[1]
stored_public_key = db.get_pubkey(public_key)
if not stored_public_key:
pecan.abort(401, 'User with specified key not found. '
'Please log into the RefStack server to '
'upload your key.')
else:
stored_public_key = None
if not CONF.api.enable_anonymous_upload and not stored_public_key:
pecan.abort(401, 'Anonymous result uploads are disabled. '
'Please create a user account and an api '
'key at https://refstack.openstack.org/#/')
return stored_public_key
def _auto_version_associate(self, test, test_, pubkey):
if test.get('cpid'):
version = db.get_product_version_by_cpid(
test['cpid'], allowed_keys=['id', 'product_id'])
# Only auto-associate if there is a single product version
# with the given cpid.
if len(version) == 1:
is_foundation = api_utils.check_user_is_foundation_admin(
pubkey.openid)
is_product_admin = api_utils.check_user_is_product_admin(
version[0]['product_id'], pubkey.openid)
if is_foundation or is_product_admin:
test_['product_version_id'] = version[0]['id']
return test_
@pecan.expose('json')
@api_utils.check_permissions(level=const.ROLE_USER)
def get_one(self, test_id):
"""Handler for getting item."""
user_role = api_utils.get_user_role(test_id)
if user_role in (const.ROLE_FOUNDATION, const.ROLE_OWNER):
test_info = db.get_test_result(
test_id, allowed_keys=['id', 'cpid', 'created_at',
'duration_seconds', 'meta',
'product_version',
'verification_status']
)
else:
test_info = db.get_test_result(test_id)
test_list = db.get_test_results(test_id)
test_name_list = [test_dict['name'] for test_dict in test_list]
test_info.update({'results': test_name_list,
'user_role': user_role})
if user_role not in (const.ROLE_FOUNDATION, const.ROLE_OWNER):
# Don't expose product information if product is not public.
if (test_info.get('product_version') and
not test_info['product_version']
['product_info']['public']):
test_info['product_version'] = None
test_info['meta'] = {
k: v for k, v in test_info['meta'].items()
if k in MetadataController.rw_access_keys
}
return test_info
def store_item(self, test):
"""Handler for storing item. Should return new item id."""
        # If a key is required but missing or unknown, _check_authentication
        # aborts the request with a 401.
pubkey = self._check_authentication()
test_ = test.copy()
if pubkey:
if 'meta' not in test_:
test_['meta'] = {}
test_['meta'][const.USER] = pubkey.openid
test_ = self._auto_version_associate(test, test_, pubkey)
test_id = db.store_test_results(test_)
return {'test_id': test_id,
'url': parse.urljoin(CONF.ui_url,
CONF.api.test_results_url) % test_id}
@pecan.expose('json')
@api_utils.check_permissions(level=const.ROLE_OWNER)
def delete(self, test_id):
"""Delete test run."""
test = db.get_test_result(test_id)
if test['verification_status'] == const.TEST_VERIFIED:
pecan.abort(403, 'Can not delete a verified test run.')
db.delete_test_result(test_id)
pecan.response.status = 204
@pecan.expose('json')
def get(self):
"""Get information of all uploaded test results.
Get information of all uploaded test results in descending
chronological order. Make it possible to specify some
input parameters for filtering.
For example:
/v1/results?page=<page number>&cpid=1234.
By default, page is set to page number 1,
if the page parameter is not specified.
"""
expected_input_params = [
const.START_DATE,
const.END_DATE,
const.CPID,
const.SIGNED,
const.VERIFICATION_STATUS,
const.PRODUCT_ID
]
filters = api_utils.parse_input_params(expected_input_params)
if const.PRODUCT_ID in filters:
product = db.get_product(filters[const.PRODUCT_ID])
vendor_id = product['organization_id']
is_admin = (api_utils.check_user_is_foundation_admin() or
api_utils.check_user_is_vendor_admin(vendor_id))
if is_admin:
filters[const.ALL_PRODUCT_TESTS] = True
elif not product['public']:
pecan.abort(403, 'Forbidden.')
records_count = db.get_test_result_records_count(filters)
page_number, total_pages_number = \
api_utils.get_page_number(records_count)
try:
per_page = CONF.api.results_per_page
results = db.get_test_result_records(
page_number, per_page, filters)
is_foundation = api_utils.check_user_is_foundation_admin()
for result in results:
if not (api_utils.check_owner(result['id']) or is_foundation):
# Don't expose product info if the product is not public.
if (result.get('product_version') and not
result['product_version']['product_info']
['public']):
result['product_version'] = None
# Only show all metadata if the user is the owner or a
# member of the Foundation group.
result['meta'] = {
k: v for k, v in result['meta'].items()
if k in MetadataController.rw_access_keys
}
result.update({'url': parse.urljoin(
CONF.ui_url, CONF.api.test_results_url
) % result['id']})
page = {'results': results,
'pagination': {
'current_page': page_number,
'total_pages': total_pages_number
}}
except Exception as ex:
            LOG.debug('An error occurred during a database '
                      'operation: %s', ex)
pecan.abort(500)
return page
@api_utils.check_permissions(level=const.ROLE_OWNER)
@pecan.expose('json')
def put(self, test_id, **kw):
"""Update a test result."""
test_info = {'id': test_id}
is_foundation_admin = api_utils.check_user_is_foundation_admin()
if 'product_version_id' in kw:
test = db.get_test_result(test_id)
if test['verification_status'] == const.TEST_VERIFIED:
pecan.abort(403, 'Can not update product_version_id for a '
'verified test run.')
if kw['product_version_id']:
# Verify that the user is a member of the product's vendor.
version = db.get_product_version(kw['product_version_id'],
allowed_keys=['product_id'])
is_vendor_admin = (
api_utils
.check_user_is_product_admin(version['product_id'])
)
else:
# No product vendor to check membership for, so just set
# is_vendor_admin to True.
is_vendor_admin = True
kw['product_version_id'] = None
if not is_vendor_admin and not is_foundation_admin:
pecan.abort(403, 'Forbidden.')
test_info['product_version_id'] = kw['product_version_id']
if 'verification_status' in kw:
if not is_foundation_admin:
pecan.abort(403, 'You do not have permission to change a '
'verification status.')
if kw['verification_status'] not in (0, 1):
pecan.abort(400, 'Invalid verification_status value: %d' %
kw['verification_status'])
# Check pre-conditions are met to mark a test verified.
if (kw['verification_status'] == 1 and
not (db.get_test_result_meta_key(test_id, 'target') and
db.get_test_result_meta_key(test_id, 'guideline') and
db.get_test_result_meta_key(test_id,
const.SHARED_TEST_RUN))):
pecan.abort(403, 'In order to mark a test verified, the '
'test must be shared and have been '
'associated to a guideline and target '
'program.')
test_info['verification_status'] = kw['verification_status']
test = db.update_test_result(test_info)
pecan.response.status = 201
return test
```
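For context, a hedged client-side sketch of the upload/fetch flow served by `ResultsController`: the endpoint paths and payload keys come from the controller code above, while the host and concrete values are placeholders (signed uploads would additionally send the `X-Signature`/`X-Public-Key` headers checked in `_check_authentication`).

```python
import json
import requests

BASE_URL = 'http://localhost:8000/v1/results'  # placeholder host

payload = {
    'cpid': 'example-cloud-id',          # placeholder cloud provider id
    'duration_seconds': 42,
    'results': [{'name': 'tempest.foo.bar'}],
}

# POST a new (anonymous) test result; the response carries 'test_id' and 'url'.
resp = requests.post(BASE_URL, data=json.dumps(payload),
                     headers={'Content-Type': 'application/json'})
test_id = resp.json()['test_id']

# Fetch it back, and list page 1 filtered by cpid.
print(requests.get('%s/%s' % (BASE_URL, test_id)).json())
print(requests.get(BASE_URL, params={'page': 1, 'cpid': 'example-cloud-id'}).json())
```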
#### File: api/controllers/validation.py
```python
import json
import pecan
from pecan import rest
class BaseRestControllerWithValidation(rest.RestController):
"""Rest controller with validation.
    The controller provides validation for POSTed data. Exposed endpoints:
        POST base_url/
        GET base_url/<item uid>
        GET base_url/schema
"""
__validator__ = None
_custom_actions = {
"schema": ["GET"],
}
def __init__(self): # pragma: no cover
"""Init."""
if self.__validator__:
self.validator = self.__validator__()
else:
raise ValueError("__validator__ is not defined")
def store_item(self, item_in_json): # pragma: no cover
"""Handler for storing item. Should return new item id."""
raise NotImplementedError
@pecan.expose('json')
def schema(self):
"""Return validation schema."""
return self.validator.schema
@pecan.expose('json')
    def post(self):
"""POST handler."""
self.validator.validate(pecan.request)
item = json.loads(pecan.request.body.decode('utf-8'))
item_id = self.store_item(item)
pecan.response.status = 201
return item_id
```
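A minimal sketch of how a concrete controller plugs into this base class; `FooValidator` and `FooController` are hypothetical, but the pattern (a `__validator__` with `schema`/`validate()` plus a `store_item()` override) is the same one `ResultsController` uses above.

```python
from refstack.api.controllers import validation

class FooValidator(object):
    """Hypothetical validator: exposes a JSON schema and a validate() hook."""
    schema = {'type': 'object', 'required': ['name']}

    def validate(self, request):
        # Raise (or pecan.abort) here when the request body is invalid.
        pass

class FooController(validation.BaseRestControllerWithValidation):
    __validator__ = FooValidator

    def store_item(self, item):
        # Persist `item` somewhere and return its new id.
        return {'foo_id': 'generated-id'}
```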
#### File: refstack/api/guidelines.py
```python
import itertools
from oslo_config import cfg
from oslo_log import log
from operator import itemgetter
import re
import requests
import requests_cache
CONF = cfg.CONF
LOG = log.getLogger(__name__)
# Cached requests will expire after 12 hours.
requests_cache.install_cache(cache_name='github_cache',
backend='memory',
expire_after=43200)
class Guidelines:
"""This class handles guideline/capability listing and retrieval."""
def __init__(self,
repo_url=None,
raw_url=None,
additional_capability_urls=None):
"""Initialize class with needed URLs.
The URL for the guidelines repository is specified with 'repo_url'.
The URL for where raw files are served is specified with 'raw_url'.
These values will default to the values specified in the RefStack
config file.
"""
self.guideline_sources = list()
if additional_capability_urls:
self.additional_urls = additional_capability_urls.split(',')
else:
self.additional_urls = \
CONF.api.additional_capability_urls.split(',')
        self.guideline_sources.extend(self.additional_urls)
if repo_url:
self.repo_url = repo_url
else:
self.repo_url = CONF.api.github_api_capabilities_url
if self.repo_url and self.repo_url not in self.guideline_sources:
self.guideline_sources.append(self.repo_url)
if raw_url:
self.raw_url = raw_url
else:
self.raw_url = CONF.api.github_raw_base_url
def get_guideline_list(self):
"""Return a list of a guideline files.
The repository url specificed in class instantiation is checked
for a list of JSON guideline files. A list of these is returned.
"""
capability_files = {}
capability_list = []
powered_files = []
addon_files = []
for src_url in self.guideline_sources:
try:
resp = requests.get(src_url)
LOG.debug("Response Status: %s / Used Requests Cache: %s" %
(resp.status_code,
getattr(resp, 'from_cache', False)))
if resp.status_code == 200:
                    regex = re.compile(r'([0-9]{4}\.[0-9]{2}|next)\.json')
for rfile in resp.json():
if rfile["type"] == "file" and \
regex.search(rfile["name"]):
                            if ('add-ons' in rfile['path'] and
                                    rfile['name'] not in
                                    map(itemgetter('name'), addon_files)):
                                file_dict = {'name': rfile['name']}
                                addon_files.append(file_dict)
elif 'add-ons' not in rfile['path'] and \
rfile['name'] not in map(itemgetter('name'),
powered_files):
file_dict = {'name': rfile['name'],
'file': rfile['path']}
powered_files.append(file_dict)
else:
LOG.warning('Guidelines repo URL (%s) returned '
'non-success HTTP code: %s' %
(src_url, resp.status_code))
except requests.exceptions.RequestException as e:
LOG.warning('An error occurred trying to get repository '
'contents through %s: %s' % (src_url, e))
for k, v in itertools.groupby(addon_files,
key=lambda x: x['name'].split('.')[0]):
values = [{'name': x['name'].split('.', 1)[1], 'file': x['name']}
for x in list(v)]
capability_list.append((k, list(values)))
capability_list.append(('powered', powered_files))
capability_files = dict((x, y) for x, y in capability_list)
return capability_files
def get_guideline_contents(self, gl_file):
"""Get contents for a given guideline path."""
if '.json' not in gl_file:
gl_file = '.'.join((gl_file, 'json'))
regex = re.compile("[a-z]*\.([0-9]{4}\.[0-9]{2}|next)\.json")
if regex.search(gl_file):
guideline_path = 'add-ons/' + gl_file
else:
guideline_path = gl_file
file_url = ''.join((self.raw_url.rstrip('/'),
'/', guideline_path))
LOG.debug("file_url: %s" % (file_url))
try:
response = requests.get(file_url)
LOG.debug("Response Status: %s / Used Requests Cache: %s" %
(response.status_code,
getattr(response, 'from_cache', False)))
LOG.debug("Response body: %s" % str(response.text))
if response.status_code == 200:
return response.json()
else:
LOG.warning('Raw guideline URL (%s) returned non-success HTTP '
'code: %s' % (self.raw_url, response.status_code))
return None
except requests.exceptions.RequestException as e:
LOG.warning('An error occurred trying to get raw capability file '
'contents from %s: %s' % (self.raw_url, e))
return None
def get_target_capabilities(self, guideline_json, types=None,
target='platform'):
"""Get list of capabilities that match the given statuses and target.
        If no list of types is given, capabilities of all types are
        returned. If no target is specified, the default 'platform' target
        is used.
"""
components = guideline_json['components']
if ('metadata' in guideline_json and
guideline_json['metadata']['schema'] >= '2.0'):
schema = guideline_json['metadata']['schema']
platformsMap = {
'platform': 'OpenStack Powered Platform',
'compute': 'OpenStack Powered Compute',
'object': 'OpenStack Powered Storage',
'dns': 'OpenStack with DNS',
'orchestration': 'OpenStack with Orchestration'
}
if target == 'dns' or target == 'orchestration':
targets = ['os_powered_' + target]
else:
                comps = (guideline_json['platforms']
                         [platformsMap[target]]['components'])
targets = (obj['name'] for obj in comps)
else:
schema = guideline_json['schema']
targets = set()
if target != 'platform':
targets.add(target)
else:
targets.update(guideline_json['platform']['required'])
target_caps = set()
for component in targets:
complist = components[component]
if schema >= '2.0':
complist = complist['capabilities']
for status, capabilities in complist.items():
if types is None or status in types:
target_caps.update(capabilities)
return list(target_caps)
def get_test_list(self, guideline_json, capabilities=[],
alias=True, show_flagged=True):
"""Generate a test list based on input.
A test list is formed from the given guideline JSON data and
list of capabilities. If 'alias' is True, test aliases are
included in the list. If 'show_flagged' is True, flagged tests are
included in the list.
"""
caps = guideline_json['capabilities']
if ('metadata' in guideline_json and
guideline_json['metadata']['schema'] >= '2.0'):
schema = guideline_json['metadata']['schema']
else:
schema = guideline_json['schema']
test_list = []
for cap, cap_details in caps.items():
if cap in capabilities:
if schema == '1.2':
for test in cap_details['tests']:
if show_flagged:
test_list.append(test)
elif not show_flagged and \
test not in cap_details['flagged']:
test_list.append(test)
else:
for test, test_details in cap_details['tests'].items():
added = False
if test_details.get('flagged'):
if show_flagged:
test_str = '{}[{}]'.format(
test,
test_details.get('idempotent_id', '')
)
test_list.append(test_str)
added = True
else:
# Make sure the test UUID is in the test string.
test_str = '{}[{}]'.format(
test,
test_details.get('idempotent_id', '')
)
test_list.append(test_str)
added = True
if alias and test_details.get('aliases') and added:
                            for alias_name in test_details['aliases']:
                                test_str = '{}[{}]'.format(
                                    alias_name,
                                    test_details.get('idempotent_id', '')
                                )
test_list.append(test_str)
test_list.sort()
return test_list
```
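A hedged usage sketch for the `Guidelines` helper above. The two URLs are placeholders for the values normally read from the RefStack config, and the 'required' status name is taken from the schema-1.x handling in `get_target_capabilities`.

```python
guidelines = Guidelines(
    repo_url='https://api.github.com/repos/example/interop/contents',    # placeholder
    raw_url='https://raw.githubusercontent.com/example/interop/master')  # placeholder

# List available guideline files, grouped into 'powered' and add-on programs.
listing = guidelines.get_guideline_list()

# Fetch one guideline and derive a test list for the default 'platform'
# target, keeping only required capabilities and hiding flagged tests.
gl = guidelines.get_guideline_contents('2016.01')
if gl:
    caps = guidelines.get_target_capabilities(gl, types=['required'])
    tests = guidelines.get_test_list(gl, capabilities=caps,
                                     alias=True, show_flagged=False)
```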
#### File: db/sqlalchemy/api.py
```python
import base64
import hashlib
import sys
import uuid
from oslo_config import cfg
from oslo_db import options as db_options
from oslo_db.sqlalchemy import session as db_session
from oslo_log import log
from refstack.api import constants as api_const
from refstack.db.sqlalchemy import models
CONF = cfg.CONF
_FACADE = None
LOG = log.getLogger(__name__)
db_options.set_defaults(cfg.CONF)
class NotFound(Exception):
"""Raise if item not found in db."""
pass
class Duplication(Exception):
"""Raise if unique constraint violates."""
pass
def _create_facade_lazily():
"""Create DB facade lazily."""
global _FACADE
if _FACADE is None:
_FACADE = db_session.EngineFacade.from_config(CONF)
return _FACADE
def get_engine():
"""Get DB engine."""
facade = _create_facade_lazily()
return facade.get_engine()
def get_session(**kwargs):
"""Get DB session."""
facade = _create_facade_lazily()
return facade.get_session(**kwargs)
def get_backend():
"""The backend is this module itself."""
return sys.modules[__name__]
def _to_dict(sqlalchemy_object, allowed_keys=None):
if isinstance(sqlalchemy_object, list):
return [_to_dict(obj, allowed_keys=allowed_keys)
for obj in sqlalchemy_object]
if (hasattr(sqlalchemy_object, 'keys')
and hasattr(sqlalchemy_object, 'index')):
return {key: getattr(sqlalchemy_object, key)
for key in sqlalchemy_object.keys()}
if hasattr(sqlalchemy_object, 'default_allowed_keys'):
items = sqlalchemy_object.iteritems()
if not allowed_keys:
allowed_keys = sqlalchemy_object.default_allowed_keys
if allowed_keys:
items = filter(lambda item: item[0] in allowed_keys, items)
result = {}
for key, value in items:
if key in sqlalchemy_object.metadata_keys:
result[key] = {
item.get(sqlalchemy_object.metadata_keys[key]['key']):
item.get(sqlalchemy_object.metadata_keys[key]['value'])
for item in value}
elif hasattr(value, 'default_allowed_keys'):
result[key] = _to_dict(value)
elif (isinstance(value, list) and value
and hasattr(value[0], 'default_allowed_keys')):
result[key] = [_to_dict(item) for item in value]
else:
result[key] = value
return result
if hasattr(sqlalchemy_object, 'all'):
return _to_dict(sqlalchemy_object.all())
return sqlalchemy_object
def store_test_results(results):
"""Store test results."""
test = models.Test()
test_id = str(uuid.uuid4())
test.id = test_id
test.cpid = results.get('cpid')
test.duration_seconds = results.get('duration_seconds')
test.product_version_id = results.get('product_version_id')
session = get_session()
with session.begin():
for result in results.get('results', []):
test_result = models.TestResults()
test_result.test_id = test_id
test_result.name = result['name']
test_result.uuid = result.get('uuid', None)
test.results.append(test_result)
for k, v in results.get('meta', {}).items():
meta = models.TestMeta()
meta.meta_key, meta.value = k, v
test.meta.append(meta)
test.save(session)
return test_id
def get_test_result(test_id, allowed_keys=None):
"""Get test info."""
session = get_session()
test_info = session.query(models.Test). \
filter_by(id=test_id). \
first()
if not test_info:
raise NotFound('Test result %s not found' % test_id)
return _to_dict(test_info, allowed_keys)
def delete_test_result(test_id):
"""Delete test information from the database."""
session = get_session()
with session.begin():
test = session.query(models.Test).filter_by(id=test_id).first()
if test:
session.query(models.TestMeta) \
.filter_by(test_id=test_id).delete()
session.query(models.TestResults) \
.filter_by(test_id=test_id).delete()
session.delete(test)
else:
raise NotFound('Test result %s not found' % test_id)
def update_test_result(test_info):
"""Update test from the given test_info dictionary."""
session = get_session()
_id = test_info.get('id')
test = session.query(models.Test).filter_by(id=_id).first()
if test is None:
raise NotFound('Test result with id %s not found' % _id)
keys = ['product_version_id', 'verification_status']
for key in keys:
if key in test_info:
setattr(test, key, test_info[key])
with session.begin():
test.save(session=session)
return _to_dict(test)
def get_test_result_meta_key(test_id, key, default=None):
"""Get metadata value related to specified test run."""
session = get_session()
meta_item = session.query(models.TestMeta). \
filter_by(test_id=test_id). \
filter_by(meta_key=key). \
first()
value = meta_item.value if meta_item else default
return value
def save_test_result_meta_item(test_id, key, value):
"""Store or update item value related to specified test run."""
session = get_session()
meta_item = (session.query(models.TestMeta)
.filter_by(test_id=test_id)
.filter_by(meta_key=key).first() or models.TestMeta())
meta_item.test_id = test_id
meta_item.meta_key = key
meta_item.value = value
with session.begin():
meta_item.save(session)
def delete_test_result_meta_item(test_id, key):
"""Delete metadata item related to specified test run."""
session = get_session()
meta_item = session.query(models.TestMeta). \
filter_by(test_id=test_id). \
filter_by(meta_key=key). \
first()
if meta_item:
with session.begin():
session.delete(meta_item)
else:
raise NotFound('Metadata key %s '
'not found for test run %s' % (key, test_id))
def get_test_results(test_id):
"""Get test results."""
session = get_session()
results = session.query(models.TestResults). \
filter_by(test_id=test_id). \
all()
return [_to_dict(result) for result in results]
def _apply_filters_for_query(query, filters):
"""Apply filters for DB query."""
start_date = filters.get(api_const.START_DATE)
if start_date:
query = query.filter(models.Test.created_at >= start_date)
end_date = filters.get(api_const.END_DATE)
if end_date:
query = query.filter(models.Test.created_at <= end_date)
cpid = filters.get(api_const.CPID)
if cpid:
query = query.filter(models.Test.cpid == cpid)
verification_status = filters.get(api_const.VERIFICATION_STATUS)
if verification_status:
query = query.filter(models.Test.verification_status ==
verification_status)
if api_const.PRODUCT_ID in filters:
query = (query
.join(models.ProductVersion)
.filter(models.ProductVersion.product_id ==
filters[api_const.PRODUCT_ID]))
all_product_tests = filters.get(api_const.ALL_PRODUCT_TESTS)
signed = api_const.SIGNED in filters
# If we only want to get the user's test results.
if signed:
query = (query
.join(models.Test.meta)
.filter(models.TestMeta.meta_key == api_const.USER)
.filter(models.TestMeta.value == filters[api_const.OPENID])
)
elif not all_product_tests:
# Get all non-signed (aka anonymously uploaded) test results
# along with signed but shared test results.
signed_results = (query.session
.query(models.TestMeta.test_id)
.filter_by(meta_key=api_const.USER))
shared_results = (query.session
.query(models.TestMeta.test_id)
.filter_by(meta_key=api_const.SHARED_TEST_RUN))
query = (query.filter(models.Test.id.notin_(signed_results))
.union(query.filter(models.Test.id.in_(shared_results))))
return query
def get_test_result_records(page, per_page, filters):
"""Get page with list of test records."""
session = get_session()
query = session.query(models.Test)
query = _apply_filters_for_query(query, filters)
results = query.order_by(models.Test.created_at.desc()). \
offset(per_page * (page - 1)). \
limit(per_page).all()
return _to_dict(results)
def get_test_result_records_count(filters):
"""Get total test records count."""
session = get_session()
query = session.query(models.Test.id)
records_count = _apply_filters_for_query(query, filters).count()
return records_count
def user_get(user_openid):
"""Get user info by openid."""
session = get_session()
user = session.query(models.User).filter_by(openid=user_openid).first()
if user is None:
raise NotFound('User with OpenID %s not found' % user_openid)
return user
def user_save(user_info):
"""Create user DB record if it exists, otherwise record will be updated."""
try:
user = user_get(user_info['openid'])
except NotFound:
user = models.User()
session = get_session()
with session.begin():
user.update(user_info)
user.save(session=session)
return user
def get_pubkey(key):
"""Get the pubkey info corresponding to the given public key.
The md5 hash of the key is used for the query for quicker lookups.
"""
session = get_session()
md5_hash = hashlib.md5(base64.b64decode(key)).hexdigest()
pubkeys = session.query(models.PubKey).filter_by(md5_hash=md5_hash).all()
if len(pubkeys) == 1:
return pubkeys[0]
elif len(pubkeys) > 1:
for pubkey in pubkeys:
if pubkey['pubkey'] == key:
return pubkey
return None
def store_pubkey(pubkey_info):
"""Store public key in to DB."""
pubkey = models.PubKey()
pubkey.openid = pubkey_info['openid']
pubkey.format = pubkey_info['format']
pubkey.pubkey = pubkey_info['pubkey']
pubkey.md5_hash = hashlib.md5(
base64.b64decode(
pubkey_info['pubkey']
)
).hexdigest()
pubkey.comment = pubkey_info['comment']
session = get_session()
with session.begin():
pubkeys_collision = (session.
query(models.PubKey).
filter_by(md5_hash=pubkey.md5_hash).
filter_by(pubkey=pubkey.pubkey).all())
if not pubkeys_collision:
pubkey.save(session)
else:
raise Duplication('Public key already exists.')
return pubkey.id
def delete_pubkey(id):
"""Delete public key from DB."""
session = get_session()
with session.begin():
key = session.query(models.PubKey).filter_by(id=id).first()
session.delete(key)
def get_user_pubkeys(user_openid):
"""Get public pubkeys for specified user."""
session = get_session()
pubkeys = session.query(models.PubKey).filter_by(openid=user_openid).all()
return _to_dict(pubkeys)
def add_user_to_group(user_openid, group_id, created_by_user):
"""Add specified user to specified group."""
item = models.UserToGroup()
session = get_session()
with session.begin():
item.user_openid = user_openid
item.group_id = group_id
item.created_by_user = created_by_user
item.save(session=session)
def remove_user_from_group(user_openid, group_id):
"""Remove specified user from specified group."""
session = get_session()
with session.begin():
(session.query(models.UserToGroup).
filter_by(user_openid=user_openid).
filter_by(group_id=group_id).
delete(synchronize_session=False))
def add_organization(organization_info, creator):
"""Add organization."""
session = get_session()
with session.begin():
group = models.Group()
group.name = 'Group for %s' % organization_info['name']
group.save(session=session)
group_id = group.id
item = models.UserToGroup()
item.user_openid = creator
item.group_id = group_id
item.created_by_user = creator
item.save(session=session)
organization = models.Organization()
organization.type = organization_info.get(
'type', api_const.PRIVATE_VENDOR)
organization.name = organization_info['name']
organization.description = organization_info.get('description')
organization.group_id = group_id
organization.created_by_user = creator
organization.properties = organization_info.get('properties')
organization.save(session=session)
return _to_dict(organization)
def update_organization(organization_info):
"""Update organization."""
session = get_session()
_id = organization_info['id']
organization = (session.query(models.Organization).
filter_by(id=_id).first())
if organization is None:
raise NotFound('Organization with id %s not found' % _id)
with session.begin():
organization.type = organization_info.get(
'type', organization.type)
organization.name = organization_info.get(
'name', organization.name)
organization.description = organization_info.get(
'description', organization.description)
organization.properties = organization_info.get(
'properties', organization.properties)
organization.save(session=session)
return _to_dict(organization)
def get_organization(organization_id, allowed_keys=None):
"""Get organization by id."""
session = get_session()
organization = (session.query(models.Organization).
filter_by(id=organization_id).first())
if organization is None:
raise NotFound('Organization with id %s not found' % organization_id)
return _to_dict(organization, allowed_keys=allowed_keys)
def delete_organization(organization_id):
"""delete organization by id."""
session = get_session()
with session.begin():
product_ids = (session
.query(models.Product.id)
.filter_by(organization_id=organization_id))
(session.query(models.ProductVersion).
filter(models.ProductVersion.product_id.in_(product_ids)).
delete(synchronize_session=False))
(session.query(models.Product).
filter_by(organization_id=organization_id).
delete(synchronize_session=False))
(session.query(models.Organization).
filter_by(id=organization_id).
delete(synchronize_session=False))
def add_product(product_info, creator):
"""Add product."""
product = models.Product()
product.id = str(uuid.uuid4())
product.type = product_info['type']
product.product_type = product_info['product_type']
product.product_ref_id = product_info.get('product_ref_id')
product.name = product_info['name']
product.description = product_info.get('description')
product.organization_id = product_info['organization_id']
product.created_by_user = creator
product.public = product_info.get('public', False)
product.properties = product_info.get('properties')
session = get_session()
with session.begin():
product.save(session=session)
product_version = models.ProductVersion()
product_version.created_by_user = creator
product_version.version = product_info.get('version')
product_version.product_id = product.id
product_version.save(session=session)
return _to_dict(product)
def update_product(product_info):
"""Update product by id."""
session = get_session()
_id = product_info.get('id')
product = session.query(models.Product).filter_by(id=_id).first()
if product is None:
raise NotFound('Product with id %s not found' % _id)
keys = ['name', 'description', 'product_ref_id', 'public', 'properties']
for key in keys:
if key in product_info:
setattr(product, key, product_info[key])
with session.begin():
product.save(session=session)
return _to_dict(product)
def get_product(id, allowed_keys=None):
"""Get product by id."""
session = get_session()
product = session.query(models.Product).filter_by(id=id).first()
if product is None:
raise NotFound('Product with id "%s" not found' % id)
return _to_dict(product, allowed_keys=allowed_keys)
def delete_product(id):
"""delete product by id."""
session = get_session()
with session.begin():
(session.query(models.ProductVersion)
.filter_by(product_id=id)
.delete(synchronize_session=False))
(session.query(models.Product).filter_by(id=id).
delete(synchronize_session=False))
def get_foundation_users():
"""Get users' openid-s that belong to group of foundation."""
session = get_session()
organization = (
session.query(models.Organization.group_id)
.filter_by(type=api_const.FOUNDATION).first())
if organization is None:
LOG.warning('Foundation organization record not found in DB.')
return []
group_id = organization.group_id
users = (session.query(models.UserToGroup.user_openid).
filter_by(group_id=group_id))
return [user.user_openid for user in users]
def get_organization_users(organization_id):
"""Get users that belong to group of organization."""
session = get_session()
organization = (session.query(models.Organization.group_id)
.filter_by(id=organization_id).first())
if organization is None:
        raise NotFound('Organization with id %s not found'
                       % organization_id)
group_id = organization.group_id
users = (session.query(models.UserToGroup, models.User)
.join(models.User,
models.User.openid == models.UserToGroup.user_openid)
.filter(models.UserToGroup.group_id == group_id))
keys = ['openid', 'fullname', 'email']
return {item[1].openid: _to_dict(item[1], allowed_keys=keys)
for item in users}
def get_organizations(allowed_keys=None):
"""Get all organizations."""
session = get_session()
items = (
session.query(models.Organization)
.order_by(models.Organization.created_at.desc()).all())
return _to_dict(items, allowed_keys=allowed_keys)
def get_organizations_by_types(types, allowed_keys=None):
"""Get organization by list of types."""
session = get_session()
items = (
session.query(models.Organization)
.filter(models.Organization.type.in_(types))
.order_by(models.Organization.created_at.desc()).all())
return _to_dict(items, allowed_keys=allowed_keys)
def get_organizations_by_user(user_openid, allowed_keys=None):
"""Get organizations for specified user."""
session = get_session()
items = (
session.query(models.Organization, models.Group, models.UserToGroup)
.join(models.Group,
models.Group.id == models.Organization.group_id)
.join(models.UserToGroup,
models.Group.id == models.UserToGroup.group_id)
.filter(models.UserToGroup.user_openid == user_openid)
.order_by(models.Organization.created_at.desc()).all())
items = [item[0] for item in items]
return _to_dict(items, allowed_keys=allowed_keys)
def get_products(allowed_keys=None, filters=None):
"""Get products based on passed in filters."""
if filters is None:
filters = {}
expected_filters = ['public', 'organization_id']
filter_args = {}
for key, value in filters.items():
if key not in expected_filters:
raise Exception('Unknown filter key "%s"' % key)
filter_args[key] = value
session = get_session()
query = session.query(models.Product)
if filter_args:
query = query.filter_by(**filter_args)
items = query.order_by(models.Product.created_at.desc()).all()
return _to_dict(items, allowed_keys=allowed_keys)
def get_products_by_user(user_openid, allowed_keys=None, filters=None):
"""Get products that a user can manage."""
if filters is None:
filters = {}
session = get_session()
query = (
session.query(models.Product, models.Organization, models.Group,
models.UserToGroup)
.join(models.Organization,
models.Organization.id == models.Product.organization_id)
.join(models.Group,
models.Group.id == models.Organization.group_id)
.join(models.UserToGroup,
models.Group.id == models.UserToGroup.group_id)
.filter(models.UserToGroup.user_openid == user_openid))
expected_filters = ['organization_id']
for key, value in filters.items():
if key not in expected_filters:
raise Exception('Unknown filter key "%s"' % key)
query = query.filter(getattr(models.Product, key) ==
filters[key])
items = query.order_by(models.Organization.created_at.desc()).all()
items = [item[0] for item in items]
return _to_dict(items, allowed_keys=allowed_keys)
def get_product_by_version(product_version_id, allowed_keys=None):
"""Get product info from a product version ID."""
session = get_session()
product = (session.query(models.Product).join(models.ProductVersion)
.filter(models.ProductVersion.id == product_version_id).first())
return _to_dict(product, allowed_keys=allowed_keys)
def get_product_version(product_version_id, allowed_keys=None):
"""Get details of a specific version given the id."""
session = get_session()
version = (
session.query(models.ProductVersion)
.filter_by(id=product_version_id).first()
)
if version is None:
raise NotFound('Version with id "%s" not found' % product_version_id)
return _to_dict(version, allowed_keys=allowed_keys)
def get_product_version_by_cpid(cpid, allowed_keys=None):
"""Get a product version given a cloud provider id."""
session = get_session()
version = (
session.query(models.ProductVersion)
.filter_by(cpid=cpid).all()
)
return _to_dict(version, allowed_keys=allowed_keys)
def get_product_versions(product_id, allowed_keys=None):
"""Get all versions for a product."""
session = get_session()
version_info = (
session.query(models.ProductVersion)
.filter_by(product_id=product_id).all()
)
return _to_dict(version_info, allowed_keys=allowed_keys)
def add_product_version(product_id, version, creator, cpid, allowed_keys=None):
"""Add a new product version."""
product_version = models.ProductVersion()
product_version.created_by_user = creator
product_version.version = version
product_version.product_id = product_id
product_version.cpid = cpid
session = get_session()
with session.begin():
product_version.save(session=session)
return _to_dict(product_version, allowed_keys=allowed_keys)
def update_product_version(product_version_info):
"""Update product version from product_info_version dictionary."""
session = get_session()
_id = product_version_info.get('id')
version = session.query(models.ProductVersion).filter_by(id=_id).first()
if version is None:
raise NotFound('Product version with id %s not found' % _id)
# Only allow updating cpid.
keys = ['cpid']
for key in keys:
if key in product_version_info:
setattr(version, key, product_version_info[key])
with session.begin():
version.save(session=session)
return _to_dict(version)
def delete_product_version(product_version_id):
"""Delete a product version."""
session = get_session()
with session.begin():
(session.query(models.ProductVersion).filter_by(id=product_version_id).
delete(synchronize_session=False))
```
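A small, hedged sketch of the typical call sequence into this DB layer, using the same `refstack.db` facade the controllers above import; it assumes oslo.config has already been loaded with a working database connection.

```python
from refstack import db

# Store an uploaded result set and read it back.
test_id = db.store_test_results({
    'cpid': 'example-cloud-id',
    'duration_seconds': 42,
    'results': [{'name': 'tempest.foo.bar'}],
    'meta': {'shared': 'true'},
})
info = db.get_test_result(test_id)
names = [r['name'] for r in db.get_test_results(test_id)]

# Update a mutable field, then clean up.
db.update_test_result({'id': test_id, 'verification_status': 0})
db.delete_test_result(test_id)
```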
#### File: tests/api/test_profile.py
```python
import binascii
import json
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
import mock
import webtest.app
from refstack.tests import api
from refstack import db
class TestProfileEndpoint(api.FunctionalTest):
"""Test case for the 'profile' API endpoint."""
URL = '/v1/profile/'
def setUp(self):
super(TestProfileEndpoint, self).setUp()
self.user_info = {
'openid': 'test-open-id',
'email': '<EMAIL>',
'fullname': '<NAME>'
}
db.user_save(self.user_info)
@mock.patch('refstack.api.utils.get_user_id', return_value='test-open-id')
def test_get(self, mock_get_user):
response = self.get_json(self.URL)
self.user_info['is_admin'] = False
self.assertEqual(self.user_info, response)
@mock.patch('refstack.api.utils.get_user_id', return_value='test-open-id')
def test_pubkeys(self, mock_get_user):
"""Test '/v1/profile/pubkeys' API endpoint."""
url = self.URL + 'pubkeys'
key = rsa.generate_private_key(
public_exponent=65537,
key_size=1024,
backend=default_backend()
)
signer = key.signer(padding.PKCS1v15(), hashes.SHA256())
signer.update('signature'.encode('utf-8'))
sign = signer.finalize()
pubkey = key.public_key().public_bytes(
serialization.Encoding.OpenSSH,
serialization.PublicFormat.OpenSSH
).decode('utf-8')
body = {'raw_key': pubkey,
'self_signature': binascii.b2a_hex(sign).decode('utf-8')}
json_params = json.dumps(body)
# POST endpoint
pubkey_id = self.post_json(url, params=json_params)
# GET endpoint
user_pubkeys = self.get_json(url)
self.assertEqual(1, len(user_pubkeys))
self.assertEqual(pubkey.split()[1], user_pubkeys[0]['pubkey'])
self.assertEqual('ssh-rsa', user_pubkeys[0]['format'])
self.assertEqual(pubkey_id, user_pubkeys[0]['id'])
delete_url = '{}/{}'.format(url, pubkey_id)
# DELETE endpoint
response = self.delete(delete_url)
self.assertEqual(204, response.status_code)
user_pubkeys = self.get_json(url)
self.assertEqual(0, len(user_pubkeys))
# DELETE endpoint - nonexistent pubkey
self.assertRaises(webtest.app.AppError, self.delete, delete_url)
```
#### File: tests/api/test_results.py
```python
import json
import uuid
import mock
from oslo_config import fixture as config_fixture
import six
import webtest.app
from refstack.api import constants as api_const
from refstack.api import validators
from refstack import db
from refstack.tests import api
import binascii
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
FAKE_TESTS_RESULT = {
'cpid': 'foo',
'duration_seconds': 10,
'results': [
{'name': 'tempest.foo.bar'},
{'name': 'tempest.buzz',
'uid': '42'}
]
}
FAKE_JSON_WITH_EMPTY_RESULTS = {
'cpid': 'foo',
'duration_seconds': 20,
'results': [
]
}
class TestResultsEndpoint(api.FunctionalTest):
"""Test case for the 'results' API endpoint."""
URL = '/v1/results/'
def setUp(self):
super(TestResultsEndpoint, self).setUp()
self.config_fixture = config_fixture.Config()
self.CONF = self.useFixture(self.config_fixture).conf
def test_post(self):
"""Test results endpoint with post request."""
results = json.dumps(FAKE_TESTS_RESULT)
actual_response = self.post_json(self.URL, params=results)
self.assertIn('test_id', actual_response)
try:
uuid.UUID(actual_response.get('test_id'), version=4)
except ValueError:
self.fail("actual_response doesn't contain test_id")
def test_post_with_empty_result(self):
"""Test results endpoint with empty test results request."""
results = json.dumps(FAKE_JSON_WITH_EMPTY_RESULTS)
self.assertRaises(webtest.app.AppError,
self.post_json,
self.URL,
params=results)
def test_post_with_invalid_schema(self):
"""Test post request with invalid schema."""
results = json.dumps({
'foo': 'bar',
'duration_seconds': 999,
})
self.assertRaises(webtest.app.AppError,
self.post_json,
self.URL,
params=results)
@mock.patch('refstack.api.utils.check_owner')
@mock.patch('refstack.api.utils.check_user_is_foundation_admin')
@mock.patch('refstack.api.utils.get_user_id', return_value='test-open-id')
def test_put(self, mock_user, mock_check_foundation, mock_check_owner):
"""Test results endpoint with put request."""
results = json.dumps(FAKE_TESTS_RESULT)
test_response = self.post_json(self.URL, params=results)
test_id = test_response.get('test_id')
url = self.URL + test_id
user_info = {
'openid': 'test-open-id',
'email': '<EMAIL>',
'fullname': '<NAME>'
}
db.user_save(user_info)
fake_product = {
'name': 'product name',
'description': 'product description',
'product_type': api_const.CLOUD,
}
# Create a product
product_response = self.post_json('/v1/products/',
params=json.dumps(fake_product))
# Create a product version
version_url = '/v1/products/' + product_response['id'] + '/versions/'
version_response = self.post_json(version_url,
params=json.dumps({'version': '1'}))
# Test Foundation admin can put.
mock_check_foundation.return_value = True
body = {'product_version_id': version_response['id']}
self.put_json(url, params=json.dumps(body))
get_response = self.get_json(url)
self.assertEqual(version_response['id'],
get_response['product_version']['id'])
# Test when product_version_id is None.
body = {'product_version_id': None}
self.put_json(url, params=json.dumps(body))
get_response = self.get_json(url)
self.assertIsNone(get_response['product_version'])
# Test when test verification preconditions are not met.
body = {'verification_status': api_const.TEST_VERIFIED}
put_response = self.put_json(url, expect_errors=True,
params=json.dumps(body))
self.assertEqual(403, put_response.status_code)
# Share the test run.
db.save_test_result_meta_item(test_id, api_const.SHARED_TEST_RUN, True)
put_response = self.put_json(url, expect_errors=True,
params=json.dumps(body))
self.assertEqual(403, put_response.status_code)
# Now associate guideline and target program. Now we should be
# able to mark a test verified.
db.save_test_result_meta_item(test_id, 'target', 'platform')
db.save_test_result_meta_item(test_id, 'guideline', '2016.01.json')
put_response = self.put_json(url, params=json.dumps(body))
self.assertEqual(api_const.TEST_VERIFIED,
put_response['verification_status'])
# Unshare the test, and check that we can mark it not verified.
db.delete_test_result_meta_item(test_id, api_const.SHARED_TEST_RUN)
body = {'verification_status': api_const.TEST_NOT_VERIFIED}
put_response = self.put_json(url, params=json.dumps(body))
self.assertEqual(api_const.TEST_NOT_VERIFIED,
put_response['verification_status'])
# Test when verification_status value is invalid.
body = {'verification_status': 111}
put_response = self.put_json(url, expect_errors=True,
params=json.dumps(body))
self.assertEqual(400, put_response.status_code)
# Check test owner can put.
mock_check_foundation.return_value = False
mock_check_owner.return_value = True
body = {'product_version_id': version_response['id']}
self.put_json(url, params=json.dumps(body))
get_response = self.get_json(url)
self.assertEqual(version_response['id'],
get_response['product_version']['id'])
# Test non-Foundation user can't change verification_status.
body = {'verification_status': 1}
put_response = self.put_json(url, expect_errors=True,
params=json.dumps(body))
self.assertEqual(403, put_response.status_code)
# Test unauthorized put.
mock_check_foundation.return_value = False
mock_check_owner.return_value = False
self.assertRaises(webtest.app.AppError,
self.put_json,
url,
params=json.dumps(body))
def test_get_one(self):
"""Test get request."""
results = json.dumps(FAKE_TESTS_RESULT)
post_response = self.post_json(self.URL, params=results)
get_response = self.get_json(self.URL + post_response.get('test_id'))
# CPID is only exposed to the owner.
self.assertNotIn('cpid', get_response)
self.assertEqual(FAKE_TESTS_RESULT['duration_seconds'],
get_response['duration_seconds'])
for test in FAKE_TESTS_RESULT['results']:
self.assertIn(test['name'], get_response['results'])
def test_get_one_with_nonexistent_uuid(self):
"""Test get request with nonexistent uuid."""
self.assertRaises(webtest.app.AppError,
self.get_json,
self.URL + six.text_type(uuid.uuid4()))
def test_get_one_schema(self):
"""Test get request for getting JSON schema."""
validator = validators.TestResultValidator()
expected_schema = validator.schema
actual_schema = self.get_json(self.URL + 'schema')
self.assertEqual(actual_schema, expected_schema)
def test_get_one_invalid_url(self):
"""Test get request with invalid url."""
self.assertRaises(webtest.app.AppError,
self.get_json,
self.URL + 'fake_url')
def test_get_pagination(self):
self.CONF.set_override('results_per_page',
2,
'api')
responses = []
for i in range(3):
fake_results = {
'cpid': six.text_type(i),
'duration_seconds': i,
'results': [
{'name': 'tempest.foo.bar'},
{'name': 'tempest.buzz'}
]
}
actual_response = self.post_json(self.URL,
params=json.dumps(fake_results))
responses.append(actual_response)
page_one = self.get_json(self.URL)
page_two = self.get_json('/v1/results?page=2')
self.assertEqual(len(page_one['results']), 2)
self.assertEqual(len(page_two['results']), 1)
self.assertNotIn(page_two['results'][0], page_one)
self.assertEqual(page_one['pagination']['current_page'], 1)
self.assertEqual(page_one['pagination']['total_pages'], 2)
self.assertEqual(page_two['pagination']['current_page'], 2)
self.assertEqual(page_two['pagination']['total_pages'], 2)
def test_get_with_not_existing_page(self):
self.assertRaises(webtest.app.AppError,
self.get_json,
'/v1/results?page=2')
def test_get_with_empty_database(self):
results = self.get_json(self.URL)
self.assertEqual([], results['results'])
def test_get_with_cpid_filter(self):
self.CONF.set_override('results_per_page',
2,
'api')
responses = []
for i in range(2):
fake_results = {
'cpid': '12345',
'duration_seconds': i,
'results': [
{'name': 'tempest.foo'},
{'name': 'tempest.bar'}
]
}
json_result = json.dumps(fake_results)
actual_response = self.post_json(self.URL,
params=json_result)
responses.append(actual_response)
for i in range(3):
fake_results = {
'cpid': '54321',
'duration_seconds': i,
'results': [
{'name': 'tempest.foo'},
{'name': 'tempest.bar'}
]
            }
            json_result = json.dumps(fake_results)
            self.post_json(self.URL, params=json_result)
results = self.get_json('/v1/results?page=1&cpid=12345')
        self.assertEqual(len(results['results']), 2)
response_test_ids = [test['test_id'] for test in responses[0:2]]
for r in results['results']:
self.assertIn(r['id'], response_test_ids)
def test_get_with_date_filters(self):
self.CONF.set_override('results_per_page',
10,
'api')
responses = []
for i in range(5):
fake_results = {
'cpid': '12345',
'duration_seconds': i,
'results': [
{'name': 'tempest.foo'},
{'name': 'tempest.bar'}
]
}
json_result = json.dumps(fake_results)
actual_response = self.post_json(self.URL,
params=json_result)
responses.append(actual_response)
all_results = self.get_json(self.URL)
slice_results = all_results['results'][1:4]
url = '/v1/results?start_date=%(start)s&end_date=%(end)s' % {
'start': slice_results[2]['created_at'],
'end': slice_results[0]['created_at']
}
filtering_results = self.get_json(url)
for r in slice_results:
self.assertIn(r, filtering_results['results'])
url = '/v1/results?end_date=1000-01-01 12:00:00'
filtering_results = self.get_json(url)
self.assertEqual([], filtering_results['results'])
@mock.patch('refstack.api.utils.get_user_id')
def test_get_with_product_id(self, mock_get_user):
user_info = {
'openid': 'test-open-id',
'email': '<EMAIL>',
'fullname': '<NAME>'
}
db.user_save(user_info)
mock_get_user.return_value = 'test-open-id'
fake_product = {
'name': 'product name',
'description': 'product description',
'product_type': api_const.CLOUD,
}
product = json.dumps(fake_product)
response = self.post_json('/v1/products/', params=product)
product_id = response['id']
# Create a version.
version_url = '/v1/products/' + product_id + '/versions'
version = {'cpid': '123', 'version': '6.0'}
post_response = self.post_json(version_url, params=json.dumps(version))
version_id = post_response['id']
# Create a test and associate it to the product version and user.
results = json.dumps(FAKE_TESTS_RESULT)
post_response = self.post_json('/v1/results', params=results)
test_id = post_response['test_id']
test_info = {'id': test_id, 'product_version_id': version_id}
db.update_test_result(test_info)
db.save_test_result_meta_item(test_id, api_const.USER, 'test-open-id')
url = self.URL + '?page=1&product_id=' + product_id
# Test GET.
response = self.get_json(url)
self.assertEqual(1, len(response['results']))
self.assertEqual(test_id, response['results'][0]['id'])
# Test unauthorized.
mock_get_user.return_value = 'test-foo-id'
response = self.get_json(url, expect_errors=True)
self.assertEqual(403, response.status_code)
# Make product public.
product_info = {'id': product_id, 'public': 1}
db.update_product(product_info)
# Test result is not shared yet, so no tests should return.
response = self.get_json(url)
self.assertFalse(response['results'])
# Share the test run.
db.save_test_result_meta_item(test_id, api_const.SHARED_TEST_RUN, 1)
response = self.get_json(url)
self.assertEqual(1, len(response['results']))
self.assertEqual(test_id, response['results'][0]['id'])
@mock.patch('refstack.api.utils.check_owner')
def test_delete(self, mock_check_owner):
results = json.dumps(FAKE_TESTS_RESULT)
test_response = self.post_json(self.URL, params=results)
test_id = test_response.get('test_id')
url = self.URL + test_id
mock_check_owner.return_value = True
# Test can't delete verified test run.
db.update_test_result({'id': test_id, 'verification_status': 1})
resp = self.delete(url, expect_errors=True)
self.assertEqual(403, resp.status_code)
        # Test can delete a non-verified test run.
db.update_test_result({'id': test_id, 'verification_status': 0})
resp = self.delete(url, expect_errors=True)
self.assertEqual(204, resp.status_code)
class TestResultsEndpointNoAnonymous(api.FunctionalTest):
URL = '/v1/results/'
def _generate_keypair_(self):
return rsa.generate_private_key(
public_exponent=65537,
key_size=1024,
backend=default_backend()
)
def _sign_body_(self, keypair, body):
signer = keypair.signer(padding.PKCS1v15(), hashes.SHA256())
signer.update(body)
return signer.finalize()
def _get_public_key_(self, keypair):
pubkey = keypair.public_key().public_bytes(
serialization.Encoding.OpenSSH,
serialization.PublicFormat.OpenSSH
)
return pubkey
def setUp(self):
super(TestResultsEndpointNoAnonymous, self).setUp()
self.config_fixture = config_fixture.Config()
self.CONF = self.useFixture(self.config_fixture).conf
self.CONF.api.enable_anonymous_upload = False
self.user_info = {
'openid': 'test-open-id',
'email': '<EMAIL>',
'fullname': '<NAME>'
}
db.user_save(self.user_info)
good_key = self._generate_keypair_()
self.body = json.dumps(FAKE_TESTS_RESULT).encode()
signature = self._sign_body_(good_key, self.body)
pubkey = self._get_public_key_(good_key)
x_signature = binascii.b2a_hex(signature)
self.good_headers = {
'X-Signature': x_signature,
'X-Public-Key': pubkey
}
self.pubkey_info = {
'openid': 'test-open-id',
'format': 'ssh-rsa',
'pubkey': pubkey.split()[1],
'comment': 'comment'
}
db.store_pubkey(self.pubkey_info)
bad_key = self._generate_keypair_()
bad_signature = self._sign_body_(bad_key, self.body)
bad_pubkey = self._get_public_key_(bad_key)
x_bad_signature = binascii.b2a_hex(bad_signature)
self.bad_headers = {
'X-Signature': x_bad_signature,
'X-Public-Key': bad_pubkey
}
def test_post_with_no_token(self):
"""Test results endpoint with post request."""
results = json.dumps(FAKE_TESTS_RESULT)
actual_response = self.post_json(self.URL, expect_errors=True,
params=results)
self.assertEqual(actual_response.status_code, 401)
def test_post_with_valid_token(self):
"""Test results endpoint with post request."""
results = json.dumps(FAKE_TESTS_RESULT)
actual_response = self.post_json(self.URL,
headers=self.good_headers,
params=results)
self.assertIn('test_id', actual_response)
try:
uuid.UUID(actual_response.get('test_id'), version=4)
except ValueError:
self.fail("actual_response doesn't contain test_id")
def test_post_with_invalid_token(self):
results = json.dumps(FAKE_TESTS_RESULT)
actual_response = self.post_json(self.URL,
headers=self.bad_headers,
expect_errors=True,
params=results)
self.assertEqual(actual_response.status_code, 401)
``` |
{
"source": "jovian34/j34rockpaper",
"score": 4
} |
#### File: jovian34/j34rockpaper/players.py
```python
import random
class Player(object):
def __init__(self):
self.score = 0
self.choice = 'START'
def add_to_score(self):
self.score += 1
class Human(Player):
def get_user_input(self):
options = ['r', 'p', 's']
print('choose (r)ock, (p)aper, or (s)cissors: ', end='')
self.choice = input()
        if self.choice and self.choice[0].lower() in options:
self.choice = self.choice[0].lower()
else:
print('Invalid choice... please try again.')
return self.get_user_input()
def process_choice(self):
if self.choice == 'r':
print('You chose rock!')
self.choice = 'rock'
if self.choice == 'p':
print('You chose paper!')
self.choice = 'paper'
if self.choice == 's':
print('You chose scissors!')
self.choice = 'scissors'
def set_user_choice(self):
self.get_user_input()
self.process_choice()
class Computer(Player):
def set_random_choice(self):
self.choice = random.randint(1, 3)
``` |
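A minimal round sketch wiring the classes above together. The real game loop is not part of this file, so the 1/2/3-to-rock/paper/scissors mapping for the computer and the win table below are assumptions.

```python
NUM_TO_NAME = {1: 'rock', 2: 'paper', 3: 'scissors'}   # assumed mapping
BEATS = {'rock': 'scissors', 'paper': 'rock', 'scissors': 'paper'}

def play_round(human, computer):
    human.set_user_choice()
    computer.set_random_choice()
    computer_choice = NUM_TO_NAME[computer.choice]
    print('Computer chose %s!' % computer_choice)
    if human.choice == computer_choice:
        print('Tie!')
    elif BEATS[human.choice] == computer_choice:
        human.add_to_score()
        print('You win this round!')
    else:
        computer.add_to_score()
        print('Computer wins this round!')

if __name__ == '__main__':
    play_round(Human(), Computer())
```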
{
"source": "Jovian-Dsouza/MOC-Detector",
"score": 2
} |
#### File: MOC-Detector/src/DataModule.py
```python
import math
import random
import pytorch_lightning as pl
import torch
import os
import pickle
import cv2
import numpy as np
from torch.utils import data
from datasets.init_dataset import get_dataset
from ACT_utils.ACT_utils import tubelet_in_out_tubes, tubelet_has_gt
from MOC_utils.gaussian_hm import gaussian_radius, draw_umich_gaussian
from ACT_utils.ACT_aug import apply_distort, apply_expand, crop_image
from pprint import pprint
class UCFDataset(data.Dataset):
def __init__(self,
root_dir,
mode, # train or val
pkl_filename = 'UCF101v2-GT.pkl',
K=7,
skip=1,
downratio=4,
mean=[0.40789654, 0.44719302, 0.47026115],
std=[0.28863828, 0.27408164, 0.27809835],
resize=(288, 288), # (h, w)
max_objs=128):
super().__init__()
self.root_dir = root_dir
self.mode = mode
self.K = K
        self.skip = skip  # TODO: implement skipping frames in __getitem__
self._resize_height = resize[0]
self._resize_width = resize[1]
self.down_ratio = downratio
self.mean = mean
self.std = std
self.max_objs = max_objs
pkl_file = os.path.join(root_dir, pkl_filename)
with open(pkl_file, 'rb') as fid:
pkl = pickle.load(fid, encoding='iso-8859-1')
for k in pkl:
setattr(self, ('_' if k != 'labels' else '') + k, pkl[k])
# labels, _nframes, _train_videos, _test_videos
# _gttubes, _resolution
self.num_classes = len(self.labels)
self._indices = []
video_list = self._train_videos if mode == 'train' else self._test_videos
for v in video_list:
vtubes = sum(self._gttubes[v].values(), [])
self._indices += [(v, i) for i in range(1, self._nframes[v] + 2 - self.K, self.K)
if tubelet_in_out_tubes(vtubes, i, self.K) and tubelet_has_gt(vtubes, i, self.K)]
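        # Each entry in self._indices is a (video, start_frame) pair for a
        # K-frame tubelet: start frames are sampled every K frames and kept
        # only when no ground-truth tube starts or ends inside the clip and
        # at least one tube spans all K frames, so every sample has a full
        # set of K ground-truth boxes per overlapping action tube.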
self.init_aug_params()
def init_aug_params(self):
self._mean_values = [104.0136177, 114.0342201, 119.91659325]
self.distort_param = {
'brightness_prob': 0.5,
'brightness_delta': 32,
'contrast_prob': 0.5,
'contrast_lower': 0.5,
'contrast_upper': 1.5,
'hue_prob': 0.5,
'hue_delta': 18,
'saturation_prob': 0.5,
'saturation_lower': 0.5,
'saturation_upper': 1.5,
'random_order_prob': 0.0,
}
self.expand_param = {
'expand_prob': 0.5,
'max_expand_ratio': 4.0,
}
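        # SSD-style random-crop samplers consumed by crop_image() during
        # augmentation: each entry tries up to `max_trials` random crops in
        # the given scale/aspect-ratio range and keeps at most `max_sample`
        # crops whose jaccard overlap with the ground truth satisfies the
        # stated constraint.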
self.batch_samplers = [{
'sampler': {},
'max_trials': 1,
'max_sample': 1,
}, {
'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0, },
'sample_constraint': {'min_jaccard_overlap': 0.1, },
'max_trials': 50,
'max_sample': 1,
}, {
'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0, },
'sample_constraint': {'min_jaccard_overlap': 0.3, },
'max_trials': 50,
'max_sample': 1,
}, {
'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0, },
'sample_constraint': {'min_jaccard_overlap': 0.5, },
'max_trials': 50,
'max_sample': 1,
}, {
'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0, },
'sample_constraint': {'min_jaccard_overlap': 0.7, },
'max_trials': 50,
'max_sample': 1,
}, {
'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0, },
'sample_constraint': {'min_jaccard_overlap': 0.9, },
'max_trials': 50,
'max_sample': 1,
}, {
'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0, },
'sample_constraint': {'max_jaccard_overlap': 1.0, },
'max_trials': 50,
'max_sample': 1,
}, ]
def __len__(self):
return len(self._indices)
def imagefile(self, v, i):
return os.path.join(self.root_dir, 'rgb-images', v, '{:0>5}.jpg'.format(i))
def flip_video(self, images, frame, v):
do_mirror = random.getrandbits(1) == 1
        # flip the image
if do_mirror:
images = [im[:, ::-1, :] for im in images]
h, w = self._resolution[v]
gt_bbox = {}
for ilabel, tubes in self._gttubes[v].items():
for t in tubes:
if frame not in t[:, 0]:
continue
assert frame + self.K - 1 in t[:, 0]
                # copy so we don't modify the dataset's ground truth in place
t = t.copy()
if do_mirror:
                    # flip the gt bbox
xmin = w - t[:, 3]
t[:, 3] = w - t[:, 1]
t[:, 1] = xmin
boxes = t[(t[:, 0] >= frame) * (t[:, 0] < frame + self.K), 1:5]
assert boxes.shape[0] == self.K
if ilabel not in gt_bbox:
gt_bbox[ilabel] = []
                # gt_bbox[ilabel] ---> a list of numpy arrays, each of shape (K, 4) with x1, y1, x2, y2 per frame
gt_bbox[ilabel].append(boxes)
return images, gt_bbox
def make_gttbox(self, frame, v):
gt_bbox = {}
for ilabel, tubes in self._gttubes[v].items():
for t in tubes:
if frame not in t[:, 0]:
continue
assert frame + self.K - 1 in t[:, 0]
t = t.copy()
boxes = t[(t[:, 0] >= frame) * (t[:, 0] < frame + self.K), 1:5]
assert boxes.shape[0] == self.K
if ilabel not in gt_bbox:
gt_bbox[ilabel] = []
gt_bbox[ilabel].append(boxes)
return gt_bbox
def resize_video(self, images, gt_bbox):
original_h, original_w = images[0].shape[:2]
output_h = self._resize_height // self.down_ratio
output_w = self._resize_width // self.down_ratio
        # resize the original image and its GT bbox
for ilabel in gt_bbox:
for itube in range(len(gt_bbox[ilabel])):
gt_bbox[ilabel][itube][:, 0] = gt_bbox[ilabel][itube][:, 0] / original_w * output_w
gt_bbox[ilabel][itube][:, 1] = gt_bbox[ilabel][itube][:, 1] / original_h * output_h
gt_bbox[ilabel][itube][:, 2] = gt_bbox[ilabel][itube][:, 2] / original_w * output_w
gt_bbox[ilabel][itube][:, 3] = gt_bbox[ilabel][itube][:, 3] / original_h * output_h
images = [cv2.resize(im, (self._resize_width, self._resize_height), interpolation=cv2.INTER_LINEAR) for im in images]
return images, gt_bbox
def normalize(self, images):
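        # Convert each frame to CHW float32, scale to [0, 1] and standardise with the dataset mean/std.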
data = [np.empty((3, self._resize_height, self._resize_width), dtype=np.float32) for i in range(self.K)]
mean = np.tile(np.array(self.mean, dtype=np.float32)[:, None, None], (1, 1, 1))
std = np.tile(np.array(self.std, dtype=np.float32)[:, None, None], (1, 1, 1))
for i in range(self.K):
data[i][0:3, :, :] = np.transpose(images[i], (2, 0, 1))
data[i] = ((data[i] / 255.) - mean) / std
return data
def draw_ground_truths(self, gt_bbox):
output_h = self._resize_height // self.down_ratio
output_w = self._resize_width // self.down_ratio
hm = np.zeros((self.num_classes, output_h, output_w), dtype=np.float32)
wh = np.zeros((self.max_objs, self.K * 2), dtype=np.float32)
mov = np.zeros((self.max_objs, self.K * 2), dtype=np.float32)
index = np.zeros((self.max_objs), dtype=np.int64)
index_all = np.zeros((self.max_objs, self.K * 2), dtype=np.int64)
mask = np.zeros((self.max_objs), dtype=np.uint8)
num_objs = 0
for ilabel in gt_bbox:
for itube in range(len(gt_bbox[ilabel])):
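                # The temporal centre of the K-frame tubelet is the "key" frame: the class
                # heatmap is drawn at its centre and movements are encoded relative to it.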
key = self.K // 2
# key frame's bbox height and width (both on the feature map)
key_h, key_w = gt_bbox[ilabel][itube][key, 3] - gt_bbox[ilabel][itube][key, 1], gt_bbox[ilabel][itube][key, 2] - gt_bbox[ilabel][itube][key, 0]
# create gaussian heatmap
radius = gaussian_radius((math.ceil(key_h), math.ceil(key_w)))
radius = max(0, int(radius))
# ground truth bbox's center in key frame
center = np.array([(gt_bbox[ilabel][itube][key, 0] + gt_bbox[ilabel][itube][key, 2]) / 2, (gt_bbox[ilabel][itube][key, 1] + gt_bbox[ilabel][itube][key, 3]) / 2], dtype=np.float32)
center_int = center.astype(np.int32)
assert 0 <= center_int[0] and center_int[0] <= output_w and 0 <= center_int[1] and center_int[1] <= output_h
# draw ground truth gaussian heatmap at each center location
draw_umich_gaussian(hm[ilabel], center_int, radius)
for i in range(self.K):
center_all = np.array([(gt_bbox[ilabel][itube][i, 0] + gt_bbox[ilabel][itube][i, 2]) / 2, (gt_bbox[ilabel][itube][i, 1] + gt_bbox[ilabel][itube][i, 3]) / 2], dtype=np.float32)
center_all_int = center_all.astype(np.int32)
# wh is ground truth bbox's height and width in i_th frame
wh[num_objs, i * 2: i * 2 + 2] = 1. * (gt_bbox[ilabel][itube][i, 2] - gt_bbox[ilabel][itube][i, 0]), 1. * (gt_bbox[ilabel][itube][i, 3] - gt_bbox[ilabel][itube][i, 1])
# mov is ground truth movement from i_th frame to key frame
mov[num_objs, i * 2: i * 2 + 2] = (gt_bbox[ilabel][itube][i, 0] + gt_bbox[ilabel][itube][i, 2]) / 2 - \
center_int[0], (gt_bbox[ilabel][itube][i, 1] + gt_bbox[ilabel][itube][i, 3]) / 2 - center_int[1]
# index_all are all frame's bbox center position
index_all[num_objs, i * 2: i * 2 + 2] = center_all_int[1] * output_w + center_all_int[0], center_all_int[1] * output_w + center_all_int[0]
                # index is the key frame's bbox center position
                index[num_objs] = center_int[1] * output_w + center_int[0]
                # mask indicates how many objects are in this tube
mask[num_objs] = 1
num_objs = num_objs + 1
return hm, wh, mov, index, index_all, mask
def __getitem__(self, id):
v, frame = self._indices[id]
# Read images
images = [cv2.imread(self.imagefile(v, frame + i)).astype(np.float32) for i in range(0,self.K,self.skip)]
if self.mode == 'train':
# apply data augmentation
images, gt_bbox = self.flip_video(images, frame, v)
images = apply_distort(images, self.distort_param)
images, gt_bbox = apply_expand(images, gt_bbox, self.expand_param, self._mean_values)
images, gt_bbox = crop_image(images, gt_bbox, self.batch_samplers)
else:
# no data augmentation or flip when validation
gt_bbox = self.make_gttbox(frame, v)
# Resize the video
images, gt_bbox = self.resize_video(images, gt_bbox)
data = self.normalize(images)
hm, wh, mov, index, index_all, mask = self.draw_ground_truths(gt_bbox)
return {'input': data, 'hm': hm, 'mov': mov, 'wh': wh, 'mask': mask, 'index': index, 'index_all': index_all}
def _draw_bb(self, video, frame, index):
i = index
for label in self._gttubes[video]:
# print(label)
tubes = self._gttubes[video][label]
for tube in tubes:
x = np.where(tube[..., 0] == i)[0]
if (len(x) != 0):
x = int(x)
x1, y1, x2, y2 = tube[x, 1:]
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
frame = cv2.rectangle(frame, (x1, y1), (x2, y2), color=(255, 0, 0), thickness=2)
return frame
def save_video(self, index, fps=25, drawbb=True, save_dir='.'):
video, start_frame = self._indices[index]
h, w = self._resolution[video]
save_path = video.split(os.path.sep)[-1] + '_'+ str(index) + '.mp4'
save_path = os.path.join(save_dir, save_path)
out = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
for i in range(start_frame, start_frame+self.K, self.skip):
frame = cv2.imread(self.imagefile(video, i))
if drawbb:
frame = self._draw_bb(video, frame, i)
out.write(frame)
out.release()
class VideoDataModule(pl.LightningDataModule):
def __init__(self,
root_dir,
pkl_file,
K,
resize,
batch_size,
num_workers=None,
pin_memory=False):
super().__init__()
self.root_dir = root_dir
self.pkl_file = pkl_file
self.batch_size = batch_size
self.num_workers = os.cpu_count() - 1 if num_workers is None else num_workers
self.pin_memory = pin_memory
self.Dataset = get_dataset("ucf101") #ucf101 or hmdb
self.num_classes = self.Dataset.num_classes
self.K = K
self.resize = resize
def train_dataloader(self):
return torch.utils.data.DataLoader(
UCFDataset(root_dir=self.root_dir,
pkl_filename=self.pkl_file,
mode='train',
K=self.K,
resize=self.resize,
),
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
drop_last=True,
)
def val_dataloader(self):
return torch.utils.data.DataLoader(
UCFDataset(root_dir=self.root_dir,
pkl_filename=self.pkl_file,
mode='val',
K=self.K,
resize=self.resize,
),
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
drop_last=True,
)
# TEST CASES
def test_dataset():
dataset = UCFDataset(root_dir='../data/ucf24',
mode='train',
pkl_filename='SalsaSpin.pkl',
K=7
)
print("len of dataset ", len(dataset))
data = dataset.__getitem__(0)
print(data.keys()) # 'input', 'hm', 'mov', 'wh', 'mask', 'index', 'index_all']
print(data['input'][0].shape)
print(data['hm'].shape)
print(data['mov'].shape)
print(data['wh'].shape)
# save_dir = '../SalsaSpin'
# os.makedirs(save_dir, exist_ok=True)
# for i in range(len(dataset)):
# dataset.save_video(i, fps=1, save_dir=save_dir, drawbb=True)
if __name__ == '__main__':
datamodule = VideoDataModule(root_dir='../data/ucf24',
pkl_file="SalsaSpin.pkl",
K=7,
resize=(288, 288),
batch_size=2,
num_workers=0,
pin_memory=False
)
print("Number of classes ", datamodule.num_classes)
train_dl = datamodule.train_dataloader()
print("Len of train_dl", len(train_dl))
for data in train_dl:
break
print(data.keys()) # 'input', 'hm', 'mov', 'wh', 'mask', 'index', 'index_all']
print(data['hm'].shape)
print(data['mov'].shape)
print(data['wh'].shape)
val_dl = datamodule.val_dataloader()
print("Len of val_dl", len(val_dl))
for data in val_dl:
break
print(data.keys())
```
#### File: datasets/dataset/ucf101.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from .base_dataset import BaseDataset
class UCF101(BaseDataset):
num_classes = 24
def __init__(self, root_dir, mode, split=1, K=7, ninput=1, resize=(288, 288)):
assert split == 1, "We use only the first split of UCF101"
self.ROOT_DATASET_PATH = root_dir
pkl_filename = 'UCF101v2-GT.pkl'
super(UCF101, self).__init__(mode, self.ROOT_DATASET_PATH, pkl_filename, split, K, ninput,
resize_height=resize[0], resize_width=resize[1])
def imagefile(self, v, i):
return os.path.join(self.ROOT_DATASET_PATH, 'rgb-images', v, '{:0>5}.jpg'.format(i))
def flowfile(self, v, i):
return os.path.join(self.ROOT_DATASET_PATH, 'brox-images', v, '{:0>5}.jpg'.format(i))
```
#### File: src/MOC_utils/gaussian_hm.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def gaussian_radius(det_size, min_overlap=0.7):
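    # CornerNet-style radius: the largest shift r of the box corners such that the
    # shifted box still overlaps the original with IoU >= min_overlap. The three
    # placement cases each give a quadratic in r; the smallest root is returned.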
height, width = det_size
a1 = 1
b1 = (height + width)
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)
r1 = (b1 + sq1) / 2
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)
r2 = (b2 + sq2) / 2
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)
r3 = (b3 + sq3) / 2
return min(r1, r2, r3)
def gaussian2D(shape, sigma=1):
m, n = [(ss - 1.) / 2. for ss in shape]
y, x = np.ogrid[-m:m + 1, -n:n + 1]
h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
return h
def draw_umich_gaussian(heatmap, center, radius, k=1):
diameter = 2 * radius + 1
gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
x, y = int(center[0]), int(center[1])
height, width = heatmap.shape[0:2]
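    # Clip the gaussian patch so it stays inside the heatmap when the centre lies near a border.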
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
return heatmap
def draw_msra_gaussian(heatmap, center, sigma):
tmp_size = sigma * 3
mu_x = int(center[0] + 0.5)
mu_y = int(center[1] + 0.5)
w, h = heatmap.shape[0], heatmap.shape[1]
ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
if ul[0] >= h or ul[1] >= w or br[0] < 0 or br[1] < 0:
return heatmap
size = 2 * tmp_size + 1
x = np.arange(0, size, 1, np.float32)
y = x[:, np.newaxis]
x0 = y0 = size // 2
g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
g_x = max(0, -ul[0]), min(br[0], h) - ul[0]
g_y = max(0, -ul[1]), min(br[1], w) - ul[1]
img_x = max(0, ul[0]), min(br[0], h)
img_y = max(0, ul[1]), min(br[1], w)
heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]] = np.maximum(
heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]],
g[g_y[0]:g_y[1], g_x[0]:g_x[1]])
return heatmap
```
#### File: MOC-Detector/src/model.py
```python
import torch
import torch.nn as nn
import torch.optim as optim
import pytorch_lightning as pl
from network.dla import MOC_DLA
from network.resnet import MOC_ResNet
from trainer.losses import MOCLoss
from MOC_utils.model import load_coco_pretrained_model
backbone = {
'dla': MOC_DLA,
'resnet': MOC_ResNet
}
def fill_fc_weights(layers):
for m in layers.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class MOC_Branch(nn.Module):
def __init__(self, input_channel, arch, head_conv, branch_info, K):
super(MOC_Branch, self).__init__()
assert head_conv > 0
wh_head_conv = 64 if arch == 'resnet' else head_conv
self.hm = nn.Sequential(
nn.Conv2d(K * input_channel, head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv, branch_info['hm'],
kernel_size=1, stride=1,
padding=0, bias=True))
self.hm[-1].bias.data.fill_(-2.19)
self.mov = nn.Sequential(
nn.Conv2d(K * input_channel, head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv, branch_info['mov'],
kernel_size=1, stride=1,
padding=0, bias=True))
fill_fc_weights(self.mov)
self.wh = nn.Sequential(
nn.Conv2d(input_channel, wh_head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(wh_head_conv, branch_info['wh'] // K,
kernel_size=1, stride=1,
padding=0, bias=True))
fill_fc_weights(self.wh)
def forward(self, input_chunk):
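        # input_chunk is a list of K per-frame feature maps: wh is predicted per frame,
        # while hm and mov operate on all K frames concatenated along the channel axis.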
output = {}
output_wh = []
for feature in input_chunk:
output_wh.append(self.wh(feature))
input_chunk = torch.cat(input_chunk, dim=1)
output_wh = torch.cat(output_wh, dim=1)
output['hm'] = self.hm(input_chunk)
output['mov'] = self.mov(input_chunk)
output['wh'] = output_wh
return output
class MOC_Net(pl.LightningModule):
def __init__(self, arch, num_classes, head_conv=256, K=7, **kwargs):
super().__init__()
self.save_hyperparameters()
num_layers = int(arch[arch.find('_') + 1:]) if '_' in arch else 0
arch = arch[:arch.find('_')] if '_' in arch else arch
branch_info = {'hm': num_classes,
'mov': 2 * K,
'wh': 2 * K}
self.K = K
self.backbone = backbone[arch](num_layers)
self.branch = MOC_Branch(self.backbone.output_channel, arch, head_conv, branch_info, K)
# Define the loss function
self.loss = MOCLoss()
def forward(self, x):
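        # x is a list of K input frames; the shared backbone is applied to each frame
        # and the branch heads fuse the resulting feature maps into tubelet predictions.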
chunk = [self.backbone(x[i]) for i in range(self.K)]
return [self.branch(chunk)]
def configure_optimizers(self):
if self.hparams.optimizer == 'sgd':
return optim.SGD(self.parameters(), self.hparams.lr, momentum = 0.9)
elif self.hparams.optimizer == 'adam':
return optim.Adam(self.parameters(), self.hparams.lr)
elif self.hparams.optimizer == 'adamax':
return optim.Adamax(self.parameters(), self.hparams.lr)
def run_epoch(self, phase, batch, batch_idx):
assert len(batch['input']) == self.K
output = self(batch['input'])[0]
loss, loss_stats = self.loss(output, batch)
self.log(f'{phase}_loss', loss, prog_bar=True, logger=True)
self.log(f'{phase}_loss_hm', loss_stats['loss_hm'], prog_bar=True, logger=True)
self.log(f'{phase}_loss_mov', loss_stats['loss_mov'], prog_bar=True, logger=True)
self.log(f'{phase}_loss_wh', loss_stats['loss_wh'], prog_bar=True, logger=True)
return loss.mean()
def training_step(self, batch, batch_idx):
return self.run_epoch("train", batch, batch_idx)
def validation_step(self, batch, batch_idx):
self.run_epoch("val", batch, batch_idx)
def test_step(self, batch, batch_idx):
self.run_epoch("test", batch, batch_idx)
if __name__ == '__main__':
num_classes = 24
K = 7
arch = 'resnet_18'
head_conv = 256
model = MOC_Net(arch, num_classes, head_conv, K, lr=0.001, optimizer='adam')
model = load_coco_pretrained_model(model, arch, print_log=False)
input_shape = (1, 3, 288, 288)
x = [torch.randn(input_shape)] * K
# y = model.backbone(x) #1, 64, 72, 72
y = model(x)
# print(len(y))
print(y[0].keys())
hm = y[0]['hm']
mov = y[0]['mov']
wh = y[0]['wh']
print(hm.shape)
print(mov.shape)
print(wh.shape)
print(model.hparams)
model.configure_optimizers()
```
#### File: network/DCNv2/my_dcn_v2.py
```python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import math
from torch import nn
from torch.nn.modules.utils import _pair
class DCN(nn.Module):
    ## Convolution wrapper: a plain Conv2d standing in for deformable convolution (DCNv2)
def __init__(self, in_channels, out_channels,
kernel_size, stride, padding,
dilation=1, deformable_groups=1):
super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                              stride, padding, dilation)
def register_parameter(self):
self.conv.weight.data.zero_()
self.conv.bias.data.zero_()
def forward(self, input):
return self.conv(input)
``` |
{
"source": "Jovian-Dsouza/sahayak_bot",
"score": 2
} |
#### File: ebot_main/scripts/task5.py
```python
import rospy
import random
from math import pi, sin, cos
from geometry_msgs.msg import Point, Quaternion, Pose, PointStamped, PoseStamped
from std_msgs.msg import Header
from object_msgs.msg import ObjectPose
from std_srvs.srv import Empty
from tf.transformations import quaternion_from_euler
from tf.transformations import euler_from_quaternion
from ebot_mani.srv import *
from testNav import Ebot
from perception.srv import *
transformPose = rospy.ServiceProxy('/get_transform_pose', GetTransformPose)
transformPoint = rospy.ServiceProxy('/get_transform_point', GetTransformPoint)
def TransformPoint(point, from_frame, to_frame):
req = GetTransformPointRequest()
req.point = point
req.from_frame = from_frame
req.to_frame = to_frame
return transformPoint(req).point
# width estimate = 0.2 + width of detection window (printed in terminal)
# w_dict uses real model names
w_dict = {'coke_can': 0.27086,
'battery': 0.26500,
'glue': 0.31,
'eYFi_board': 0.5,
'adhesive': 0.267674286664,
'water_glass': 0.2,
'robot_wheels': 0.26,
'FPGA_board': 0.3
}
def printReached(name):
print(">> " + name + " Reached")
def printPicked(name):
print(">> " + name + " Picked")
def printDropped(name, dropbox):
print(">> " + name + " Dropped in " + dropbox)
def printPoint(point):
p = point
print("create_point(%0.5f, %0.5f, %0.5f)" %
(p.x, p.y, p.z))
def create_point(x, y, z):
position = Point()
position.x = x
position.y = y
position.z = z
return position
def printPose(pose):
p = pose.position
q = pose.orientation
print("create_pose_quaternion(%0.5f, %0.5f, %0.5f, %0.5f, %0.5f, %0.5f, %0.5f)" %
(p.x, p.y, p.z, q.x, q.y, q.z, q.w))
def create_pose_quaternion(x, y, z, qx, qy, qz, qw):
'''
returns a Pose() object from the given x, y, z, qx, qy , qz, qw values
'''
pose = Pose()
pose.position.x = x
pose.position.y = y
pose.position.z = z
pose.orientation.x = qx
pose.orientation.y = qy
pose.orientation.z = qz
pose.orientation.w = qw
return pose
def orient_from_euler(roll, pitch, yaw):
'''
Input is roll, pitch, yaw
output is Quaternion pose.orientation
'''
q = quaternion_from_euler(roll, pitch, yaw)
o = Quaternion()
o.x, o.y, o.z, o.w = q[0], q[1], q[2], q[3]
return o
def createPoseStamped(point):
poseStamped = PoseStamped()
poseStamped.header.frame_id = 'base_link'
poseStamped.header.stamp = rospy.Time.now()
poseStamped.pose.position = point
poseStamped.pose.orientation.x = 0
poseStamped.pose.orientation.y = -0.7071
poseStamped.pose.orientation.z = 0
poseStamped.pose.orientation.w = 0.7071
return poseStamped
def pickupObject(object_name):
'''
Note : object_name should be the real model name and not the gazebo model name
'''
ur5.openGripper()
graspPose_pub.publish(createPoseStamped(detect.dict[object_name]))
if object_name == 'eYFi_board':
# TODO need a better way of finding the object's yaw angle instead of manually giving it
return ur5.graspObjectVertical(detect.dict[object_name], width=w_dict[object_name], yaw=pi/4).success
elif object_name == 'FPGA_board':
# return ur5.graspObjectVertical(detect.dict[object_name], width=w_dict[object_name], yaw=pi/3).success
return ur5.graspObjectHorizontal(detect.dict[object_name], width=w_dict[object_name], yaw=-pi/6)
else:
# .success
return ur5.graspObjectHorizontal(detect.dict[object_name], width=w_dict[object_name], yaw=0)
class Detect():
def __init__(self):
self.dict = {}
rospy.loginfo("waiting for detect service")
rospy.wait_for_service('/ebot/detect')
self.detectTable = rospy.ServiceProxy('/ebot/detectTable', Empty)
self.detect_service = rospy.ServiceProxy('/ebot/detect', Empty)
rospy.Subscriber("/detection_info", ObjectPose, self.detect_callback)
def print_detected(self):
for item in self.dict.keys():
print(">> " + item + " Identified")
def detect(self):
self.dict = {}
self.detect_service()
rospy.sleep(2)
self.print_detected()
def detect_callback(self, msg):
self.dict[msg.name] = msg.pose.pose.position
self.frame_id = msg.pose.header.frame_id
class Ur5():
def __init__(self):
rospy.loginfo("waiting for ur5_service")
rospy.wait_for_service('ebot_mani/set_named_pose')
rospy.wait_for_service('ebot_mani/set_pose')
rospy.wait_for_service('ebot_mani/set_gripper')
rospy.wait_for_service('ebot_mani/open_gripper')
rospy.wait_for_service('ebot_mani/grasp_object_vertical')
rospy.wait_for_service('ebot_mani/grasp_object_horizontal')
rospy.wait_for_service('ebot_mani/set_pose_relative')
rospy.loginfo("connected to services")
self.go_to_named_pose = rospy.ServiceProxy(
'ebot_mani/set_named_pose', SetNamedPose)
self.print_name_pose = rospy.ServiceProxy(
'/ebot_mani/print_name_pose', SetNamedPose)
self.go_to_pose = rospy.ServiceProxy('ebot_mani/set_pose', SetPose)
self.closeGripper = rospy.ServiceProxy(
'ebot_mani/set_gripper', SetGripper)
self.openGripper = rospy.ServiceProxy('ebot_mani/open_gripper', Empty)
self.graspObjectVerticalService = rospy.ServiceProxy(
'ebot_mani/grasp_object_vertical', GraspObject)
self.graspObjectHorizontalService = rospy.ServiceProxy(
'ebot_mani/grasp_object_horizontal', GraspObject)
self.set_pose_relative = rospy.ServiceProxy(
'ebot_mani/set_pose_relative', SetPose)
self.getCurrentPoseOdom = rospy.ServiceProxy(
'ebot_mani/get_current_pose_odom', GetPose)
self.set_pose_odom = rospy.ServiceProxy(
'ebot_mani/set_pose_odom', SetPose)
self.set_pose_wrist = rospy.ServiceProxy(
'ebot_mani/set_pose_wrist', SetPose)
self.align_wrist = rospy.ServiceProxy('ebot_mani/align_wrist', Empty)
self.set_pose_wrist_no_align = rospy.ServiceProxy(
'ebot_mani/set_pose_wrist_no_align', SetPose)
def go_to_pose_wrist(self, arg_pose):
req = SetPoseRequest()
req.pose = arg_pose
return self.set_pose_wrist(req).success
def go_to_pose_wrist_no_align(self, arg_pose):
req = SetPoseRequest()
req.pose = arg_pose
return self.set_pose_wrist_no_align(req).success
def go_to_pose_relative(self, arg_pose):
req = SetPoseRequest()
req.pose = arg_pose
return self.set_pose_relative(req).success
# def graspObjectHorizontal(self, point, width, yaw=0):
# req = GraspObjectRequest()
# req.point = point
# req.width = width
# req.yaw = yaw
# return self.graspObjectHorizontalService(req)
def graspObjectVerticalOld(self, point, width, yaw):
req = GraspObjectRequest()
req.point = point
req.width = width
req.yaw = yaw
return self.graspObjectVerticalService(req).success
def graspObjectVertical(self, point, width, yaw):
'''
Given the position of object within reach it grasps it.
Argument : position (Point msg)
'''
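        # The target point (given in base_link) is re-expressed in the wrist frame, and the
        # wrist is first moved to an offset pre-grasp pose before the final approach.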
self.align_wrist()
req = GetTransformPointRequest()
req.point = point
req.from_frame = "base_link"
req.to_frame = "wrist_3_link"
point = transformPoint(req).point
graspPose = Pose()
graspPose.position = point
graspPose.position.x -= 0.25 * sin(yaw)
graspPose.position.y -= 0.15 # + 0.1
graspPose.position.z -= 0.12 # Should be 0.25 * sin(grasp_angle)
# Pose just Above the object
flag = self.go_to_pose_wrist(graspPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
# Set grasping angle
if yaw != 0.0:
newOPose = Pose()
newOPose.orientation = orient_from_euler(0, 0, yaw)
flag = self.go_to_pose_wrist(newOPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
newOPose = Pose()
newOPose.orientation = orient_from_euler(0.558505, 0, 0) # 32 deg
flag = self.go_to_pose_wrist_no_align(newOPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
newOPose = Pose()
newOPose.position.z += 0.01
flag = self.go_to_pose_wrist(newOPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
return flag
def graspObjectHorizontal(self, point, width, yaw):
'''
Given the position of object within reach it grasps it.
Argument : position (Point msg)
'''
self.align_wrist()
req = GetTransformPointRequest()
req.point = point
req.from_frame = "base_link"
req.to_frame = "wrist_3_link"
point = transformPoint(req).point
graspPose = Pose()
graspPose.position = point
graspPose.position.x -= 0.25 * sin(yaw)
graspPose.position.y -= 0.188 # + 0.1
graspPose.position.z -= 0.07 # Should be 0.25 * sin(grasp_angle)
# Pose just Above the object
flag = self.go_to_pose_wrist(graspPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
# Set grasping angle
if yaw != 0.0:
newOPose = Pose()
newOPose.orientation = orient_from_euler(0, 0, yaw) # 32 deg
flag = self.go_to_pose_wrist(newOPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
newOPose = Pose()
newOPose.orientation = orient_from_euler(0.558505, 0, 0) # 32 deg
flag = self.go_to_pose_wrist_no_align(newOPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
# # #Grasp
self.closeGripper(width)
rospy.sleep(1)
newOPose = Pose()
newOPose.position.z = -0.09
flag = self.go_to_pose_wrist_no_align(newOPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
return True
def main():
# maind()
getFPGA()
ur5.openGripper()
def maind():
ur5.go_to_named_pose("navPose")
# ebot.go_to_goal('store_table_fpga')
ebot.go_to_goal('store_table')
# ebot.go_to_goal_precise('store_table')
ebot.print_current_pose()
# detect.detectTable()
# ur5.go_to_named_pose("seeObjectJ")
ur5.go_to_named_pose("fpgaPoseOdom")
detect.detect()
object_name = 'FPGA_board'
pointBaseLink = detect.dict[object_name]
graspPose_pub.publish(createPoseStamped(pointBaseLink))
pointOdom = TransformPoint(pointBaseLink, 'base_link', 'odom')
ur5.go_to_named_pose("graspVerticalJ")
pose = Pose()
pose.position.z = 0.1
ur5.go_to_pose_relative(pose)
ebot.go_to_goal_precise('store_table_close')
    ebot.go_to_waypoint_relative(0.4, 0, 0)
    pointBaseLink = TransformPoint(pointOdom, 'odom', 'base_link')
graspPose_pub.publish(createPoseStamped(pointBaseLink))
detect.detectTable()
rospy.sleep(0.1)
flag = ur5.graspObjectVerticalOld(
pointBaseLink, width=w_dict[object_name], yaw=pi/3)
while flag == False:
ebot.go_to_waypoint_relative(0.2, 0, 0)
detect.detect()
flag = ur5.graspObjectVerticalOld(
pointBaseLink, width=w_dict[object_name], yaw=pi/3)
ur5.openGripper()
def getFPGAnew():
ur5.go_to_named_pose("navPose")
# ebot.go_to_goal('store_table_fpga')
ebot.go_to_goal('store_table')
ebot.go_to_goal_precise('store_table_fpga')
ebot.print_current_pose()
detect.detectTable()
ur5.go_to_named_pose("seeObjectJ")
ur5.go_to_named_pose("fpgaPoseOdom")
ebot.go_to_waypoint_relative(0.25, 0, 0)
ur5.go_to_named_pose("fpgaPoseOdom")
detect.detect()
detect.detectTable()
ur5.openGripper()
object_name = 'FPGA_board'
graspPose_pub.publish(createPoseStamped(detect.dict[object_name]))
printPoint(detect.dict[object_name])
ur5.go_to_named_pose("graspVerticalJ")
pose = Pose()
pose.position.z = 0.1
ur5.go_to_pose_relative(pose)
flag = ur5.graspObjectVerticalOld(
detect.dict[object_name], width=w_dict[object_name], yaw=pi/3)
while flag == False:
ebot.go_to_waypoint_relative(0.2, 0, 0)
detect.detect()
flag = ur5.graspObjectVerticalOld(
detect.dict[object_name], width=w_dict[object_name], yaw=pi/3)
ebot.go_to_pose_relative(-1, 0, 0, rospy.Duration(5))
ur5.go_to_named_pose("navPose")
ebot.go_to_goal("store_exit")
def getFPGA():
ur5.go_to_named_pose("navPose")
# ebot.go_to_goal('store_table_fpga')
ebot.go_to_goal('store_table')
ebot.go_to_goal_precise('store_table_fpga')
ebot.print_current_pose()
detect.detectTable()
ur5.go_to_named_pose("seeObjectJ")
ur5.go_to_named_pose("fpgaPoseOdom")
ebot.go_to_waypoint_relative(0.25, 0, 0)
ur5.go_to_named_pose("fpgaPoseOdom")
detect.detect()
detect.detectTable()
ur5.openGripper()
object_name = 'FPGA_board'
graspPose_pub.publish(createPoseStamped(detect.dict[object_name]))
printPoint(detect.dict[object_name])
ur5.go_to_named_pose("seeObjectJ")
flag = ur5.graspObjectVerticalOld(
detect.dict[object_name], width=w_dict[object_name], yaw=pi/3)
while flag == False:
ebot.go_to_waypoint_relative(0.2, 0, 0)
detect.detect()
flag = ur5.graspObjectVerticalOld(
detect.dict[object_name], width=w_dict[object_name], yaw=pi/3)
ebot.go_to_pose_relative(-1, 0, 0, rospy.Duration(5))
ur5.go_to_named_pose("navPose")
ebot.go_to_goal("store_exit")
def getGlue():
ur5.go_to_named_pose("navPose")
# TODO check if in meeting Room
# ebot.go_to_goal('meeting_entry')
# print("Entered room")
ebot.print_current_pose()
ebot.go_to_goal_precise('meeting_table')
ebot.go_to_goal('meeting_table')
print("Reached Goal")
ebot.print_current_pose()
ebot.applyBrakes()
detect.detectTable()
ur5.go_to_named_pose("meetingTable")
detect.detect()
pickupObject('glue')
ur5.go_to_named_pose("navPose")
ebot.releaseBrakes()
def enter_pantry():
ur5.go_to_named_pose("navPose")
ebot.go_to_goal('pantry_entry')
ebot.go_to_waypoint_relative(1.3, 0, 0)
printReached("pantry")
def getCoke():
enter_pantry()
ebot.go_to_goal_precise('pantry_table1')
ebot.go_to_goal('pantry_table1')
ebot.applyBrakes()
detect.detectTable()
ur5.go_to_named_pose("pantryTable1Odom")
detect.detect()
pickupObject('coke_can')
ur5.go_to_named_pose("navPoseOld")
ebot.releaseBrakes()
exit_pantry()
def exit_pantry():
# ebot.go_to_goal('pantry_exit')
# ebot.go_to_waypoint_relative(1.2,0,0)
# ebot.go_to_goal('pantry_exit_old')
ebot.go_to_goal_precise('pantry_exit')
ebot.set_yaw(pi/2)
ebot.go_to_waypoint_relative(1.2, 0, 0)
def dropbox3():
ebot.go_to_goal('research_entry')
ebot.print_current_pose()
ebot.go_to_goal('research_dropbox')
ebot.print_current_pose()
ebot.applyBrakes()
detect.detectTable()
ur5.go_to_named_pose("researchDropbox")
ur5.openGripper()
rospy.sleep(0.5)
ur5.go_to_named_pose("navPose")
ebot.releaseBrakes()
def exit_meeting():
ebot.go_to_goal_precise('meeting_exit')
ebot.go_to_goal('meeting_exit')
def enter_meeting():
ebot.go_to_goal('meeting_entry')
ebot.go_to_waypoint_relative(1, 0, 0)
def dropbox2():
ebot.go_to_goal_precise('meeting_dropbox')
# ebot.go_to_goal('meeting_dropbox')
ebot.print_current_pose()
detect.detectTable()
ur5.go_to_named_pose("researchDropboxJ")
ur5.go_to_named_pose("meetingDropboxOdom")
ur5.openGripper()
rospy.sleep(0.5)
ur5.go_to_named_pose("navPose")
# ebot.go_to_pose_relative(0.95,0,0)
def enter_conference_room():
ebot.go_to_goal('conference_entry')
ebot.go_to_waypoint_relative(1, 0, 0)
def dropbox1():
ur5.go_to_named_pose("navPose")
enter_conference_room()
ebot.go_to_goal('conference_dropbox')
ebot.print_current_pose()
ebot.applyBrakes()
detect.detectTable()
ur5.go_to_named_pose("conferenceDropbox")
ur5.openGripper()
rospy.sleep(0.5)
ur5.go_to_named_pose("navPose")
ebot.releaseBrakes()
exit_conference_room()
def exit_conference_room():
ebot.set_yaw(-3*pi/2)
ebot.go_to_waypoint_relative(1, 0, 0)
def subtask1():
getFPGA()
dropbox1()
def subtask2():
getCoke()
enter_meeting()
dropbox2()
def subtask3():
getGlue()
exit_meeting()
dropbox3()
if __name__ == '__main__':
rospy.init_node('grasping_node')
graspPose_pub = rospy.Publisher("/graspPose", PoseStamped, queue_size=1)
ur5 = Ur5()
ebot = Ebot()
detect = Detect()
# main()
getFPGA()
# subtask1()
# subtask2()
# subtask3()
# ebot.releaseBrakes()
```
#### File: perception/scripts/pclFilterAndObjDectection.py
```python
from pcl_helper import *
from transform_helper import Transformer
from image_helper import ImagePub
import pcl
import rospy
from sensor_msgs.msg import PointCloud2
from geometry_msgs.msg import Point, PoseStamped
from std_msgs.msg import Header
from object_msgs.msg import ObjectPose
import numpy as np
from std_srvs.srv import Empty, EmptyResponse
from ebot_mani.srv import AddPlane, AddPlaneRequest
#SVM
from svmClassifier import Classifier
camera_matrix = np.array([[554.3827128226441, 0.0, 320.5, 0],\
[0.0, 554.3827128226441, 240.5, 0.0],\
[0.0, 0.0, 1.0, 0.0]])
def euclidiean_cluster(cloud_objects):
#Remove RGB data from PCL cloud
white_cloud = pcl.PointCloud()
points = []
for p in cloud_objects:
points.append([p[0], p[1], p[2]])
white_cloud.from_list(points)
tree = white_cloud.make_kdtree()
# Create a cluster extraction object
ec = white_cloud.make_EuclideanClusterExtraction()
ec.set_ClusterTolerance(0.015)
ec.set_MinClusterSize(25)
ec.set_MaxClusterSize(2000)
ec.set_SearchMethod(tree) # Search the k-d tree for clusters
# Extract indices for each of the discovered clusters
return ec.Extract()
def mapToImage(x , y, z):
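    # Pinhole projection: multiply the homogeneous 3-D camera-frame point by the
    # intrinsic matrix and divide by depth to obtain pixel coordinates.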
point_3d = np.array([(x, y, z, 1.0)])
point_2d = np.matmul(camera_matrix, point_3d.T)
x = int(np.asscalar(point_2d[0] / point_2d[2]))
y = int(np.asscalar(point_2d[1] / point_2d[2]))
return x, y
def numpyToPoint(np_array):
p = Point()
p.x = np.asscalar(np_array[0])
p.y = np.asscalar(np_array[1])
p.z = np.asscalar(np_array[2])
return p
class DetectObject:
def __init__(self):
rospy.Subscriber('/camera2/depth/points2', PointCloud2, self.callback, queue_size=1)
self.detection_info_pub = rospy.Publisher("/detection_info", ObjectPose, latch=True, queue_size=1)
self.service = rospy.Service('/ebot/detect', Empty, self.detect)
rospy.Service('/ebot/detectTable', Empty, self.detectTable_cb)
self.pcl_object_pub = rospy.Publisher("/pcl_objects", PointCloud2, queue_size=1)
self.pcl_table_pub = rospy.Publisher("/pcl_table", PointCloud2, queue_size=1, latch=True)
self.tablePose_pub = rospy.Publisher("/table_Pose", PoseStamped, queue_size=1)
self.addPlane = rospy.ServiceProxy('ebot_mani/add_plane', AddPlane)
self.clear_octomap = rospy.ServiceProxy('/clear_octomap', Empty)
def callback(self, msg): #/camera2/depth/points2
self.ros_cloud = msg
def pclFilter(self):
cloud = ros_to_pcl(self.ros_cloud)
#Voxel Grid Downsampling
vox = cloud.make_voxel_grid_filter()
LEAF_SIZE = 0.008
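        # ~8 mm voxels: downsample the cloud so segmentation and clustering stay fast.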
vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)
cloud_filtered = vox.filter()
# PassThrough Filter x axis
passthrough = cloud_filtered.make_passthrough_filter()
filter_axis = 'x'
passthrough.set_filter_field_name(filter_axis)
axis_min = -1
axis_max = 1
passthrough.set_filter_limits(axis_min, axis_max)
cloud_filtered = passthrough.filter()
# #PassThrough Filter y axis
passthrough = cloud_filtered.make_passthrough_filter()
filter_axis = 'y'
passthrough.set_filter_field_name(filter_axis)
axis_min = -0.2
axis_max = 0.8
passthrough.set_filter_limits(axis_min, axis_max)
cloud_filtered = passthrough.filter()
#PassThrough Filter z axis
passthrough = cloud_filtered.make_passthrough_filter()
filter_axis = 'z'
passthrough.set_filter_field_name(filter_axis)
axis_min = 0 #TODO Think of better way to set limits
axis_max = 2
passthrough.set_filter_limits(axis_min, axis_max)
cloud_filtered = passthrough.filter()
#RANSAC Plane Segmentation
seg = cloud_filtered.make_segmenter()
seg.set_model_type(pcl.SACMODEL_PLANE)
seg.set_method_type(pcl.SAC_RANSAC)
max_distance = 0.01
seg.set_distance_threshold(max_distance)
inliers, coefficients = seg.segment()
#Extract inliers and outliers
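        # The inliers form the dominant plane (the table top); the outliers are the objects on it.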
self.pcl_table = cloud_filtered.extract(inliers, negative=False)
self.pcl_objects = cloud_filtered.extract(inliers, negative=True)
#Convert PCL data to ROS messages
ros_cloud_objects = pcl_to_ros(self.pcl_objects)
ros_cloud_table = pcl_to_ros(self.pcl_table)
#Publish ROS messages
self.pcl_object_pub.publish(ros_cloud_objects)
self.pcl_table_pub.publish(ros_cloud_table)
def detectTable(self):
pcl_cluster_arr = self.pcl_table.to_array()
centroid = np.mean(pcl_cluster_arr , axis=0)[:3]
centroid_point = Point()
centroid_point.x = np.asscalar(centroid[0])
centroid_point.y = np.asscalar(centroid[1])
centroid_point.z = np.asscalar(centroid[2])
centroid_point = transformer.transform_point(centroid_point, 'camera_rgb_frame2' , 'base_link')
tablePose = PoseStamped()
tablePose.header.frame_id = 'base_link'
tablePose.header.stamp = rospy.Time.now()
tablePose.pose.position = centroid_point
tablePose.pose.orientation.x = 0
tablePose.pose.orientation.y = -0.7071
tablePose.pose.orientation.z = 0
tablePose.pose.orientation.w = 0.7071
# req = AddPlaneRequest()
# req.name = "table"
# req.pose = tablePose
# self.addPlane(req)
self.tablePose_pub.publish(tablePose)
return centroid_point.z
def detectTable_cb(self, req):
self.clear_octomap()
self.pclFilter()
self.detectTable()
return EmptyResponse()
def detect(self, req):
self.clear_octomap()
self.pclFilter()
tableHeight = self.detectTable()
cloud_objects = self.pcl_objects
cluster_indices = euclidiean_cluster(cloud_objects)
imagePub.capture_image()
objPoseMsg = ObjectPose()
for index , pts_list in enumerate(cluster_indices):
pcl_cluster = cloud_objects.extract(pts_list)
pcl_cluster_arr = pcl_cluster.to_array()
centroid = np.mean(pcl_cluster_arr , axis=0)[:3]
grasp_point = Point()
grasp_point.x = np.asscalar(centroid[0])
grasp_point.y = np.asscalar(centroid[1])
grasp_point.z = np.asscalar(centroid[2])
#Transform the grasp_point to base_link
grasp_point = transformer.transform_point(grasp_point, 'camera_rgb_frame2' , 'base_link')
#check if the centroid of object is above the table
if(grasp_point.z < tableHeight):
continue
#Grasp point above the object
grasp_point.z = tableHeight + 2*(grasp_point.z-tableHeight)
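            # Mirror the centroid height about the table: the centroid sits roughly at half the
            # object height, so this approximates the top of the object as the grasp height.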
print(grasp_point.z)
max_val = np.max(pcl_cluster_arr, axis=0)[:2]
min_val = np.min(pcl_cluster_arr, axis=0)[:2]
x1, y1 = mapToImage(min_val[0], min_val[1], centroid[2])
x2, y2 = mapToImage(max_val[0], max_val[1], centroid[2])
#Classifier
label = classifier.predict(pcl_cluster)
rospy.loginfo("DETECTED " + label)
imagePub.draw_rectangle_with_label([x1, y1, x2, y2], label)
imagePub.publish_image()
objPoseMsg.name = label
objPoseMsg.pose.header.frame_id = 'base_link'
objPoseMsg.pose.header.stamp = rospy.Time.now()
objPoseMsg.pose.pose.position = grasp_point
            # TODO REMOVE this before submission
# width = np.asscalar(max_val[0] - min_val[0])
# objPoseMsg.pose.pose.orientation.w = width
# hight = np.asscalar(max_val[1] - min_val[1])
# objPoseMsg.pose.pose.orientation.z = hight
self.detection_info_pub.publish(objPoseMsg)
rospy.sleep(0.1)
#print("%d, %d, %d, %d %0.5f" % (x1, y1, x2, y2, width))
return EmptyResponse()
if __name__ == '__main__':
rospy.init_node('pclFilterAndobjDetection')
transformer = Transformer()
classifier = Classifier()
objDetector = DetectObject()
imagePub = ImagePub()
rospy.loginfo("Started Object detection")
rospy.spin()
```
#### File: perception/scripts/transform_helper.py
```python
import tf2_ros
import tf2_geometry_msgs
import rospy
class Transformer:
def __init__(self):
self.tf_buffer = tf2_ros.Buffer()
self.listener = tf2_ros.TransformListener(self.tf_buffer)
def transform_pose(self, input_pose, from_frame, to_frame):
pose_stamped = tf2_geometry_msgs.PoseStamped()
pose_stamped.pose = input_pose
pose_stamped.header.frame_id = from_frame
pose_stamped.header.stamp = rospy.Time.now()
try:
# ** It is important to wait for the listener to start listening. Hence the rospy.Duration(1)
output_pose_stamped = self.tf_buffer.transform(pose_stamped, to_frame, rospy.Duration(1))
return output_pose_stamped.pose
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
raise
def transform_point(self, input_point, from_frame, to_frame):
point_stamped = tf2_geometry_msgs.PointStamped()
point_stamped.point = input_point
point_stamped.header.frame_id = from_frame
point_stamped.header.stamp = rospy.Time.now()
try:
# ** It is important to wait for the listener to start listening. Hence the rospy.Duration(1)
output_point_stamped = self.tf_buffer.transform(point_stamped, to_frame, rospy.Duration(1))
return output_point_stamped.point
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
raise
```
#### File: perception/scripts/transform_service.py
```python
import rospy
from transform_helper import Transformer
from perception.srv import *
def get_transform_point_cb(req):
resp = GetTransformPointResponse()
resp.point = transformer.transform_point(req.point, req.from_frame, req.to_frame)
return resp
def get_transform_pose_cb(req):
resp = GetTransformPoseResponse()
resp.pose = transformer.transform_pose(req.pose, req.from_frame, req.to_frame)
return resp
if __name__ == '__main__':
rospy.init_node('transformer_service')
transformer = Transformer()
rospy.Service('get_transform_point', GetTransformPoint, get_transform_point_cb)
rospy.Service('get_transform_pose', GetTransformPose, get_transform_pose_cb)
rospy.loginfo('\033[94m Started Transformer service \033[0m')
rospy.spin()
``` |
{
"source": "JoviaNierenberg/project5",
"score": 3
} |
#### File: project5/test/test_kmeans.py
```python
import pytest
import numpy as np
from scipy.spatial.distance import cdist
from cluster import KMeans, make_clusters
# Write your k-means unit tests here
def test_num_labels_types_is_k():
"""
Tests that the number of clusters produced is k
"""
clusters, labels = make_clusters(k=4, scale=1)
km = KMeans(k=4)
km.fit(clusters)
created_labels = km.predict(clusters)
should_be_k = np.shape(np.unique(created_labels))[0]
assert should_be_k==4
def test_vals():
"""
    Tests the error value and two centroid values when k == 7
"""
clusters, labels = make_clusters(k=4, scale=1)
km = KMeans(k=7)
km.fit(clusters)
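    # NOTE: the exact values below assume deterministic data generation and centroid
    # initialisation (fixed random seed); with a different seed these assertions may fail.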
assert km.get_error() == 1.4503012126641381
assert km.get_centroids()[0,0]==7.875495297338064
assert km.get_centroids()[6,1]==-4.171662182273182
``` |
{
"source": "JovianML/opendatasets",
"score": 3
} |
#### File: opendatasets/opendatasets/__init__.py
```python
import importlib
from opendatasets.utils.network import download_url, is_url
from opendatasets.utils.googledrive import is_google_drive_url, download_google_drive
import os
from opendatasets._version import __version__
from opendatasets.utils.kaggle_api import download_kaggle_dataset, is_kaggle_url
from opendatasets.utils.archive import extract_archive
def download(dataset_id_or_url, data_dir='.', force=False, dry_run=False, **kwargs):
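    # Dispatch order: Kaggle dataset URL -> Google Drive URL -> plain URL -> a bundled
    # dataset id resolved to a module under opendatasets.datasets.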
# Check for a Kaggle dataset URL
if is_kaggle_url(dataset_id_or_url):
return download_kaggle_dataset(dataset_id_or_url, data_dir=data_dir, force=force, dry_run=dry_run)
# Check for Google Drive URL
if is_google_drive_url(dataset_id_or_url):
return download_google_drive(dataset_id_or_url, data_dir)
# Download a raw URL
if is_url(dataset_id_or_url):
return download_url(dataset_id_or_url, data_dir)
dataset_id = dataset_id_or_url
data_dir = os.path.join(data_dir, dataset_id_or_url)
if not os.path.exists(data_dir):
os.makedirs(data_dir)
dataset = importlib.import_module('opendatasets.datasets.' + dataset_id)
if dry_run:
print('This is a dry run. URLs will be displayed but the files will not be downloaded.')
return dataset.download(dataset_id=dataset_id, data_dir=data_dir, dry_run=dry_run, **kwargs)
def version():
return __version__
``` |
{
"source": "JovianOrigin/drf-auth-jwt",
"score": 2
} |
#### File: drf-auth-jwt/tests/test_utils.py
```python
import json
import base64
import jwt.exceptions
from django.test import TestCase
from rest_framework_jwt import utils
from rest_framework_jwt.compat import get_user_model
from rest_framework_jwt.settings import api_settings, DEFAULTS
User = get_user_model()
def base64url_decode(input):
rem = len(input) % 4
if rem > 0:
input += b'=' * (4 - rem)
return base64.urlsafe_b64decode(input)
class UtilsTests(TestCase):
def setUp(self):
self.username = 'jpueblo'
self.email = '<EMAIL>'
self.user = User.objects.create_user(self.username, self.email)
def test_jwt_payload_handler(self):
payload = utils.jwt_payload_handler(self.user)
self.assertTrue(isinstance(payload, dict))
self.assertEqual(payload['username'], self.username)
self.assertTrue('exp' in payload)
def test_jwt_encode(self):
payload = utils.jwt_payload_handler(self.user)
token = utils.jwt_encode_handler(payload)
payload_data = base64url_decode(token.split('.')[1].encode('utf-8'))
payload_from_token = json.loads(payload_data.decode('utf-8'))
self.assertEqual(payload_from_token, payload)
def test_jwt_decode(self):
payload = utils.jwt_payload_handler(self.user)
token = utils.jwt_encode_handler(payload)
decoded_payload = utils.jwt_decode_handler(token)
self.assertEqual(decoded_payload, payload)
def test_jwt_response_payload(self):
payload = utils.jwt_payload_handler(self.user)
token = utils.jwt_encode_handler(payload)
response_data = utils.jwt_response_payload_handler(token)
self.assertEqual(response_data, dict(token=token))
def test_jwt_decode_verify_exp(self):
api_settings.JWT_VERIFY_EXPIRATION = False
payload = utils.jwt_payload_handler(self.user)
payload['exp'] = 1
token = utils.jwt_encode_handler(payload)
utils.jwt_decode_handler(token)
api_settings.JWT_VERIFY_EXPIRATION = True
class TestAudience(TestCase):
def setUp(self):
api_settings.JWT_AUDIENCE = 'my_aud'
self.username = 'jpueblo'
self.email = '<EMAIL>'
self.user = User.objects.create_user(self.username, self.email)
return super(TestAudience, self).setUp()
def test_fail_audience_missing(self):
payload = utils.jwt_payload_handler(self.user)
del payload['aud']
token = utils.jwt_encode_handler(payload)
with self.assertRaises(jwt.exceptions.MissingRequiredClaimError):
utils.jwt_decode_handler(token)
def test_fail_audience_wrong(self):
payload = utils.jwt_payload_handler(self.user)
payload['aud'] = 'my_aud2'
token = utils.jwt_encode_handler(payload)
with self.assertRaises(jwt.exceptions.InvalidAudienceError):
utils.jwt_decode_handler(token)
def test_correct_audience(self):
payload = utils.jwt_payload_handler(self.user)
token = utils.jwt_encode_handler(payload)
decoded_payload = utils.jwt_decode_handler(token)
self.assertEqual(decoded_payload, payload)
def tearDown(self):
api_settings.JWT_AUDIENCE = DEFAULTS['JWT_AUDIENCE']
class TestIssuer(TestCase):
def setUp(self):
api_settings.JWT_ISSUER = 'example.com'
self.username = 'jpueblo'
self.email = '<EMAIL>'
self.user = User.objects.create_user(self.username, self.email)
return super(TestIssuer, self).setUp()
def test_fail_issuer_missing(self):
payload = utils.jwt_payload_handler(self.user)
del payload['iss']
token = utils.jwt_encode_handler(payload)
with self.assertRaises(jwt.exceptions.MissingRequiredClaimError):
utils.jwt_decode_handler(token)
def test_fail_issuer_wrong(self):
payload = utils.jwt_payload_handler(self.user)
payload['iss'] = 'example2.com'
token = utils.jwt_encode_handler(payload)
with self.assertRaises(jwt.exceptions.InvalidIssuerError):
utils.jwt_decode_handler(token)
def test_correct_issuer(self):
payload = utils.jwt_payload_handler(self.user)
token = utils.jwt_encode_handler(payload)
decoded_payload = utils.jwt_decode_handler(token)
self.assertEqual(decoded_payload, payload)
def tearDown(self):
api_settings.JWT_ISSUER = DEFAULTS['JWT_ISSUER']
``` |
{
"source": "JoviCastillo/TH-Project-1-guessing-game-",
"score": 4
} |
#### File: JoviCastillo/TH-Project-1-guessing-game-/guessing_game.py
```python
import random
highscore = []
def not_in_range(guess_it):
"""This is to check that the numbers inputted by the user are in range,
and will let the user know. If the numbers are in range then it passes.
"""
if guess_it < 1:
print('I am not thinking of negative numbers!')
elif guess_it > 10:
print('That number is way bigger than 10!')
else:
pass
def new_game(tries):
"""After the user has guessed the number correctly, the game
will ask the player if they would like to play again. Yes will start
the game again. No will exit the game. Highscore will be displayed
by the lowest amount of tries recorded.
"""
play_again = input('Would you like to play again? (Yes/No) ')
if play_again.upper() == 'YES':
highscore.append(tries)
        highscore.sort()
print('The highscore is {}.'.format(highscore[0]))
start_game()
elif play_again.upper() == 'NO':
exit()
    else:
        print('Please answer yes or no.')
        new_game(tries)
def start_game(): # title screen of the game
"""This is the start of the game which include the title screen and
is the main function that runs all the other functions as well.
"""
print('-' * 40)
print('Welcome to the Number Guessing Game!!!')
print('-' * 40)
print('I am thinking of a number between 1-10.')
random_number = random.randint(1, 10)
tries = 0
while True:
try:
guess_it = int(input('Can you guess it?: '))
except ValueError:
print('I said number, not gibberish!')
else:
while guess_it != random_number:
not_in_range(guess_it)
tries += 1
if guess_it > random_number:
print('That is too high!')
elif guess_it < random_number:
print('That is too low')
break
else:
print('You guessed it right! Your number was {}.'.format(random_number))
print('It took you {} tries.'.format(tries))
break
new_game(tries)
if __name__ == '__main__':
# Kick off the program by calling the start_game function.
start_game()
``` |
{
"source": "jovicigor/opensanctions",
"score": 2
} |
#### File: opensanctions/opensanctions/cli.py
```python
import click
import logging
from followthemoney.cli.util import write_object
from opensanctions.core import Target, Context, setup
@click.group(help="OpenSanctions ETL toolkit")
@click.option("-v", "--verbose", is_flag=True, default=False)
@click.option("-q", "--quiet", is_flag=True, default=False)
def cli(verbose=False, quiet=False):
level = logging.INFO
if quiet:
level = logging.ERROR
if verbose:
level = logging.DEBUG
setup(log_level=level)
@cli.command("dump", help="Export the entities from a target")
@click.argument("target", default=Target.ALL, type=click.Choice(Target.names()))
@click.option("-o", "--outfile", type=click.File("w"), default="-")
def dump_target(target, outfile):
target = Target.get(target)
for dataset in target.datasets:
# TODO: consolidate the data
for entity in dataset.store:
write_object(outfile, entity)
@cli.command("crawl", help="Crawl entities into the given target")
@click.argument("target", default=Target.ALL, type=click.Choice(Target.names()))
def crawl(target):
target = Target.get(target)
for dataset in target.datasets:
Context(dataset).crawl()
```
#### File: opensanctions/core/target.py
```python
import yaml
from banal import ensure_list
from ftmstore import get_dataset as get_store
from opensanctions import settings
class Target(object):
"""A target (think: Makefile target) is a unit of execution of crawlers, and
a grouping of data. There are two types: datasets (which relate to a specific
data source), and collections (which group datasets into more useful units)."""
ALL = "all"
def __init__(self, type_, file_path, config):
self.type = type_
self.file_path = file_path
self.name = config.get("name", file_path.stem)
self.title = config.get("title", self.name)
self.description = config.get("description", "")
# Collections can be part of other collections.
collections = ensure_list(config.get("collections"))
if self.name != self.ALL:
collections.append(self.ALL)
self.collections = set(collections)
@property
def store(self):
name = f"{self.type}_{self.name}"
return get_store(name, database_uri=settings.DATABASE_URI)
@classmethod
def _from_metadata(cls, file_path):
from opensanctions.core.dataset import Dataset
from opensanctions.core.collection import Collection
with open(file_path, "r") as fh:
config = yaml.load(fh, Loader=yaml.SafeLoader)
type_ = config.get("type", Dataset.TYPE)
type_ = type_.lower().strip()
if type_ == Collection.TYPE:
return Collection(file_path, config)
if type_ == Dataset.TYPE:
return Dataset(file_path, config)
@classmethod
def _load_cache(cls):
if not hasattr(cls, "_cache"):
cls._cache = {}
for glob in ("**/*.yml", "**/*.yaml"):
for file_path in settings.METADATA_PATH.glob(glob):
target = cls._from_metadata(file_path)
cls._cache[target.name] = target
return cls._cache
@classmethod
def all(cls):
return cls._load_cache().values()
@classmethod
def get(cls, name):
return cls._load_cache().get(name)
@classmethod
def names(cls):
"""An array of all target names found in the metadata path."""
return [target.name for target in cls.all()]
def to_dict(self):
return {
"name": self.name,
"type": self.type,
"title": self.title,
"description": self.description,
}
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return hash(self.name)
```
#### File: opensanctions/crawlers/au_dfat_sanctions.py
```python
import xlrd
# from xlrd.xldate import xldate_as_datetime
from collections import defaultdict
from pprint import pprint # noqa
from datetime import datetime
from normality import slugify
from followthemoney import model
def clean_reference(ref):
if isinstance(ref, (int, float)):
return int(ref)
number = ref
while len(number):
try:
return int(number)
except Exception:
number = number[:-1]
raise ValueError()
def parse_reference(context, reference, rows):
entity = context.make("LegalEntity")
entity.make_id("AUDFAT", reference)
entity.add("sourceUrl", context.dataset.url)
sanction = context.make("Sanction")
sanction.make_id("Sanction", entity.id)
sanction.add("authority", context.dataset.publisher.title)
sanction.add("entity", entity)
for row in rows:
if row.pop("type") == "Individual":
entity.schema = model.get("Person")
name = row.pop("name_of_individual_or_entity", None)
if row.pop("name_type") == "aka":
entity.add("alias", name)
else:
entity.add("name", name)
entity.add("address", row.pop("address"))
entity.add("notes", row.pop("additional_information"))
sanction.add("program", row.pop("committees"))
entity.add("nationality", row.pop("citizenship"), quiet=True)
entity.add("birthDate", row.pop("date_of_birth"), quiet=True)
entity.add("birthPlace", row.pop("place_of_birth"), quiet=True)
entity.add("status", row.pop("listing_information"), quiet=True)
control_date = int(row.pop("control_date"))
base_date = datetime(1900, 1, 1).toordinal()
dt = datetime.fromordinal(base_date + control_date - 2)
sanction.add("modifiedAt", dt)
entity.add("modifiedAt", dt)
entity.context["updated_at"] = dt.isoformat()
context.emit(entity)
context.emit(sanction)
def crawl(context):
context.fetch_artifact("source.xls", context.dataset.data.url)
xls = xlrd.open_workbook(context.get_artifact_path("source.xls"))
ws = xls.sheet_by_index(0)
headers = [slugify(h, sep="_") for h in ws.row_values(0)]
references = defaultdict(list)
for r in range(1, ws.nrows):
row = ws.row(r)
row = dict(zip(headers, row))
for header, cell in row.items():
if cell.ctype == 2:
row[header] = str(int(cell.value))
elif cell.ctype == 0:
row[header] = None
else:
row[header] = cell.value
reference = clean_reference(row.get("reference"))
references[reference].append(row)
for ref, rows in references.items():
parse_reference(context, ref, rows)
``` |
{
"source": "jovil210/lab4_logiciel",
"score": 3
} |
#### File: lab4_logiciel/server/TwitterAPI.py
```python
import requests
BEARER_TOKEN = '<KEY>'
#API_KEY_SECRET = '<KEY>'
#API_KEY = '<KEY>'
class TwitterAPI:
@staticmethod
def create_twitter_headers():
headers = {'Authorization': f'Bearer {BEARER_TOKEN}'}
return headers
@staticmethod
def create_twitter_url(keyword, max_results=10):
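        # Twitter API v2 recent-search endpoint; max_results is capped by the API
        # (10-100 per request at the time of writing).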
search_url = 'https://api.twitter.com/2/tweets/search/recent'
query_params = {
'query': keyword,
'max_results': max_results,
'expansions': 'author_id,in_reply_to_user_id,geo.place_id',
'tweet.fields': 'id,text,author_id,in_reply_to_user_id,geo,conversation_id,created_at,lang,'
'public_metrics,referenced_tweets,reply_settings,source',
'user.fields': 'id,name,username,created_at,description,public_metrics,verified',
'place.fields': 'full_name,id,country,country_code,geo,name,place_type',
'next_token': {}
}
return search_url, query_params
@staticmethod
def query_twitter_api(url, headers, params):
response = requests.request('GET', url, headers=headers, params=params)
return response.json()
``` |
{
"source": "jovillarroelb/project_fyyur",
"score": 3
} |
#### File: jovillarroelb/project_fyyur/forms.py
```python
from datetime import datetime
from flask_wtf import FlaskForm as Form
from wtforms import (
StringField,
SelectField,
SelectMultipleField,
DateTimeField,
BooleanField
)
from wtforms.fields.core import BooleanField
from wtforms.validators import DataRequired, URL
from enums import Genre, State
class VenueForm(Form):
name = StringField(
'name',
validators=[DataRequired()]
)
city = StringField(
'city',
validators=[DataRequired()]
)
state = SelectField(
'state',
validators=[DataRequired()],
choices=State.choices()
)
address = StringField(
'address',
validators=[DataRequired()]
)
phone = StringField(
'phone',
validators=[DataRequired()]
)
image_link = StringField(
'image_link'
)
website = StringField(
'website',
)
seeking_talent = BooleanField(
'seeking_talent',
)
seeking_description = StringField(
'seeking_description',
)
genres = SelectMultipleField(
'genres',
validators=[DataRequired()],
choices=Genre.choices(),
)
facebook_link = StringField(
'facebook_link',
)
def validate(self):
"""Define a custom validate method in your Form:"""
rv = Form.validate(self)
if not rv:
return False
if not set(self.genres.data).issubset(dict(Genre.choices()).keys()):
self.genres.errors.append('Invalid genre.')
return False
if self.state.data not in dict(State.choices()).keys():
self.state.errors.append('Invalid state.')
return False
        # validation passed
return True
class ArtistForm(Form):
name = StringField(
'name',
validators=[DataRequired()]
)
city = StringField(
'city',
validators=[DataRequired()]
)
state = SelectField(
'state',
validators=[DataRequired()],
choices=State.choices()
)
phone = StringField(
'phone',
validators=[DataRequired()]
)
website = StringField(
'website',
)
seeking_venue = BooleanField(
'seeking_venue',
)
seeking_description = StringField(
'seeking_description',
)
image_link = StringField(
'image_link',
)
genres = SelectMultipleField(
'genres',
validators=[DataRequired()],
choices=Genre.choices()
)
facebook_link = StringField(
'facebook_link',
)
def validate(self):
"""Define a custom validate method in your Form:"""
rv = Form.validate(self)
if not rv:
return False
if not set(self.genres.data).issubset(dict(Genre.choices()).keys()):
self.genres.errors.append('Invalid genre.')
return False
if self.state.data not in dict(State.choices()).keys():
self.state.errors.append('Invalid state.')
return False
# if pass validation
return True
class ShowForm(Form):
artist_id = StringField(
'artist_id',
validators=[DataRequired()]
)
venue_id = StringField(
'venue_id',
validators=[DataRequired()]
)
start_time = DateTimeField(
'start_time',
validators=[DataRequired()],
        default=datetime.today  # pass the callable so "now" is evaluated per request, not at import time
)
``` |
{
"source": "Jovioluiz/IA",
"score": 4
} |
#### File: IA/Tarefas RNAs/backpropagation.py
```python
#task 5
#<NAME>
import numpy as np
#sigmoid function
def sigmoid(x):
return 1/(1 + np.exp(-x))
#MLP architecture
n_input = 3
n_hidden = 4
n_output = 2
#input value vector (arbitrary)
x = np.array([1, 2, 3])
target = 0.6
learnrate = 0.5
#hidden layer weights
weights_input_hidden = np.array([[0.2, 0.1, -0.08, -0.1],
[0.6, -0.8, 0.05, 0.02],
[0.5, -0.6, -0.01, -0.07]])
#output layer weights
weights_hidden_output = np.array([[0.1, -0.3],
[-0.15, 0.12],
[-0.03, 0.03],
[-0.02, 0.02]])
#hidden layer
#compute the linear combination of inputs and synaptic weights
hidden_layer_input = np.dot(x, weights_input_hidden)
hidden_layer_output = sigmoid(hidden_layer_input)
#output layer
output_layer_in = np.dot(hidden_layer_output, weights_hidden_output)
#apply the activation function
output = sigmoid(output_layer_in)
print('The network outputs are: {}'.format(output))
#backward pass
error = target - output
#compute the error term
output_error_term = error * output * (1 - output)
hidden_error = np.dot(weights_hidden_output, output_error_term)
hidden_error_term = hidden_error * hidden_layer_output * (1 - hidden_layer_output)
delta_w_h_o = learnrate * output_error_term * hidden_layer_output[:, None]
print(delta_w_h_o)
print('\n')
delta_w_i_h = learnrate * hidden_error_term * x[:, None]
print(delta_w_i_h)
print('\n')
weights_input_hidden = weights_input_hidden + delta_w_i_h  # apply the update (the delta already includes the learning rate)
print('Updated input-to-hidden weights: {}'.format(weights_input_hidden))
print('\n')
weights_hidden_output = weights_hidden_output + delta_w_h_o  # apply the update (the delta already includes the learning rate)
print('Updated hidden-to-output weights: {}'.format(weights_hidden_output))
```
#### File: IA/Tarefas RNAs/gradienteDescendente.py
```python
import numpy as np
#sigmoid function
def sigmoid(x):
return 1/(1 + np.exp(-x))
#derivative of the sigmoid function
def sigmoid_prime(x):
return sigmoid(x) * (1-sigmoid(x))
#learning rate
learnrate = 0.5
x = np.array([1, 2, 3, 4])
y = np.array([0.5])#expected value
bies = 0.5
#initial weights
w = np.random.randn(4)/10
epocas = 100
del_w = 0
for e in range(epocas):
h = np.dot(x, w) + bies
nn_output = sigmoid(h)
    #compute the neural network error
error = y - nn_output
    #compute the error term
erro_term = error * sigmoid_prime(h)
    #compute the update step
del_w = learnrate * erro_term * x
    #apply the new weights
w = w + del_w
    print('Network output: {}' .format(nn_output))
    print('Error: {}'.format(error))
if nn_output == 0.5:
break
```
#### File: IA/Tarefas RNAs/rna_mpl.py
```python
import numpy as np
#sigmoid function
def sigmoid(x):
return 1/(1 + np.exp(-x))
#MLP architecture
n_input = 3
n_hidden = 4
n_output = 2
#input value vector (arbitrary)
x = np.array([1, 2, 3])
#hidden layer weights
weights_in_hidden = np.array([[0.2, 0.1, -0.9, 0.03],
[0.6, -0.8,0.9, 0.02],
[0.5, -0.6, 0.1, 0.01]])
#output layer weights
weights_hidden_out = np.array([[-0.18, 0.11],
[-0.09, 0.05],
[-0.04, 0.05],
[-0.02, 0.07]])
#forward pass through the network
#hidden layer
#compute the linear combination of inputs and synaptic weights
#hidden layer input
hidden_layer_in = np.dot(x, weights_in_hidden)
#hidden layer output
hidden_layer_out = sigmoid(hidden_layer_in)
#output layer
output_layer_in = np.dot(hidden_layer_out, weights_hidden_out)
#apply the activation function
output_layer_out = sigmoid(output_layer_in)
print('The network outputs are {}' .format(output_layer_out))
``` |
{
"source": "jovi-s/PeekingDuck",
"score": 3
} |
#### File: PeekingDuck/scripts/check_links.py
```python
import urllib.request
from pathlib import Path
from typing import Iterator, List, Tuple, Union
import markdown
from bs4 import BeautifulSoup
from texttable import Texttable
# Currently not in use because Sphinx generated html files
# do not show in the Peekingduck repo on github
#
# def get_html():
# # search path is hard coded with ref of this script location
# return [
# path
# for path in walk(Path.cwd() / "docs" / "build" / "html")
# if path.suffix == ".html"
# ]
def walk(top: Path) -> Iterator[Path]:
"""Walks a given directory tree using pathlib.Path.
Returns:
(Iterator): An iterator of the file paths in the directory tree
"""
for path in top.iterdir():
if path.is_dir():
yield from walk(path)
continue
yield path.resolve()
def get_md_rst_paths() -> List[Path]:
"""Returns all .md and .rst files in the repository"""
return [path for path in walk(Path.cwd()) if path.suffix in (".md", ".rst")]
def check_for_faulty_links(
file_paths: List[Path],
) -> List[Tuple[Path, str, Union[int, Path]]]:
"""Returns faulty links from documentation files in the repository.
Parse the provided .md and .rst files for faulty hyperlinks or faulty
relative path links. For URLs, the current implementation only returns links
which give HTTP 404.
Args:
file_paths (List[Path]): File paths of all .md and .rst files in the
repository.
Returns:
(List[Tuple[Path, str, Union[int, Path]]]): A list of file paths in
which faulty links are found, the corresponding faulty links, and
the root folder/request error code.
"""
faulty_links: List[Tuple[Path, str, Union[int, Path]]] = []
for path in file_paths:
print(f"===== Checking {path}")
with open(path, "r", encoding="utf-8") as infile:
content = infile.read()
content_html = markdown.markdown(content)
soup = BeautifulSoup(content_html, "html.parser")
img_links = [
tag["src"]
for tag in soup.find_all(
lambda tag: tag.name == "img" and tag.get("src")
)
]
href_links = [
tag["href"]
for tag in soup.find_all(
lambda tag: tag.name == "a" and tag.get("href")
)
]
# "." filters out section links, split("#")[0] to filter out URI
# fragments
all_links = [
link.split("#")[0]
for link in filter(lambda link: "." in link, img_links + href_links)
]
for link in all_links:
if link.startswith("http"):
try:
# Validated the URL to start with "http"
urllib.request.urlopen(link) # nosec
except urllib.error.HTTPError as error:
# In this implementation only 404 is flagged for broken links
# 404 = http page not found error
# if statement can be removed/adjusted to flag multiple error
# codes such as 404,403,408...
if error.code == 404:
# path is the current file being parsed
# link is the link found in the current parsed file
                            # error.code is the exception code
rel_path = path.relative_to(Path.cwd())
faulty_links.append((rel_path, link, error.code))
else:
if not (path.parent / link).exists():
# path is the current file being parsed
# link is the link found in the current parsed file
# root is the root folder of the filepath of current file
condition = ["/peekingduck", "pipeline", "nodes"]
if link.split(".")[:3] != condition:
rel_path = path.relative_to(Path.cwd())
faulty_links.append((rel_path, link, rel_path.parent))
print(f"Checked {path}")
return faulty_links
def print_output(faulty_links: List[Tuple[Path, str, Union[int, Path]]]) -> None:
"""Displays the list of file paths and faulty links in a table
Args:
faulty_links (List[Tuple[Path, str, Union[int, Path]]]): A list of file
paths in which faulty links are found, the corresponding faulty
links, and the root folder/request error code.
"""
print("\nTable of broken links\n")
table = Texttable()
table.set_cols_width([25, 25, 20])
table.header(("Filepath", "Broken_Link", "Root_Folder / Request Error Code"))
table.add_rows(faulty_links, False)
print(table.draw())
if __name__ == "__main__":
MD_RST_PATHS = get_md_rst_paths()
print("\nCHECKING FILES")
print("-" * 50)
FAULTY_LINKS = check_for_faulty_links(sorted(MD_RST_PATHS))
print_output(FAULTY_LINKS)
``` |
{
"source": "jovi-s/supermarket-object-detection",
"score": 3
} |
#### File: src/utils/subsample_images.py
```python
import os
import shutil
import random
def subsample(main_path, sub_path, min_size=10, max_size=20, num_classes=27, seed=42):
random.seed(seed)
train_image_names = os.listdir(main_path + 'images/train/')
val_image_names = os.listdir(main_path + 'images/val/')
for mode, filenames in zip(['train', 'val'], [train_image_names, val_image_names]):
counts = [0] * num_classes
selected = set()
copied = set()
while min(counts) < min_size:
# Select a random image file
filename = random.choice(filenames)
while filename in selected:
filename = random.choice(filenames)
selected.add(filename)
            # Access the corresponding label file
label_filename = 'labels/' + mode + '/' + filename.split('.')[0] + '.txt'
super_ids = []
with open(main_path + label_filename, 'r') as f:
# Count the object instances in the file
for line in f:
super_ids.append(int(line.split()[0]))
# If mode is 'train' and any category already exceeded the max_size, skip the image
exceeded = False
if mode == 'train':
for idx in super_ids:
if counts[idx] >= max_size:
exceeded = True
break
if not exceeded:
copied.add(filename)
for idx in super_ids:
counts[idx] += 1
# Copy selected image and label file to sub_path folder
shutil.copy(main_path + 'images/' + mode + '/' + filename, # source
sub_path + 'images/' + mode + '/' + filename) # destination
shutil.copy(main_path + label_filename, # source
sub_path + label_filename) # destination
print(len(copied), counts)
main_path = 'D:/mvtec_yolo/'
sub_path = 'D:/mvtec_yolo_small/'
subsample(main_path, sub_path, min_size=15, max_size=60)
``` |
{
"source": "jovit/mc-346",
"score": 4
} |
#### File: mc-346/python/exs1.py
```python
def pares(l):
return [x for x in l if x % 2 == 0]
# build a list with the values at the even positions
def posicoes_pares(l):
return [x for ind, x in enumerate(l) if ind % 2 == 0]
# build a dictionary with the count of each element in a list
def conta_elems(l):
dic = {}
for x in l:
if x in dic:
dic[x] = dic[x] + 1
else:
dic[x] = 1
return dic
# which key is associated with the largest value in a dictionary?
def encontra_chave_maior(dic):
    maior = -1
    maior_chave = -1
    for k, x in dic.items():
        if x > maior:
            maior = x
            maior_chave = k
    return maior_chave
# what is the most common element in a list?
def most_common(l):
elem_count = conta_elems(l)
common_key = encontra_chave_maior(elem_count)
return common_key
# is one list a sublist of another?
# given 2 strings, is the end of one equal to the beginning of the other? (from the prolog project)
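# minimal sketches for the two exercises above (the signatures are assumptions,
# since the original file leaves them unimplemented)
def eh_sublista(sub, l):
    n = len(sub)
    return any(l[i:i + n] == sub for i in range(len(l) - n + 1))
def fim_igual_comeco(s1, s2):
    return any(s1.endswith(s2[:k]) for k in range(1, len(s2) + 1))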
print(pares([1,2,3,4,5,6]))
print(posicoes_pares([1,3,5,7,9,11]))
print(conta_elems([1,1,1,2,2,2,3,3,3,4,4,4,4,4,4]))
print(encontra_chave_maior({1:50, 2:10, 3:67}))
```
#### File: mc-346/python/exs3.py
```python
import time
# decorator that prints the execution time
def exectime(f):
def wrapper(*args):
currenttime = time.time()
x = f(*args)
print("Time elapsed:", time.time()-currenttime)
return x
return wrapper
# decorator that builds a string with one line per call containing the time, the arguments and the output of the function. The string is accessed via an attribute
class calllog:
def __init__(self, f):
self.f = f
self.callstring = ""
    def __call__(self, *args):
        retval = self.f(*args)
        self.callstring += "{} args={} out={}\n".format(time.time(), args, retval)
        return retval
# decorator that memoizes the function. Memoization means building a dictionary that remembers the input and output values already computed. If one of those input values is seen again, the function is not re-executed - it simply returns the memoized output value
class memoize:
def __init__(self, f):
self.f = f
self.dic = {}
def __call__(self,*args):
try:
return self.dic[args]
        except KeyError:
print("miss")
x = self.f(*args)
self.dic[args] = x
return x
# decorator that logs the arguments and the time to a file (appending to the file) given as an argument of the decorator (see the primer on decorators); a minimal sketch follows
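# minimal sketch for the exercise above (the file name handling and the line
# format are assumptions, not part of the original solution)
def filelog(filename):
    def decorator(f):
        def wrapper(*args):
            retval = f(*args)
            with open(filename, "a") as out:
                out.write("{} args={} out={}\n".format(time.time(), args, retval))
            return retval
        return wrapper
    return decorator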
@exectime
@calllog
def printstuff():
for x in range(10000):
print(x)
return None
@exectime
@memoize
def times2(x):
for x in range(100000000):
None
return x * 2
cc = calllog(printstuff)
printstuff()
cc()
cc()
print(cc.callstring)
print(times2(10))
print(times2(5))
print(times2(4))
print(times2(10))
``` |
{
"source": "jovobe/compose",
"score": 2
} |
#### File: tests/unit/cli_test.py
```python
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import shutil
import tempfile
from io import StringIO
import docker
import py
import pytest
from docker.constants import DEFAULT_DOCKER_API_VERSION
from .. import mock
from .. import unittest
from ..helpers import build_config
from compose.cli.command import get_project
from compose.cli.command import get_project_name
from compose.cli.docopt_command import NoSuchCommand
from compose.cli.errors import UserError
from compose.cli.main import TopLevelCommand
from compose.const import IS_WINDOWS_PLATFORM
from compose.project import Project
class CLITestCase(unittest.TestCase):
def test_default_project_name(self):
test_dir = py._path.local.LocalPath('tests/fixtures/simple-composefile')
with test_dir.as_cwd():
project_name = get_project_name('.')
assert 'simple-composefile' == project_name
def test_project_name_with_explicit_base_dir(self):
base_dir = 'tests/fixtures/simple-composefile'
project_name = get_project_name(base_dir)
assert 'simple-composefile' == project_name
def test_project_name_with_explicit_uppercase_base_dir(self):
base_dir = 'tests/fixtures/UpperCaseDir'
project_name = get_project_name(base_dir)
assert 'uppercasedir' == project_name
def test_project_name_with_explicit_project_name(self):
name = 'explicit-project-name'
project_name = get_project_name(None, project_name=name)
assert 'explicit-project-name' == project_name
@mock.patch.dict(os.environ)
def test_project_name_from_environment_new_var(self):
name = 'namefromenv'
os.environ['COMPOSE_PROJECT_NAME'] = name
project_name = get_project_name(None)
assert project_name == name
def test_project_name_with_empty_environment_var(self):
base_dir = 'tests/fixtures/simple-composefile'
with mock.patch.dict(os.environ):
os.environ['COMPOSE_PROJECT_NAME'] = ''
project_name = get_project_name(base_dir)
assert 'simple-composefile' == project_name
@mock.patch.dict(os.environ)
def test_project_name_with_environment_file(self):
base_dir = tempfile.mkdtemp()
try:
name = 'namefromenvfile'
with open(os.path.join(base_dir, '.env'), 'w') as f:
f.write('COMPOSE_PROJECT_NAME={}'.format(name))
project_name = get_project_name(base_dir)
assert project_name == name
# Environment has priority over .env file
os.environ['COMPOSE_PROJECT_NAME'] = 'namefromenv'
assert get_project_name(base_dir) == os.environ['COMPOSE_PROJECT_NAME']
finally:
shutil.rmtree(base_dir)
def test_get_project(self):
base_dir = 'tests/fixtures/longer-filename-composefile'
project = get_project(base_dir)
assert project.name == 'longer-filename-composefile'
assert project.client
assert project.services
def test_command_help(self):
with mock.patch('sys.stdout', new=StringIO()) as fake_stdout:
TopLevelCommand.help({'COMMAND': 'up'})
assert "Usage: up" in fake_stdout.getvalue()
def test_command_help_nonexistent(self):
with pytest.raises(NoSuchCommand):
TopLevelCommand.help({'COMMAND': 'nonexistent'})
@pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason="requires dockerpty")
@mock.patch('compose.cli.main.RunOperation', autospec=True)
@mock.patch('compose.cli.main.PseudoTerminal', autospec=True)
@mock.patch.dict(os.environ)
def test_run_interactive_passes_logs_false(self, mock_pseudo_terminal, mock_run_operation):
os.environ['COMPOSE_INTERACTIVE_NO_CLI'] = 'true'
mock_client = mock.create_autospec(docker.APIClient)
mock_client.api_version = DEFAULT_DOCKER_API_VERSION
mock_client._general_configs = {}
project = Project.from_config(
name='composetest',
client=mock_client,
config_data=build_config({
'service': {'image': 'busybox'}
}),
)
command = TopLevelCommand(project)
with pytest.raises(SystemExit):
command.run({
'SERVICE': 'service',
'COMMAND': None,
'-e': [],
'--label': [],
'--user': None,
'--no-deps': None,
'--detach': False,
'-T': None,
'--entrypoint': None,
'--service-ports': None,
'--use-aliases': None,
'--publish': [],
'--volume': [],
'--rm': None,
'--name': None,
'--workdir': None,
})
_, _, call_kwargs = mock_run_operation.mock_calls[0]
assert call_kwargs['logs'] is False
def test_run_service_with_restart_always(self):
mock_client = mock.create_autospec(docker.APIClient)
mock_client.api_version = DEFAULT_DOCKER_API_VERSION
mock_client._general_configs = {}
project = Project.from_config(
name='composetest',
client=mock_client,
config_data=build_config({
'service': {
'image': 'busybox',
'restart': 'always',
}
}),
)
command = TopLevelCommand(project)
command.run({
'SERVICE': 'service',
'COMMAND': None,
'-e': [],
'--label': [],
'--user': None,
'--no-deps': None,
'--detach': True,
'-T': None,
'--entrypoint': None,
'--service-ports': None,
'--use-aliases': None,
'--publish': [],
'--volume': [],
'--rm': None,
'--name': None,
'--workdir': None,
})
assert mock_client.create_host_config.call_args[1]['restart_policy']['Name'] == 'always'
command = TopLevelCommand(project)
command.run({
'SERVICE': 'service',
'COMMAND': None,
'-e': [],
'--label': [],
'--user': None,
'--no-deps': None,
'--detach': True,
'-T': None,
'--entrypoint': None,
'--service-ports': None,
'--use-aliases': None,
'--publish': [],
'--volume': [],
'--rm': True,
'--name': None,
'--workdir': None,
})
assert not mock_client.create_host_config.call_args[1].get('restart_policy')
def test_command_manual_and_service_ports_together(self):
project = Project.from_config(
name='composetest',
client=None,
config_data=build_config({
'service': {'image': 'busybox'},
}),
)
command = TopLevelCommand(project)
with pytest.raises(UserError):
command.run({
'SERVICE': 'service',
'COMMAND': None,
'-e': [],
'--label': [],
'--user': None,
'--no-deps': None,
'--detach': True,
'-T': None,
'--entrypoint': None,
'--service-ports': True,
'--use-aliases': None,
'--publish': ['80:80'],
'--rm': None,
'--name': None,
})
``` |
{
"source": "jovsa/jovsatools",
"score": 3
} |
#### File: Module-2/minitorch/modules.py
```python
# from .tensor import rand
# from .functions import matmul, conv2d
# from .module import Module, Parameter
# class tLinear(Module):
# def __init__(self, in_size, out_size):
# super().__init__()
# self.weights = Parameter(rand((in_size, out_size)))
# self.bias = Parameter(rand((out_size,)))
# self.out_size = out_size
# def forward(self, x):
# batch, in_size = x.shape
# return (
# self.weights.value.view(1, in_size, self.out_size)
# * x.view(batch, in_size, 1)
# ).sum(1).view(batch, self.out_size) + self.bias.value.view(1, self.out_size)
# class tLinear2(Module):
# def __init__(self, in_size, out_size):
# super().__init__()
# self.weights = Parameter(rand((in_size, out_size)))
# self.bias = Parameter(rand((out_size,)))
# self.out_size = out_size
# def forward(self, x):
# batch, in_size = x.shape
# return matmul(x, self.weights.value) + self.bias.value.view(1, self.out_size)
# class Dropout(Module):
# def __init__(self, rate):
# super().__init__()
# self.rate = rate
# def forward(self, x):
# return (rand(x.shape) / 2 + 0.5 < self.rate) * x
# class Conv2d(Module):
# def __init__(self, in_features, out_features, size):
# super().__init__()
# size1 = [size[0], size[1], in_features, out_features]
# size2 = [size[0], size[1], out_features]
# self.weights = Parameter(rand(size1))
# self.bias = Parameter(rand(size2))
# def forward(self, x):
# return conv2d(x, self.weights.value, self.bias.value)
# # class MaxPool2d(Module):
# # def __init__(self, in_features, out_features, size):
# # super().__init__()
# # def forward(self, x):
# # return conv2d(x, self.weights.value, self.bias.value)
```
#### File: Module-4/minitorch/nn.py
```python
import numpy as np
from .fast_ops import FastOps
from .tensor import rand, Function
from . import operators
from .tensor_data import (
count,
index_to_position,
broadcast_index,
MAX_DIMS,
)
from numba import njit, prange
max_reduce = FastOps.reduce(operators.max, -1e9)
def argmax(input, dim):
"""
Compute the argmax as a 1-hot tensor.
Args:
input (:class:`Tensor`): input tensor
dim (int): dimension to apply argmax
Returns:
:class:`Tensor` : tensor with 1 on highest cell in dim, 0 otherwise
"""
out = max_reduce(input, [dim])
return out == input
class Max(Function):
@staticmethod
def forward(ctx, input, dim):
"Forward of max should be max reduction"
out = max_reduce(input, [dim])
ctx.save_for_backward(input, out)
return out
@staticmethod
def backward(ctx, grad_output):
"Backward of max should be argmax (see above)"
input, out = ctx.saved_values
return (out == input) * grad_output, None
max = Max.apply
def softmax(input, dim):
r"""
Compute the softmax as a tensor.
.. math::
z_i = \frac{e^{x_i}}{\sum_i e^{x_i}}
Args:
input (:class:`Tensor`): input tensor
        dim (int): dimension to apply softmax
Returns:
:class:`Tensor` : softmax tensor
"""
e = input.exp()
partition = e.sum(dim=dim)
return e / partition
def logsoftmax(input, dim):
r"""
Compute the log of the softmax as a tensor.
.. math::
z_i = x_i - \log \sum_i e^{x_i}
See https://en.wikipedia.org/wiki/LogSumExp#log-sum-exp_trick_for_log-domain_calculations
Args:
input (:class:`Tensor`): input tensor
        dim (int): dimension to apply log-softmax
Returns:
:class:`Tensor` : log of softmax tensor
"""
e = input
mx = max(e, dim)
lse = (e - mx).exp().sum(dim=dim).log() + mx
return e - lse
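# Plain-numpy reference of the log-sum-exp trick used above (illustrative sketch,
# not part of the original module; `_x` is assumed to be a numpy array):
def _logsumexp_reference(_x, _dim):
    _mx = _x.max(axis=_dim, keepdims=True)
    return np.log(np.exp(_x - _mx).sum(axis=_dim, keepdims=True)) + _mx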
def tile(input, kernel):
"""
Reshape an image tensor for 2D pooling
Args:
input (:class:`Tensor`): batch x channel x height x width
kernel ((int, int)): height x width of pooling
Returns:
(:class:`Tensor`, int, int) : Tensor of size batch x channel x new_height x new_width x kernel_height x kernel_width as well as the new_height and new_width value.
"""
batch, channel, height, width = input.shape
kh, kw = kernel
assert height % kh == 0
assert width % kw == 0
new_width = width // kw
new_height = height // kh
x = input.view(batch, channel, new_height, kh, new_width, kw)
x = x.permute(0, 1, 2, 4, 3, 5).contiguous()
x = x.view(batch, channel, new_height, new_width, kh * kw)
return x, new_height, new_width
def maxpool2d(input, kernel):
"""
Tiled max pooling 2D
Args:
input (:class:`Tensor`): batch x channel x height x width
kernel ((int, int)): height x width of pooling
Returns:
:class:`Tensor` : pooled tensor
"""
batch, channel, height, width = input.shape
x, new_height, new_width = tile(input, kernel)
return max(x, 4).view(batch, channel, new_height, new_width)
def avgpool2d(input, kernel):
"""
Tiled average pooling 2D
Args:
input (:class:`Tensor`): batch x channel x height x width
kernel ((int, int)): height x width of pooling
Returns:
:class:`Tensor` : pooled tensor
"""
batch, channel, height, width = input.shape
x, new_height, new_width = tile(input, kernel)
return x.mean(dim=4).view(batch, channel, new_height, new_width)
count = njit()(count)
index_to_position = njit()(index_to_position)
broadcast_index = njit()(broadcast_index)
@njit(parallel=True)
def tensor_conv2d(
output,
output_shape,
output_strides,
out_size,
input,
input_shape,
input_strides,
weight,
weight_shape,
weight_strides,
reverse,
):
"""
2D Convolution implementation.
Args:
        output (array): storage for `output` tensor.
        output_shape (array): shape for `output` tensor.
        output_strides (array): strides for `output` tensor.
        out_size (int): size of the `output` tensor.
input (array): storage for `input` tensor.
input_shape (array): shape for `input` tensor.
input_strides (array): strides for `input` tensor.
        weight (array): storage for `weight` tensor.
        weight_shape (array): shape for `weight` tensor.
        weight_strides (array): strides for `weight` tensor.
reverse (bool): Compute forward (False) or backward conv
"""
batch, in_channels, height, width = input_shape
_, _, kh, kw = weight_shape
for i in prange(out_size):
out_index = np.zeros(MAX_DIMS, np.int32)
count(i, output_shape, out_index)
b = out_index[0]
oc = out_index[1]
h = out_index[2]
w = out_index[3]
for dh in range(kh):
for dw in range(kw):
ih, iw = h + dh, w + dw
if reverse:
ih, iw = h - dh, w - dw
if ih < 0 or ih >= height or iw < 0 or iw >= width:
continue
for ic in range(in_channels):
s1 = input_strides
term1 = input[s1[0] * b + s1[1] * ic + s1[2] * ih + s1[3] * iw]
s2 = weight_strides
term2 = weight[s2[0] * oc + s2[1] * ic + s2[2] * dh + s2[3] * dw]
output[i] += term1 * term2
@njit(parallel=True)
def _conv2d_back_weight(
grad_output,
grad_output_shape,
grad_output_strides,
input,
input_shape,
input_strides,
grad_weight,
grad_weight_shape,
grad_weight_strides,
grad_weight_size,
):
batch, in_channels, height, width = input_shape
for i in prange(grad_weight_size):
grad_weight_index = np.zeros(MAX_DIMS, np.int32)
count(i, grad_weight_shape, grad_weight_index)
oc = grad_weight_index[0]
ic = grad_weight_index[1]
dh = grad_weight_index[2]
dw = grad_weight_index[3]
for h in range(height):
for w in range(width):
ih, iw = h - dh, w - dw
if ih < 0 or ih >= height or iw < 0 or iw >= width:
continue
for b in range(batch):
s1 = input_strides
term1 = input[s1[0] * b + s1[1] * ic + s1[2] * h + s1[3] * w]
s2 = grad_output_strides
term2 = grad_output[
s2[0] * b + s2[1] * oc + s2[2] * ih + s2[3] * iw
]
grad_weight[i] += term1 * term2
class Conv2dFun(Function):
@staticmethod
def forward(ctx, input, weight):
"""
Args:
input (:class:`tensor`) : batch x in_channel x h x w
weight (:class:`tensor`) : out_channel x in_channel x kh x kw
"""
ctx.save_for_backward(input, weight)
batch, in_channels, h, w = input.shape
out_channels, in_channels2, kh, kw = weight.shape
assert in_channels == in_channels2
output = input.zeros((batch, out_channels, h, w))
tensor_conv2d(
*output.tuple(), output.size, *input.tuple(), *weight.tuple(), False
)
return output
@staticmethod
def backward(ctx, grad_output):
input, weight = ctx.saved_values
out_channels, in_channels, kh, kw = weight.shape
grad_weight = grad_output.zeros(weight.shape)
_conv2d_back_weight(
*grad_output.tuple(),
*input.tuple(),
*grad_weight.tuple(),
grad_weight.size,
)
grad_input = grad_output.zeros(input.shape)
new_weight = weight.permute(1, 0, 2, 3)
tensor_conv2d(
*grad_input.tuple(),
grad_input.size,
*grad_output.tuple(),
*new_weight.tuple(),
True,
)
return grad_input, grad_weight
conv2d = Conv2dFun.apply
def dropout(input, rate, ignore=False):
"""
Dropout dimensions based on random noise
Args:
input (:class:`Tensor`): input tensor
rate (float): probability of dropping out each dimension
ignore (bool): skip
Returns:
:class:`Tensor` : tensor with dropout dimensions
"""
if ignore:
return input
r = rand(input.shape)
drop = rate < r
return input * drop
```
#### File: minitorch/minitorch/operators.py
```python
import math
## Task 0.1
## Mathematical operators
def mul(x, y):
":math:`f(x, y) = x * y`"
# ASSIGN0.1
return x * y
# END ASSIGN0.1
def id(x):
":math:`f(x) = x`"
# ASSIGN0.1
return x
# END ASSIGN0.1
def add(x, y):
":math:`f(x, y) = x + y`"
# ASSIGN0.1
return x + y
# END ASSIGN0.1
def neg(x):
":math:`f(x) = -x`"
# ASSIGN0.1
return -x
# END ASSIGN0.1
def lt(x, y):
":math:`f(x) =` 1.0 if x is less than y else 0.0"
# ASSIGN0.1
return 1.0 if x < y else 0.0
# END ASSIGN0.1
def eq(x, y):
":math:`f(x) =` 1.0 if x is equal to y else 0.0"
# ASSIGN0.1
return 1.0 if x == y else 0.0
# END ASSIGN0.1
def max(x, y):
":math:`f(x) =` x if x is greater than y else y"
# ASSIGN0.1
return x if x > y else y
# END ASSIGN0.1
def sigmoid(x):
r"""
:math:`f(x) = \frac{1.0}{(1.0 + e^{-x})}`
(See `<https://en.wikipedia.org/wiki/Sigmoid_function>`_ .)
Calculate as
:math:`f(x) = \frac{1.0}{(1.0 + e^{-x})}` if x >=0 else :math:`\frac{e^x}{(1.0 + e^{x})}`
for stability.
"""
# ASSIGN0.1
if x >= 0:
return 1.0 / (1.0 + math.exp(-x))
else:
return math.exp(x) / (1.0 + math.exp(x))
# END ASSIGN0.1
def relu(x):
"""
:math:`f(x) =` x if x is greater than 0, else 0
(See `<https://en.wikipedia.org/wiki/Rectifier_(neural_networks)>`_ .)
"""
# ASSIGN0.1
return x if x > 0 else 0.0
# END ASSIGN0.1
def relu_back(x, y):
":math:`f(x) =` y if x is greater than 0 else 0"
# ASSIGN0.1
return y if x > 0 else 0.0
# END ASSIGN0.1
EPS = 1e-6
def log(x):
":math:`f(x) = log(x)`"
return math.log(x + EPS)
def exp(x):
":math:`f(x) = e^{x}`"
return math.exp(x)
def log_back(a, b):
return b / (a + EPS)
def inv(x):
":math:`f(x) = 1/x`"
return 1.0 / x
def inv_back(a, b):
return -(1.0 / a ** 2) * b
## Task 0.3
## Higher-order functions.
def map(fn):
"""
Higher-order map.
.. image:: figs/Ops/maplist.png
See `<https://en.wikipedia.org/wiki/Map_(higher-order_function)>`_
Args:
fn (one-arg function): Function from one value to one value.
Returns:
function : A function that takes a list, applies `fn` to each element, and returns a
new list
"""
# ASSIGN0.3
def _map(ls):
ret = []
for x in ls:
ret.append(fn(x))
return ret
return _map
# END ASSIGN0.3
def negList(ls):
"Use :func:`map` and :func:`neg` to negate each element in `ls`"
return map(neg)(ls)
def zipWith(fn):
"""
Higher-order zipwith (or map2).
.. image:: figs/Ops/ziplist.png
See `<https://en.wikipedia.org/wiki/Map_(higher-order_function)>`_
Args:
fn (two-arg function): combine two values
Returns:
        function : takes two equally sized lists `ls1` and `ls2`, produces a new list by
        applying fn(x, y) on each pair of elements.
"""
# ASSIGN0.3
def _zipWith(ls1, ls2):
ret = []
for x, y in zip(ls1, ls2):
ret.append(fn(x, y))
return ret
return _zipWith
# END ASSIGN0.3
def addLists(ls1, ls2):
"Add the elements of `ls1` and `ls2` using :func:`zipWith` and :func:`add`"
return zipWith(add)(ls1, ls2)
def reduce(fn, start):
r"""
Higher-order reduce.
.. image:: figs/Ops/reducelist.png
Args:
fn (two-arg function): combine two values
start (float): start value :math:`x_0`
Returns:
function : function that takes a list `ls` of elements
:math:`x_1 \ldots x_n` and computes the reduction :math:`fn(x_3, fn(x_2,
fn(x_1, x_0)))`
"""
# ASSIGN0.3
def _reduce(ls):
val = start
for l in ls:
val = fn(val, l)
return val
return _reduce
# END ASSIGN0.3
def sum(ls):
"""
Sum up a list using :func:`reduce` and :func:`add`.
"""
# ASSIGN0.3
return reduce(add, 0.0)(ls)
# END ASSIGN0.3
def prod(ls):
"""
Product of a list using :func:`reduce` and :func:`mul`.
"""
# ASSIGN0.3
return reduce(mul, 1.0)(ls)
# END ASSIGN0.3
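# Quick usage sketch of the helpers above (illustrative; not part of the
# original assignment):
if __name__ == "__main__":
    assert negList([1.0, -2.0]) == [-1.0, 2.0]
    assert addLists([1.0, 2.0], [3.0, 4.0]) == [4.0, 6.0]
    assert sum([1.0, 2.0, 3.0]) == 6.0
    assert prod([1.0, 2.0, 3.0]) == 6.0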
``` |
{
"source": "jovsa/jTorch",
"score": 3
} |
#### File: jTorch/project/run_mnist.py
```python
from mnist import MNIST
import jtorch
import visdom
import numpy
vis = visdom.Visdom()
mndata = MNIST("data/")
images, labels = mndata.load_training()
BACKEND = jtorch.make_tensor_functions(jtorch.FastOps)
RATE = 0.01
HIDDEN = 20
BATCH = 16
class Network(jtorch.Module):
def __init__(self):
super().__init__()
self.layer1 = MMLinear(784, HIDDEN)
self.layer2 = MMLinear(HIDDEN, HIDDEN)
self.layer3 = MMLinear(HIDDEN, 1)
def forward(self, x):
# ASSIGN1.5
h = self.layer1.forward(x).relu()
h = self.layer2.forward(h).relu()
return self.layer3.forward(h).sigmoid()
# END ASSIGN2.5
class MMLinear(jtorch.Module):
def __init__(self, in_size, out_size):
super().__init__()
r = jtorch.rand((in_size, out_size))
r.type_(BACKEND)
self.weights = jtorch.Parameter(0.1 * (r - 0.5))
r = jtorch.rand((out_size,))
r.type_(BACKEND)
self.bias = jtorch.Parameter(0.1 * (r - 0.5))
self.out_size = out_size
def forward(self, x):
# ASSIGN3.5
batch, in_size = x.shape
return jtorch.matmul(
x.view(batch, 1, in_size),
self.weights.value.view(1, in_size, self.out_size),
).view(batch, self.out_size) + self.bias.value.view(1, self.out_size)
# END ASSIGN3.5
class Conv2d(jtorch.Module):
def __init__(self, in_channels, out_channels, kh, kw):
super().__init__()
r = jtorch.rand((out_channels, in_channels, kh, kw))
r.type_(BACKEND)
self.weights = jtorch.Parameter(0.1 * (r - 0.5))
r = jtorch.rand((out_channels, 1, 1))
r.type_(BACKEND)
self.bias = jtorch.Parameter(0.1 * (r - 0.5))
def forward(self, input):
out = jtorch.Conv2dFun.apply(input, self.weights.value) + self.bias.value
return out
class Network2(jtorch.Module):
def __init__(self):
super().__init__()
self.conv1 = Conv2d(1, 4, 3, 3)
self.conv2 = Conv2d(4, 8, 3, 3)
self.linear1 = MMLinear(392, 64)
self.linear2 = MMLinear(64, 1)
# For vis
self.mid = None
self.out = None
def forward(self, x):
x = self.conv1(x).relu()
self.mid = x
x = self.conv2(x).relu()
self.out = x
x = jtorch.avgpool2d(x, (4, 4))
x = self.linear1(x.view(BATCH, 392)).relu()
x = self.linear2(x).sigmoid()
return x
ys = []
X = []
for i in range(10000):
y = labels[i]
if y == 3 or y == 5:
ys.append(1.0 if y == 3 else 0.0)
X += images[i]
val_ys = []
val_x = []
for i in range(10000, 10500):
y = labels[i]
if y == 3 or y == 5:
val_ys.append(1.0 if y == 3 else 0.0)
val_x += images[i]
vis.images(
numpy.array(val_x).reshape((len(val_ys), 1, 28, 28))[:BATCH], win="val_images"
)
model = Network2()
losses = []
for epoch in range(250):
total_loss = 0.0
cur = 0
for i, j in enumerate(range(0, len(ys), BATCH)):
if len(ys) - j <= BATCH:
continue
y = jtorch.tensor(ys[j : j + BATCH], (BATCH,))
x = jtorch.tensor(X[cur : cur + 28 * 28 * BATCH], (BATCH, 28 * 28))
x.requires_grad_(True)
y.requires_grad_(True)
y.type_(BACKEND)
x.type_(BACKEND)
# Forward
out = model.forward(x.view(BATCH, 1, 28, 28)).view(BATCH)
prob = (out * y) + (out - 1.0) * (y - 1.0)
loss = -prob.log()
(loss.sum().view(1)).backward()
total_loss += loss[0]
losses.append(total_loss)
# Update
for p in model.parameters():
if p.value.grad is not None:
p.update(p.value - RATE * (p.value.grad / float(BATCH)))
if i % 10 == 0:
correct = 0
y = jtorch.tensor(val_ys[:BATCH], (BATCH,))
x = jtorch.tensor(val_x[: (BATCH * 28 * 28)], (BATCH, 28 * 28))
out = model.forward(x.view(BATCH, 1, 28, 28)).view(BATCH)
for i in range(BATCH):
if y[i] == 1 and out[i] > 0.5:
correct += 1
if y[i] == 0 and out[i] < 0.5:
correct += 1
for channel in range(4):
vis.images(
-1 * model.mid.to_numpy()[:, channel : channel + 1],
win=f"mid_images_{channel}",
opts=dict(nrow=4, caption=f"mid_images_{channel}"),
)
for channel in range(8):
vis.images(
-1 * model.out.to_numpy()[:, channel : channel + 1],
win=f"out_images_{channel}",
opts=dict(nrow=4, caption=f"out_images_{channel}"),
)
print("Epoch ", epoch, " loss ", total_loss, "correct", correct)
# im = f"Epoch: {epoch}"
# data.graph(im, lambda x: model.forward(jtorch.tensor(x, (1, 2)))[0, 0])
# plt.plot(losses, c="blue")
# vis.matplot(plt, win="loss")
total_loss = 0.0
cur += 28 * 28 * BATCH
```
#### File: jTorch/tests/test_cuda_ops.py
```python
import jtorch
import pytest
from hypothesis import given
from .strategies import tensors, shaped_tensors, assert_close
from .test_tensor import one_arg, two_arg, reduce
# TESTS are the same as test_tensor with different backend
CudaTensorFunctions = jtorch.make_tensor_functions(jtorch.CudaOps)
@given(tensors(backend=CudaTensorFunctions))
@pytest.mark.task3_3
@pytest.mark.parametrize("fn", one_arg)
def test_one_args(fn, t1):
t2 = fn[1](t1)
for ind in t2._tensor.indices():
assert_close(t2[ind], fn[1](jtorch.Scalar(t1[ind])).data)
@given(shaped_tensors(2, backend=CudaTensorFunctions))
@pytest.mark.task3_3
@pytest.mark.parametrize("fn", two_arg)
def test_two_args(fn, ts):
t1, t2 = ts
t3 = fn[1](t1, t2)
for ind in t3._tensor.indices():
assert t3[ind] == fn[1](jtorch.Scalar(t1[ind]), jtorch.Scalar(t2[ind])).data
@given(tensors(backend=CudaTensorFunctions))
@pytest.mark.task3_3
@pytest.mark.parametrize("fn", one_arg)
def test_one_derivative(fn, t1):
jtorch.grad_check(fn[1], t1)
@given(tensors(backend=CudaTensorFunctions))
@pytest.mark.task3_3
@pytest.mark.parametrize("fn", reduce)
def test_reduce(fn, t1):
jtorch.grad_check(fn[1], t1)
@given(shaped_tensors(2, backend=CudaTensorFunctions))
@pytest.mark.task3_3
@pytest.mark.parametrize("fn", two_arg)
def test_two_grad(fn, ts):
t1, t2 = ts
jtorch.grad_check(fn[1], t1, t2)
@given(shaped_tensors(2, backend=CudaTensorFunctions))
@pytest.mark.task3_3
@pytest.mark.parametrize("fn", two_arg)
def test_two_grad_broadcast(fn, ts):
t1, t2 = ts
jtorch.grad_check(fn[1], t1, t2)
# broadcast check
jtorch.grad_check(fn[1], t1.sum(0), t2)
jtorch.grad_check(fn[1], t1, t2.sum(0))
```
#### File: jTorch/tests/test_functions.py
```python
import jtorch
import pytest
from .strategies import assert_close
@pytest.mark.task3_1
def test_mm():
a = jtorch.rand((2, 3))
b = jtorch.rand((3, 4))
c = jtorch.matmul(a, b)
c2 = (a.view(2, 3, 1) * b.view(1, 3, 4)).sum(1).view(2, 4)
print(c)
print(c2)
for ind in c._tensor.indices():
assert_close(c[ind], c2[ind])
@pytest.mark.task3_1
def test_broad_mm():
a = jtorch.rand((2, 2, 3))
b = jtorch.rand((2, 3, 4))
c = jtorch.matmul(a, b)
c2 = (a.view(2, 2, 3, 1) * b.view(2, 1, 3, 4)).sum(2).view(2, 2, 4)
print(c)
print(c2)
for ind in c._tensor.indices():
assert_close(c[ind], c2[ind])
@pytest.mark.task3_4
def test_cuda_mm():
a = jtorch.rand((2, 2, 3))
b = jtorch.rand((2, 3, 4))
c = jtorch.cuda_matmul(a, b)
c2 = (a.view(2, 2, 3, 1) * b.view(2, 1, 3, 4)).sum(2).view(2, 2, 4)
print(c)
print(c2)
for ind in c._tensor.indices():
assert_close(c[ind], c2[ind])
```
#### File: jTorch/tests/test_module.py
```python
import jtorch
import pytest
VAL = 40
class Module1(jtorch.Module):
def __init__(self):
super().__init__()
self.module_a = Module2(5)
self.module_b = Module2(10)
self.parameter_a = jtorch.Parameter(VAL)
VAL_A = 50
VAL_B = 100
class Module2(jtorch.Module):
def __init__(self, extra=0):
super().__init__()
self.parameter_a = jtorch.Parameter(VAL_A)
self.parameter_b = jtorch.Parameter(VAL_B)
self.non_parameter = 10
for i in range(extra):
self.add_parameter(f"extra_parameter_{i}", None)
@pytest.mark.task0_4
def test_module():
"Check the properties of a single module"
module = Module2()
module.eval()
assert module.mode == "eval"
module.train()
assert module.mode == "train"
assert len(module.parameters()) == 2
module = Module2(10)
assert len(module.parameters()) == 12
module = Module2(5)
named_parameters = module.named_parameters()
assert named_parameters["parameter_a"].value == VAL_A
assert named_parameters["parameter_b"].value == VAL_B
assert named_parameters["extra_parameter_0"].value is None
@pytest.mark.task0_4
def test_stacked_module():
"Check the properties of a stacked module"
module = Module1()
print(module)
module.eval()
assert module.mode == "eval"
assert module.module_a.mode == "eval"
assert module.module_b.mode == "eval"
module.train()
assert module.mode == "train"
assert module.module_a.mode == "train"
assert module.module_b.mode == "train"
assert len(module.parameters()) == 1 + 7 + 12
named_parameters = module.named_parameters()
assert named_parameters["parameter_a"].value == VAL
assert named_parameters["module_a.parameter_a"].value == VAL_A
assert named_parameters["module_a.parameter_b"].value == VAL_B
assert named_parameters["module_b.parameter_a"].value == VAL_A
assert named_parameters["module_b.parameter_b"].value == VAL_B
class ModuleA1(jtorch.Module):
def __init__(self):
super().__init__()
self.p1 = jtorch.Parameter(5)
self.a = ModuleA2()
self.b = ModuleA3()
class ModuleA2(jtorch.Module):
def __init__(self):
super().__init__()
self.p2 = jtorch.Parameter(10)
class ModuleA3(jtorch.Module):
def __init__(self):
super().__init__()
self.c = ModuleA4()
class ModuleA4(jtorch.Module):
def __init__(self):
super().__init__()
self.p3 = jtorch.Parameter(15)
@pytest.mark.task0_4
def test_stacked_module2():
np = ModuleA1().named_parameters()
assert np["p1"].value == 5
assert np["a.p2"].value == 10
assert np["b.c.p3"].value == 15
``` |
{
"source": "Jovvik/Thesis",
"score": 3
} |
#### File: Jovvik/Thesis/dotdict.py
```python
from typing import Any
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
def __getattr__(*args: Any) -> Any:
val = dict.get(*args)
return dotdict(val) if type(val) is dict else val
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
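# Minimal usage sketch (illustrative; not part of the original module):
#
#   cfg = dotdict({"model": {"lr": 1e-3}})
#   cfg.model.lr          # 0.001, nested dicts are wrapped on access
#   cfg.optimizer = "adam"
#   cfg.missing           # None, since dict.get returns None for absent keys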
``` |
{
"source": "Jow1e/jupy",
"score": 2
} |
#### File: jupy/jupy/easy_func_access.py
```python
from .tensor import apply_forward
def square(z):
return apply_forward([z], "square")
def log(z):
return apply_forward([z], "log")
def mse(y_hat, y):
return apply_forward([y_hat, y], "mse")
def cross_entropy(y_hat, y):
return apply_forward([y_hat, y], "cross_entropy")
def linear(z, weight, bias):
return apply_forward([z, weight, bias], "linear")
def sigmoid(z):
return apply_forward([z], "sigmoid")
def prelu(z, slope):
return apply_forward([z, slope], "prelu")
```
#### File: jupy/jupy/functional.py
```python
import numpy as np
# this file contains everything for calculating gradient
# e.g. functions (forward, backward)
# "gradient graph" (invisible -> we see only independent nodes as GradNode)
def add_forward(input_1, input_2):
return input_1 + input_2, None
def add_backward(grad, inputs, cache):
return grad, grad
def sub_forward(input_1, input_2):
return input_1 - input_2, None
def sub_backward(grad, inputs, cache):
return grad, -grad
def mul_forward(input_1, input_2):
return input_1 * input_2, None
def mul_backward(grad, inputs, cache):
input_1, input_2 = inputs
return grad * input_2, grad * input_1
def neg_forward(input):
return -input, None
def neg_backward(grad, inputs, cache):
return -grad,
def div_forward(input_1, input_2):
return input_1 / input_2, None
def div_backward(grad, inputs, cache):
input_1, input_2 = inputs
temp = 1 / np.square(input_2)
return grad * temp * input_2, -grad * input_1 * temp
def square_forward(input):
return np.square(input), None
def square_backward(grad, inputs, cache):
return 2 * grad * inputs[0],
def pow_forward(input, power):
out = input ** power
return out, out
def pow_backward(grad, inputs, cache):
input, power = inputs
    return grad * power * (input ** (power - 1)), grad * np.log(input) * cache  # d(x**p)/dp = ln(x) * x**p
def log_forward(input):
return np.log(input), None
def log_backward(grad, inputs, cache):
return grad / inputs[0],
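# Finite-difference sanity check for one forward/backward pair (illustrative
# sketch, not part of the original module; the test point and tolerance are
# assumptions):
def _check_square_grad(x=3.0, eps=1e-6):
    out, cache = square_forward(x)
    analytic, = square_backward(1.0, [x], cache)
    numeric = (square_forward(x + eps)[0] - square_forward(x - eps)[0]) / (2 * eps)
    return abs(analytic - numeric) < 1e-4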
``` |
{
"source": "jowabels/vt-shiznit",
"score": 3
} |
#### File: jowabels/vt-shiznit/vt-shiz.py
```python
import requests
import json
import os, sys
import time
conf = "config.json"
cache = []
fo = open(conf, "r")
conf_json = json.loads(fo.read())
fo.close()
def main():
mykey = conf_json["apikey"]
hash_file = sys.argv[1]
if not os.path.exists(hash_file):
print "The text file you provided does not exists!"
sys.exit()
outfile = r"{0}\{1}_output".format(os.path.dirname(hash_file), os.path.basename(hash_file))
cache = {}
with open(hash_file, "r") as fa, open(outfile, "a") as out:
for line in fa:
myline = line.strip().split(",")
file_path = myline[0]
file_sha1 = myline[-1]
#time.sleep(1)
try:
if file_sha1 not in cache:
params = {"apikey" : mykey, "resource" : "{0}".format(file_sha1)}
response = requests.get("https://www.virustotal.com/vtapi/v2/file/report", params=params)
if response.status_code == 200:
json_response = response.json()
if json_response["response_code"]:
print "{0},{1},{2},{3}/{4}\n".format(file_path, file_sha1, json_response["scan_date"], json_response["positives"], json_response["total"])
out.write("{0},{1},{2},{3}/{4}\n".format(file_path, file_sha1, json_response["scan_date"], json_response["positives"], json_response["total"]))
cache[file_sha1] = json_response
else:
print "{0},{1},NONE,NONE\n".format(file_path, file_sha1)
out.write("{0},{1},NONE,NONE\n".format(file_path, file_sha1))
else:
print "Request HTTP code: {0}".format(response.status_code)
pass
else:
print "{0},{1},{2}/{3}\n".format(file_path, file_sha1, cache[file_sha1]["positives"], cache[file_sha1]["total"])
out.write("{0},{1},{2}/{3}\n".format(file_path, file_sha1, cache[file_sha1]["positives"], cache[file_sha1]["total"]))
except Exception as e:
print e
pass
outjson = r"{0}\{1}_json".format(os.path.dirname(hash_file), os.path.basename(hash_file))
with open(outjson, "w") as oj:
json.dump(cache, oj)
if __name__ == "__main__":
if len(sys.argv) != 2:
print "\nUsage:\n\tpython vt-shiz.py [path to text file of hashes]\n"
sys.exit()
main()
``` |
{
"source": "JoWagnerAtLimagoDe/RUVPython",
"score": 3
} |
#### File: Dekorator/Dekorator/Dekorator.py
```python
def my_decorator(func):
def wrapper():
print("before Advice")
func()
print("After Returning Advice")
return wrapper
@my_decorator
def say_muh():
print("Muh")
#say_muh = my_decorator(say_muh)
say_muh()
def log_decorator(func):
def wrapper(*args, **kwargs):
print("log it")
retval = func(*args, **kwargs)
print("after log")
return retval
return wrapper
@log_decorator
def add(a,b):
return a + b
@log_decorator
def sub(a,b):
return a - b
print(add(3,4))
```
#### File: Nimmspiel/Games/TakeGameModul.py
```python
from Games.Game import Game
class TakeGame(Game):
def __init__(self):
self.__steine = 23
self.__spielende = False
def play(self):
while not self.__spielende:
self.__exceuteTurns ()
def __exceuteTurns(self):
self.__spielerzug()
self.__computerzug()
def __spielerzug(self):
while True:
zug = int(input("Es gibt {steine} Steine. Bitte nehmen Sie 1,2 oder 3.".format(steine = self.__steine)))
if zug >= 1 and zug <= 3:
break
print("Ungültiger Zug")
self.__steine -= zug
def __computerzug(self):
if self.__steine <= 0:
print("Du Loser")
self.__spielende = True
return
if self.__steine == 1:
print("Du hast nur Glück gehabt")
self.__spielende = True
return
zuege = (3,1,1,2)
zug = zuege[self.__steine % 4]
print ("Computer nimmt {zug} Steine".format(zug = zug))
self.__steine -= zug
``` |
{
"source": "Jo-wang/disentangle",
"score": 3
} |
#### File: disvae/models/encoders.py
```python
import numpy as np
import torch
from torch import nn
# import torch.functional as F
import torch.nn.functional as F
# ALL encoders should be called Encoder<Model>
def get_encoder(model_type):
model_type = model_type.lower().capitalize()
return eval("Encoder{}".format(model_type))
class EncoderBurgess(nn.Module):
def __init__(self, img_size,
latent_dim=10):
r"""Encoder of the model proposed in [1].
Parameters
----------
img_size : tuple of ints
Size of images. E.g. (1, 32, 32) or (3, 64, 64).
latent_dim : int
Dimensionality of latent output.
Model Architecture (transposed for decoder)
------------
- 4 convolutional layers (each with 32 channels), (4 x 4 kernel), (stride of 2)
- 2 fully connected layers (each of 256 units)
- Latent distribution:
- 1 fully connected layer of 20 units (log variance and mean for 10 Gaussians)
References:
[1] Burgess, <NAME>., et al. "Understanding disentangling in
$\beta$-VAE." arXiv preprint arXiv:1804.03599 (2018).
"""
super(EncoderBurgess, self).__init__()
# Layer parameters
hid_channels = 32
kernel_size = 4
hidden_dim = 256
self.latent_dim = latent_dim
self.img_size = img_size
# Shape required to start transpose convs
self.reshape = (hid_channels, kernel_size, kernel_size)
n_chan = self.img_size[0]
# Convolutional layers
cnn_kwargs = dict(stride=2, padding=1)
self.conv1 = nn.Conv2d(n_chan, hid_channels, kernel_size, **cnn_kwargs)
self.conv2 = nn.Conv2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)
self.conv3 = nn.Conv2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)
# If input image is 64x64 do fourth convolution
if self.img_size[1] == self.img_size[2] == 64:
self.conv_64 = nn.Conv2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)
# Fully connected layers
self.lin1 = nn.Linear(np.product(self.reshape), hidden_dim)
self.lin2 = nn.Linear(hidden_dim, hidden_dim)
# Fully connected layers for mean and variance
self.mu_logvar_gen = nn.Linear(hidden_dim, self.latent_dim * 2)
def forward(self, x):
batch_size = x.size(0)
# Convolutional layers with ReLu activations
x = torch.relu(self.conv1(x))
x = torch.relu(self.conv2(x))
x = torch.relu(self.conv3(x))
if self.img_size[1] == self.img_size[2] == 64:
x = torch.relu(self.conv_64(x))
# Fully connected layers with ReLu activations
x = x.view((batch_size, -1))
x = torch.relu(self.lin1(x))
x = torch.relu(self.lin2(x))
# Fully connected layer for log variance and mean
# Log std-dev in paper (bear in mind)
mu_logvar = self.mu_logvar_gen(x)
mu, logvar = mu_logvar.view(-1, self.latent_dim, 2).unbind(-1)
return mu, logvar
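# Shape sketch (illustrative; not part of the original file):
#
#   enc = EncoderBurgess(img_size=(3, 64, 64), latent_dim=10)
#   mu, logvar = enc(torch.randn(4, 3, 64, 64))   # both of shape (4, 10)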
class ConvEncoder(nn.Module):
def __init__(self, output_dim): # latent output dimensions
super(ConvEncoder, self).__init__()
self.latent_dim = output_dim
self.conv1 = nn.Conv2d(3, 64, kernel_size=5, stride=1, padding=2)
self.bn1 = nn.BatchNorm2d(64)
self.conv2 = nn.Conv2d(64, 64, kernel_size=5, stride=1, padding=2)
self.bn2 = nn.BatchNorm2d(64)
self.conv3 = nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2)
self.bn3 = nn.BatchNorm2d(128)
self.fc1 = nn.Linear(8192, 3072)
self.bn1_fc = nn.BatchNorm1d(3072)
self.fc2 = nn.Linear(3072, 2048)
self.bn2_fc = nn.BatchNorm1d(2048)
self.mu_logvar_gen = nn.Linear(2048, output_dim * 2)
# setup the non-linearity
self.act = nn.LeakyReLU(inplace=True)
def forward(self, inputs):
assert len(inputs.shape) == 4
batch_size, channel, width, height = inputs.size()
h = inputs.contiguous().view(-1, channel, width, height)
h = F.max_pool2d(self.act(self.bn1(self.conv1(h))), stride=2, kernel_size=3, padding=1)
h = F.max_pool2d(self.act(self.bn2(self.conv2(h))), stride=2, kernel_size=3, padding=1)
h = self.act(self.bn3(self.conv3(h)))
# [CHECK] did not add dropout so far
h = h.view(batch_size, -1)
h = self.act(self.bn1_fc(self.fc1(h)))
h = self.act(self.bn2_fc(self.fc2(h)))
mu_logvar = self.mu_logvar_gen(h)
outputs = mu_logvar.view(batch_size, self.latent_dim, 2).unbind(-1)
return outputs
class DomainEncoder(nn.Module):
def __init__(self, num_domains, output_dim):
super(DomainEncoder, self).__init__()
self.latent_dim = output_dim
self.embed = nn.Embedding(num_domains, 512)
self.bn = nn.BatchNorm1d(512)
self.mu_logvar_gen = nn.Linear(512, output_dim * 2)
# setup the non-linearity
self.act = nn.LeakyReLU(inplace=True)
def forward(self, inputs):
batch_size = inputs.size()[0]
# inputs.cuda()
h = self.act(self.bn(self.embed(inputs)))
mu_logvar = self.mu_logvar_gen(h)
outputs = mu_logvar.view(batch_size, self.latent_dim, 2).unbind(-1)
return outputs
``` |
{
"source": "Jo-wang/LDBE",
"score": 2
} |
#### File: LDBE/trainer/ldbe_trainer.py
```python
import torch
from utils.optimize import adjust_learning_rate
from .base_trainer import BaseTrainer
from utils.flatwhite import *
from easydict import EasyDict as edict
import os.path as osp
from dataset import dataset
import neptune
import math
from PIL import Image
from utils.meters import AverageMeter, GroupAverageMeter
import torchvision.transforms.functional as tf
import torch.nn.functional as F
import operator
import pickle
import random
import copy
from utils.kmeans import kmeans_cluster
from utils.func import Acc, thres_cb_plabel, gene_plabel_prop, mask_fusion
from utils.pool import Pool
from utils.flatwhite import *
from trainer.base_trainer import *
from utils.shot import *
import time
criterion_nll = nn.NLLLoss()
def Entropy(input_):
bs = input_.size(0)
epsilon = 1e-7
entropy = -input_ * torch.log(input_ + epsilon)
entropy = torch.sum(entropy, dim=1)
return entropy
class EMA(object):
def __init__(self, model, alpha):
self.step = 0
self.model = copy.deepcopy(model)
self.alpha = alpha
def update(self, model):
decay = min(1 - 1 / (self.step + 1), self.alpha)
for ema_param, param in zip(self.model.parameters(), model.parameters()):
ema_param.data = decay * ema_param.data + (1 - decay) * param.data
self.step += 1
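# Decay warm-up sketch (illustrative; not part of the original trainer): with
# alpha=0.99 the effective decay min(1 - 1/(step+1), alpha) ramps up from 0 and
# saturates at alpha, so the teacher first copies the student and then tracks a
# slow moving average.
#
#   [min(1 - 1 / (s + 1), 0.99) for s in (0, 1, 2, 99)]   # [0.0, 0.5, 0.666..., 0.99]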
def generate_class_mask(pseudo_labels, pseudo_labels2):
labels = torch.unique(pseudo_labels) # all unique labels
labels_select = labels[torch.randperm(len(labels))][:len(labels) // 2] # randomly select half of labels
mask = (pseudo_labels.unsqueeze(-1) == labels_select).any(-1)
return mask.float()
def generate_unsup_data(data, target, logits=None):
batch_size, _, im_h, im_w = data.shape
device = data.device
new_data = []
new_target = []
new_logits = []
for i in range(batch_size):
mix_mask = generate_class_mask(target[i], target[(i + 1)% batch_size]).to(device)
new_data.append((data[i] * mix_mask + data[(i + 1) % batch_size] * (1 - mix_mask)).unsqueeze(0))
new_target.append((target[i] * mix_mask + target[(i + 1) % batch_size] * (1 - mix_mask)).unsqueeze(0))
new_logits.append((logits[i] * mix_mask + logits[(i + 1) % batch_size] * (1 - mix_mask)).unsqueeze(0))
new_data, new_target, new_logits = torch.cat(new_data), torch.cat(new_target), torch.cat(new_logits)
return new_data, new_target.long() , new_logits
def compute_unsupervised_loss(predict, target, logits, strong_threshold):
batch_size = predict.shape[0]
valid_mask = (target >= 0).float() # only count valid pixels
weighting = logits.view(batch_size, -1).ge(strong_threshold).sum(-1) / valid_mask.view(batch_size, -1).sum(-1)
loss = F.cross_entropy(predict, target, reduction='none', ignore_index=-1)
weighted_loss = torch.mean(torch.masked_select(weighting[:, None, None] * loss, loss > 0))
return weighted_loss
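# Shape sketch for the confidence-weighted loss above (illustrative; the tensor
# sizes are assumptions, not part of the original trainer):
#
#   predict: (B, C, H, W) student logits
#   target:  (B, H, W) pseudo labels, with -1 marking ignored pixels
#   logits:  (B, H, W) teacher confidences; the per-image fraction of pixels with
#            confidence >= strong_threshold weights that image's cross-entropy.
#
#   loss = compute_unsupervised_loss(torch.randn(2, 19, 8, 8),
#                                    torch.randint(0, 19, (2, 8, 8)),
#                                    torch.rand(2, 8, 8), strong_threshold=0.97)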
class Trainer(BaseTrainer):
def __init__(self, model_stu, config, writer):
self.model = model_stu
self.model.train()
self.ema = EMA(self.model,0.99)
self.config = config
self.writer = writer
def entropy_loss(self, p):
p = F.softmax(p, dim=1)
log_p = F.log_softmax(p, dim=1)
loss = -torch.sum(p * log_p, dim=1)
return loss
def dis_iter(self, batch):
img_s, label_s, _, _, name = batch
b, c, h, w = img_s.shape
img_s = img_s.cuda()
label_s = label_s.long().cuda()
with torch.no_grad():
pred_u, _ = self.ema.model(img_s)
pseudo_logits, pseudo_labels = torch.max(torch.softmax(pred_u, dim=1), dim=1)
pseudo_labels[label_s != 255] = label_s[label_s != 255] # use selected class-balanced label
pseudo_logits[label_s != 255] = 1.0
train_u_aug_data, train_u_aug_label, train_u_aug_logits = \
generate_unsup_data(img_s, pseudo_labels, pseudo_logits)
pred_stu, feat_stu = self.model(train_u_aug_data)
loss_s = compute_unsupervised_loss(pred_stu, train_u_aug_label, train_u_aug_logits, 0.97)
loss = loss_s
self.losses.loss_s = loss_s
loss.backward()
def iter(self, batch, r):
img_s, label_s, _, _, name = batch
b, c, h, w = img_s.shape
pred_s = self.model.forward(img_s.cuda())[0]
label_s = label_s.long().cuda()
if self.config.method == 'simsfss':
pred_s = pred_s.permute(0, 2, 3, 1).contiguous().view(-1, self.config.num_classes)
pred_s_softmax = F.softmax(pred_s, -1)
label_s = label_s.view(-1)
loss_s = F.cross_entropy(pred_s, label_s, ignore_index=255)
loss_e = self.entropy_loss(pred_s)
loss_e = loss_e.mean()
self.losses.loss_source = loss_s
self.losses.loss_entropy = loss_e
width = 3
k = self.config.num_classes // 2 + random.randint(-width, width)
_, labels_neg = torch.topk(pred_s_softmax, k, dim=1, sorted=True)
s_neg = torch.log(torch.clamp(1. - pred_s_softmax, min=1e-5, max=1.))
labels_neg = labels_neg[:, -1].squeeze().detach()
loss_neg = criterion_nll(s_neg, labels_neg)
self.losses.loss_neg = loss_neg
loss = loss_s + 1 * loss_e + 1 * loss_neg
loss.backward()
def train(self):
if self.config.neptune:
neptune.init(project_qualified_name="solacex/segmentation-DA")
neptune.create_experiment(params=self.config, name=self.config["note"])
if self.config.resume:
self.resume()
else:
self.round_start = 0
for r in range(self.round_start, self.config.round):
torch.manual_seed(1234)
torch.cuda.manual_seed(1234)
np.random.seed(1234)
random.seed(1234)
self.class_num = 19
self.model = self.model.train()
self.source_all = get_list(self.config.gta5.data_list)
self.target_all = get_list(self.config.cityscapes.data_list)
if self.config.method == 'ld' or self.config.method == "be":
                start = time.perf_counter()  # time.clock() was removed in Python 3.8
print("cb_prop:{}".format(self.config.cb_prop))
self.cb_thres = self.gene_thres(self.config.cb_prop)
for i in self.cb_thres:
if self.cb_thres[i] > 0.999999999:
self.cb_thres[i] = 0.999999999
print(self.cb_thres)
self.save_pred(r)
self.plabel_path = osp.join(self.config.plabel, self.config.note, str(r))
# self.plabel_path = osp.join(self.config.plabel, self.config.note, str(r))
                end = time.perf_counter()
print('Running time: %s Seconds' % (end - start))
self.config.cb_prop += 0.05
else:
self.plabel_path = None
self.optim = torch.optim.SGD(
self.model.optim_parameters(self.config.learning_rate),
lr=self.config.learning_rate,
momentum=self.config.momentum,
weight_decay=self.config.weight_decay,
)
self.loader, _ = dataset.init_target_dataset(self.config, plabel_path=self.plabel_path,
target_selected=self.target_all)
self.config.num_steps = 5000
cu_iter = 0
self.gamma = 1.0 * (r + 1)
miou = self.validate()
for epoch in range(self.config.epochs):
for i_iter, batch in tqdm(enumerate(self.loader)):
# self.save_model('STNTHIA_source_only',0,0)
# print('done')
# self.model.module.disable_train()
# miou = self.validate()
cu_step = epoch * len(self.loader) + i_iter
self.model = self.model.train()
# self.model.module.enable_train()
self.losses = edict({})
self.optim.zero_grad()
adjust_learning_rate(self.optim, cu_step, self.config)
if self.config.method == 'ld':
self.iter(batch, r)
elif self.config.method == 'be':
self.dis_iter(batch)
self.optim.step()
self.ema.update(self.model)
if i_iter % self.config.print_freq == 0:
self.print_loss(i_iter)
if i_iter % self.config.val_freq == 0 and i_iter != 0:
# self.model.module.disable_train()
miou = self.validate()
if i_iter % self.config.save_freq == 0 and i_iter != 0:
self.save_model(self.config.source, cu_step, miou)
miou = self.validate()
self.config.learning_rate = self.config.learning_rate / (math.sqrt(2))
if self.config.neptune:
neptune.stop()
def resume(self):
iter_num = self.config.init_weight[-5] # .split(".")[0].split("_")[1]
iter_num = int(iter_num)
self.round_start = int(math.ceil((iter_num + 1) / self.config.epochs))
print("Resume from Round {}".format(self.round_start))
if self.config.lr_decay == "sqrt":
self.config.learning_rate = self.config.learning_rate / (
(math.sqrt(2)) ** self.round_start
)
def gene_thres(self, prop, num_cls=19):
print('[Calculate Threshold using config.cb_prop]') # r in section 3.3
probs = {}
freq = {}
loader = dataset.init_test_dataset(self.config, self.config.target, set="train", selected=self.target_all,
batchsize=1)
for index, batch in tqdm(enumerate(loader)):
img, label, _, _, _ = batch
with torch.no_grad():
pred = F.softmax(self.model.forward(img.cuda())[0], dim=1)
pred_probs = pred.max(dim=1)[0]
pred_probs = pred_probs.squeeze()
pred_label = torch.argmax(pred, dim=1).squeeze()
for i in range(num_cls):
cls_mask = pred_label == i
cnt = cls_mask.sum()
if cnt == 0:
continue
cls_probs = torch.masked_select(pred_probs, cls_mask)
cls_probs = cls_probs.detach().cpu().numpy().tolist()
cls_probs.sort()
if i not in probs:
probs[i] = cls_probs[::5] # reduce the consumption of memory
else:
probs[i].extend(cls_probs[::5])
growth = {}
thres = {}
for k in probs.keys():
cls_prob = probs[k]
cls_total = len(cls_prob)
freq[k] = cls_total
cls_prob = np.array(cls_prob)
cls_prob = np.sort(cls_prob)
index = int(cls_total * prop)
cls_thres = cls_prob[-index]
cls_thres2 = cls_prob[index]
thres[k] = cls_thres
print(thres)
return thres
def save_pred(self, round):
# Using the threshold to generate pseudo labels and save
print("[Generate pseudo labels]")
loader = dataset.init_test_dataset(self.config, self.config.target, set="train", selected=self.target_all)
interp = nn.Upsample(size=(1024, 2048), mode="bilinear", align_corners=True)
self.plabel_path = osp.join(self.config.plabel, self.config.note, str(round))
mkdir(self.plabel_path)
self.config.target_data_dir = self.plabel_path
        self.pool = Pool() # save the probability of pseudo labels for the pixel-wise similarity matching, which is detailed around Eq. (9)
accs = AverageMeter() # Counter
props = AverageMeter() # Counter
cls_acc = GroupAverageMeter() # Class-wise Acc/Prop of Pseudo labels
self.mean_memo = {i: [] for i in range(self.config.num_classes)}
with torch.no_grad():
for index, batch in tqdm(enumerate(loader)):
image, label, _, _, name = batch
label = label.cuda()
img_name = name[0].split("/")[-1]
dir_name = name[0].split("/")[0]
img_name = img_name.replace("leftImg8bit", "gtFine_labelIds")
temp_dir = osp.join(self.plabel_path, dir_name)
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
output = self.model.forward(image.cuda())[0]
output = interp(output)
                # pseudo labels selected by global threshold
mask, plabel = thres_cb_plabel(output, self.cb_thres, num_cls=self.config.num_classes)
# pseudo labels selected by local threshold
if round >= 0:
local_prop = self.config.cb_prop
mask2, plabel2 = gene_plabel_prop(output, local_prop)
mask, plabel = mask_fusion(output, mask, mask2)
self.pool.update_pool(output, mask=mask.float())
acc, prop, cls_dict = Acc(plabel, label, num_cls=self.config.num_classes)
cnt = (plabel != 255).sum().item()
accs.update(acc, cnt)
props.update(prop, 1)
cls_acc.update(cls_dict)
plabel = plabel.view(1024, 2048)
plabel = plabel.cpu().numpy()
plabel = np.asarray(plabel, dtype=np.uint8)
plabelz = Image.fromarray(plabel)
plabelz.save("%s/%s.png" % (temp_dir, img_name.split(".")[0]))
print('The Accuracy :{:.2%} and proportion :{:.2%} of Pseudo Labels'.format(accs.avg.item(), props.avg.item()))
if self.config.neptune:
neptune.send_metric("Acc", accs.avg)
neptune.send_metric("Prop", props.avg)
def save_model(self, source, iter, miou):
name = str(iter) + "_miou" + str(miou)
tmp_name = "_EntMin_".join((source, str(name))) + ".pth"
torch.save(self.model.state_dict(), osp.join(self.config["snapshot"], tmp_name))
``` |
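The class-balanced thresholding in `gene_thres` above amounts to picking, per class, the confidence value below which only the top `cb_prop` fraction of that class's predictions remain. A self-contained sketch of that selection rule (the function and variable names here are illustrative, not taken from the repository):
```python
import numpy as np

def class_balanced_thresholds(probs_per_class, prop, cap=0.999999999):
    """Per-class confidence thresholds keeping roughly the top `prop` fraction
    of each class's predictions (mirrors the logic of gene_thres above)."""
    thres = {}
    for cls, probs in probs_per_class.items():
        cls_prob = np.sort(np.asarray(probs))           # ascending confidences
        index = int(len(cls_prob) * prop)
        index = min(max(index, 1), len(cls_prob))       # guard the boundary cases
        thres[cls] = min(float(cls_prob[-index]), cap)  # cap as done before training
    return thres

# toy usage with random confidences for two classes
rng = np.random.default_rng(0)
print(class_balanced_thresholds({0: rng.random(1000), 1: rng.random(200)}, prop=0.1))
```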
{
"source": "Jo-wang/ProDA",
"score": 3
} |
#### File: ProDA/data/__init__.py
```python
import importlib
import numpy as np
import torch.utils.data
from data.base_dataset import BaseDataset
from data.augmentations import *
from data.DataProvider import DataProvider
# import data.cityscapes_dataset
def find_dataset_using_name(name):
"""Import the module "data/[dataset_name]_dataset.py".
    In the file, the class called [dataset_name]_loader will
    be instantiated. It has to be a subclass of BaseDataset,
    and the match is case-insensitive.
"""
dataset_filename = "data." + name + "_dataset"
datasetlib = importlib.import_module(dataset_filename)
dataset = None
target_dataset_name = name + '_loader'
for _name, cls in datasetlib.__dict__.items():
if _name.lower() == target_dataset_name.lower() \
and issubclass(cls, BaseDataset):
dataset = cls
if dataset is None:
raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
return dataset
def get_option_setter(dataset_name):
"""Return the static method <modify_commandline_options> of the dataset class."""
dataset_class = find_dataset_using_name(dataset_name)
return dataset_class.modify_commandline_options
def create_dataset(opt, logger):
"""Create a dataset given the option.
This function wraps the class CustomDatasetDataLoader.
This is the main interface between this package and 'train.py'/'test.py'
Example:
>>> from data import create_dataset
>>> dataset = create_dataset(opt)
"""
data_loader = CustomDatasetDataLoader(opt, logger)
dataset = data_loader.load_data()
return dataset
def get_composed_augmentations(opt):
return Compose([RandomSized(opt.resize),
RandomCrop(opt.rcrop),
RandomHorizontallyFlip(opt.hflip)])
class CustomDatasetDataLoader():
def __init__(self, opt, logger):
self.opt = opt
self.logger = logger
# status == 'train':
source_train = find_dataset_using_name(opt.src_dataset)
data_aug = None if opt.noaug else get_composed_augmentations(opt)
self.source_train = source_train(opt, logger, augmentations=data_aug)
logger.info("{} source dataset has been created".format(self.source_train.__class__.__name__))
print("dataset {} for source was created".format(self.source_train.__class__.__name__))
self.source_train[0]
data_aug = None if opt.noaug else get_composed_augmentations(opt)
target_train = find_dataset_using_name(opt.tgt_dataset)
self.target_train = target_train(opt, logger, augmentations=data_aug, split='train')
logger.info("{} target dataset has been created".format(self.target_train.__class__.__name__))
print("dataset {} for target was created".format(self.target_train.__class__.__name__))
self.target_train[0]
## create train loader
self.source_train_loader = DataProvider(
dataset=self.source_train,
batch_size=opt.bs,
shuffle=not opt.noshuffle,
num_workers=int(opt.num_workers),
drop_last=True,
pin_memory=True,
)
self.target_train_loader = torch.utils.data.DataLoader(
self.target_train,
batch_size=opt.bs,
shuffle=not opt.noshuffle,
num_workers=int(opt.num_workers),
drop_last=not opt.no_droplast,
pin_memory=True,
)
# status == valid
self.source_valid = None
self.source_valid_loader = None
self.target_valid = None
self.target_valid_loader = None
target_valid = find_dataset_using_name(opt.tgt_dataset)
self.target_valid = target_valid(opt, logger, augmentations=None, split='val')
logger.info("{} target_valid dataset has been created".format(self.target_valid.__class__.__name__))
print("dataset {} for target_valid was created".format(self.target_valid.__class__.__name__))
self.target_valid_loader = torch.utils.data.DataLoader(
self.target_valid,
batch_size=opt.bs,
shuffle=False,
num_workers=int(opt.num_workers),
drop_last=False,
pin_memory=True,
)
def load_data(self):
return self
```
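For reference, `find_dataset_using_name('foo')` above imports `data/foo_dataset.py` and looks for a `BaseDataset` subclass whose name matches `foo_loader` (case-insensitively). A hypothetical module satisfying that contract could look like the sketch below; the constructor signature mirrors how `CustomDatasetDataLoader` instantiates its datasets, but this class is not part of the repository:
```python
# data/foo_dataset.py -- hypothetical example of the naming contract
from data.base_dataset import BaseDataset

class foo_loader(BaseDataset):
    """Discovered by find_dataset_using_name('foo'): module data.foo_dataset,
    class name 'foo_loader', subclass of BaseDataset."""
    def __init__(self, opt, logger, augmentations=None, split='train'):
        self.opt = opt
        self.logger = logger
        self.augmentations = augmentations
        self.split = split
        self.files = []  # a real loader would fill this with sample paths

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        # load one (image, label) pair here; augmentations would be applied to both
        raise NotImplementedError
```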
#### File: ProDA/data/randaugment.py
```python
import random
import numpy as np
import PIL
import PIL.ImageOps
import PIL.ImageEnhance
import PIL.ImageDraw
from PIL import Image
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
PARAMETER_MAX = 10
def AutoContrast(img, **kwarg):
return PIL.ImageOps.autocontrast(img), None
def Brightness(img, v, max_v, bias=0):
v = _float_parameter(v, max_v) + bias
return PIL.ImageEnhance.Brightness(img).enhance(v), v
def Color(img, v, max_v, bias=0):
v = _float_parameter(v, max_v) + bias
return PIL.ImageEnhance.Color(img).enhance(v), v
def Contrast(img, v, max_v, bias=0):
v = _float_parameter(v, max_v) + bias
return PIL.ImageEnhance.Contrast(img).enhance(v), v
def Cutout(img, v, max_v, bias=0):
if v == 0:
        return img, None
v = _float_parameter(v, max_v) + bias
v = int(v * min(img.size))
return CutoutAbs(img, v)
def CutoutAbs(img, v, **kwarg):
w, h = img.size
x0 = np.random.uniform(0, w)
y0 = np.random.uniform(0, h)
x0 = int(max(0, x0 - v / 2.))
y0 = int(max(0, y0 - v / 2.))
x1 = int(min(w, x0 + v))
y1 = int(min(h, y0 + v))
xy = (x0, y0, x1, y1)
# gray
color = (127, 127, 127)
img = img.copy()
PIL.ImageDraw.Draw(img).rectangle(xy, color)
return img, xy
def Equalize(img, **kwarg):
return PIL.ImageOps.equalize(img), None
def Identity(img, **kwarg):
return img, None
def Invert(img, **kwarg):
return PIL.ImageOps.invert(img), None
def Posterize(img, v, max_v, bias=0):
v = _int_parameter(v, max_v) + bias
return PIL.ImageOps.posterize(img, v), v
# def Rotate(img, v, max_v, bias=0):
# v = _int_parameter(v, max_v) + bias
# if random.random() < 0.5:
# v = -v
# #return img.rotate(v), v
# img_t = transforms.ToTensor()(img)
# H = img_t.shape[1]
# W = img_t.shape[2]
# theta = np.array([[np.cos(v/180*np.pi), -np.sin(v/180*np.pi), 0], [np.sin(v/180*np.pi), np.cos(v/180*np.pi), 0]]).astype(np.float)
# theta[0,1] = theta[0,1]*H/W
# theta[1,0] = theta[1,0]*W/H
# #theta = np.array([[np.cos(v/180*np.pi), -np.sin(v/180*np.pi)], [np.sin(v/180*np.pi), np.cos(v/180*np.pi)]]).astype(np.float)
# theta = torch.Tensor(theta).unsqueeze(0)
# # meshgrid_x, meshgrid_y = torch.meshgrid(torch.arange(W, dtype=torch.float), torch.arange(H, dtype=torch.float))
# # meshgrid = torch.stack((meshgrid_x.t()*2/W - 1, meshgrid_y.t()*2/H - 1), dim=-1).unsqueeze(0)
# # grid = torch.matmul(meshgrid, theta)
# # s_h = int(abs(H - W) // 2)
# # dim_last = s_h if H > W else 0
# # img_t = F.pad(img_t.unsqueeze(0), (dim_last, dim_last, s_h - dim_last, s_h - dim_last)).squeeze(0)
# grid = F.affine_grid(theta, img_t.unsqueeze(0).size())
# img_t = F.grid_sample(img_t.unsqueeze(0), grid, mode='bilinear').squeeze(0)
# # img_t = img_t[:,:,s_h:-s_h] if H > W else img_t[:,s_h:-s_h,:]
# img_t = transforms.ToPILImage()(img_t)
# return img_t, v
def Rotate(img, v, max_v, bias=0):
v = _int_parameter(v, max_v) + bias
if random.random() < 0.5:
v = -v
return img.rotate(v, resample=Image.BILINEAR, fillcolor=(127,127,127)), v
def Sharpness(img, v, max_v, bias=0):
v = _float_parameter(v, max_v) + bias
return PIL.ImageEnhance.Sharpness(img).enhance(v), v
def ShearX(img, v, max_v, bias=0):
v = _float_parameter(v, max_v) + bias
if random.random() < 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0), resample=Image.BILINEAR, fillcolor=(127,127,127)), v
def ShearY(img, v, max_v, bias=0):
v = _float_parameter(v, max_v) + bias
if random.random() < 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0), resample=Image.BILINEAR, fillcolor=(127,127,127)), v
def Solarize(img, v, max_v, bias=0):
v = _int_parameter(v, max_v) + bias
return PIL.ImageOps.solarize(img, 256 - v), 256 - v
def SolarizeAdd(img, v, max_v, bias=0, threshold=128):
v = _int_parameter(v, max_v) + bias
if random.random() < 0.5:
v = -v
    img_np = np.array(img).astype(int)
img_np = img_np + v
img_np = np.clip(img_np, 0, 255)
img_np = img_np.astype(np.uint8)
img = Image.fromarray(img_np)
return PIL.ImageOps.solarize(img, threshold), threshold
def TranslateX(img, v, max_v, bias=0):
v = _float_parameter(v, max_v) + bias
if random.random() < 0.5:
v = -v
v = int(v * img.size[0])
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0), resample=Image.BILINEAR, fillcolor=(127,127,127)), v
def TranslateY(img, v, max_v, bias=0):
v = _float_parameter(v, max_v) + bias
if random.random() < 0.5:
v = -v
v = int(v * img.size[1])
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v), resample=Image.BILINEAR, fillcolor=(127,127,127)), v
def _float_parameter(v, max_v):
return float(v) * max_v / PARAMETER_MAX
def _int_parameter(v, max_v):
return int(v * max_v / PARAMETER_MAX)
def fixmatch_augment_pool():
# FixMatch paper
augs = [(AutoContrast, None, None),
(Brightness, 0.9, 0.05),
(Color, 0.9, 0.05),
(Contrast, 0.9, 0.05),
(Equalize, None, None),
(Identity, None, None),
(Posterize, 4, 4),
(Rotate, 30, 0),
(Sharpness, 0.9, 0.05),
(ShearX, 0.3, 0),
(ShearY, 0.3, 0),
(Solarize, 256, 0),
(TranslateX, 0.3, 0),
(TranslateY, 0.3, 0)]
return augs
def my_augment_pool():
# Test
augs = [(AutoContrast, None, None),
(Brightness, 1.8, 0.1),
(Color, 1.8, 0.1),
(Contrast, 1.8, 0.1),
(Cutout, 0.2, 0),
(Equalize, None, None),
(Invert, None, None),
(Posterize, 4, 4),
(Rotate, 30, 0),
(Sharpness, 1.8, 0.1),
(ShearX, 0.3, 0),
(ShearY, 0.3, 0),
(Solarize, 256, 0),
(SolarizeAdd, 110, 0),
(TranslateX, 0.45, 0),
(TranslateY, 0.45, 0)]
return augs
class RandAugmentPC(object):
def __init__(self, n, m):
assert n >= 1
assert 1 <= m <= 10
self.n = n
self.m = m
self.augment_pool = my_augment_pool()
def __call__(self, img):
ops = random.choices(self.augment_pool, k=self.n)
for op, max_v, bias in ops:
prob = np.random.uniform(0.2, 0.8)
if random.random() + prob >= 1:
                img, _ = op(img, v=self.m, max_v=max_v, bias=bias)
        img, _ = CutoutAbs(img, 16)
return img
class RandAugmentMC(object):
def __init__(self, n, m):
assert n >= 1
assert 1 <= m <= 10
self.n = n
self.m = m
self.augment_pool = fixmatch_augment_pool()
def __call__(self, img, type='crc'):
aug_type = {'Hflip':False, 'ShearX':1e4, 'ShearY':1e4, 'TranslateX':1e4, 'TranslateY':1e4, 'Rotate':1e4, 'CutoutAbs':1e4}
if random.random() < 0.5:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
#aug_type.append(['Hflip', True])
aug_type['Hflip'] = True
if type == 'cr' or type == 'crc':
ops = random.choices(self.augment_pool, k=self.n)
for op, max_v, bias in ops:
v = np.random.randint(1, self.m)
if random.random() < 0.5:
img, params = op(img, v=v, max_v=max_v, bias=bias)
if op.__name__ in ['ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']:
#aug_type.append([op.__name__, params])
aug_type[op.__name__] = params
if type == 'cc' or type == 'crc':
img, params = CutoutAbs(img, min(img.size[0], img.size[1]) // 3)
#aug_type.append([CutoutAbs.__name__, params])
aug_type['CutoutAbs'] = params
return img, aug_type
def affine_sample(tensor, v, type):
# tensor: B*C*H*W
# v: scalar, translation param
    if type == 'Rotate':
        theta = np.array([[np.cos(v/180*np.pi), -np.sin(v/180*np.pi), 0], [np.sin(v/180*np.pi), np.cos(v/180*np.pi), 0]]).astype(float)
    elif type == 'ShearX':
        theta = np.array([[1, v, 0], [0, 1, 0]]).astype(float)
    elif type == 'ShearY':
        theta = np.array([[1, 0, 0], [v, 1, 0]]).astype(float)
    elif type == 'TranslateX':
        theta = np.array([[1, 0, v], [0, 1, 0]]).astype(float)
    elif type == 'TranslateY':
        theta = np.array([[1, 0, 0], [0, 1, v]]).astype(float)
H = tensor.shape[2]
W = tensor.shape[3]
theta[0,1] = theta[0,1]*H/W
theta[1,0] = theta[1,0]*W/H
if type != 'Rotate':
theta[0,2] = theta[0,2]*2/H + theta[0,0] + theta[0,1] - 1
theta[1,2] = theta[1,2]*2/H + theta[1,0] + theta[1,1] - 1
theta = torch.Tensor(theta).unsqueeze(0)
grid = F.affine_grid(theta, tensor.size()).to(tensor.device)
tensor_t = F.grid_sample(tensor, grid, mode='nearest')
return tensor_t
if __name__ == '__main__':
randaug = RandAugmentMC(2, 10)
#path = r'E:\WorkHome\IMG_20190131_142431.jpg'
path = r'E:\WorkHome\0.png'
img = Image.open(path)
img_t = transforms.ToTensor()(img).unsqueeze(0)
#img_aug, aug_type = randaug(img)
#img_aug.show()
# v = 20
# img_pil = img.rotate(v)
# img_T = affine_sample(img_t, v, 'Rotate')
v = 0.12
img_pil = img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0))
img_T = affine_sample(img_t, v, 'ShearY')
img_ten = transforms.ToPILImage()(img_T.squeeze(0))
img_pil.show()
img_ten.show()
``` |
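The `aug_type` dictionary returned by `RandAugmentMC.__call__` records the parameters of the geometric ops, and `affine_sample` can replay the same shear/translate/rotate on a tensor (for example a label map) with nearest-neighbour sampling. A small usage sketch of that pairing; the image path and zero label map are placeholders, and the import path assumes the repository layout shown above:
```python
import torch
from PIL import Image
from data.randaugment import RandAugmentMC, affine_sample

img = Image.open('example.png').convert('RGB')          # placeholder input
label_t = torch.zeros(1, 1, img.size[1], img.size[0])   # dummy B*C*H*W label map

img_aug, aug_type = RandAugmentMC(n=2, m=10)(img, type='cr')  # 'cr': no cutout

if aug_type['Hflip']:
    label_t = torch.flip(label_t, dims=[3])              # mirror the label map too
for op_name in ('ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate'):
    v = aug_type[op_name]
    if v != 1e4:                                          # 1e4 is the "not applied" sentinel above
        label_t = affine_sample(label_t, v, op_name)      # nearest-neighbour resampling
```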
{
"source": "jowanpittevils/Databasemanager_Signalplotter",
"score": 2
} |
#### File: databasemanager/classes/channelinfo.py
```python
class ChannelInfo(object):
def __init__(self, name=None, fs=None, order=None):
self.name = name
self.fs = fs
self.order = order
def __str__(self):
return "channel: {}, fs: {}, ind: {})".format(
self.name,
self.fs,
self.order,
)
def __repr__(self):
return str(self)
```
#### File: databasemanager/classes/copyable.py
```python
from abc import ABC
import copy
class Copyable(ABC):
def copy(self, new_name=None):
res = copy.deepcopy(self)
if(new_name is not None):
res.name = new_name
return res
def copy_shallow(self):
return copy.copy(self)
```
#### File: databasemanager/classes/eventlist.py
```python
import numpy as np
from databasemanager.classes.event import Event
from databasemanager.classes.copyable import Copyable
from databasemanager.classes.noneablelist import NoneableList
from databasemanager.labelaggregator import majority_voting
class EventList(Copyable, list):
def __init__(self, liste:list = []):
super().__init__(liste)
if(liste is not None):
if(len(liste)>0):
assert(all([isinstance(item, Event) for item in liste]))
@property
def number_of_events(self):
return len(self)
@property
def has_none_start(self):
return any([event.start is None for event in self])
@property
def has_none_end(self):
return any([event.end is None for event in self])
@property
def has_overlap(self):
        '''It is True if any events in the list overlap each other.'''
if(self.has_none_start or self.has_none_end):
return True
sevent = self.sorted()
starts = np.array([event.start for event in sevent])
ends = np.array([event.end for event in sevent])
assert(len(starts) == len(ends))
for i in range(len(starts)-1):
if(ends[i] > starts[i+1]):
return True
return False
def sort(self):
'''Sort the events based on the start time and return nothing'''
super().sort(key=lambda event: event.start)
def sorted(self):
        '''Sorts the events based on the start time on a copy of the list and returns it.'''
res = self.copy_shallow()
res.sort()
return res
@staticmethod
def merge_eventlist(lists:list, duration_threshold_sec:float=1):
        ''' This function takes a list of EventList objects and merges them together, taking the overlap between their annotations into account.
        This function does not perform any aggregation on the labels. Instead, each event in the returned list carries a vectorized label
        that merges the individual labels respectively. The labels can also have been vectorized previously by this function.
        Hence, the length of the label vector of the returned list equals the sum of the lengths of the input label vectors. In case of a missing label for some
        events, the label vector takes None for the corresponding annotator.
        It does not change the inputs.
        parameter:
        ---------
        lists: the list of eventlists to be merged
        duration_threshold_sec: the minimum accepted event duration. After the merging process, the events shorter than
        this threshold will be deleted.
        return
        ------
        an EventList resulting from the merging process
        '''
if(len(lists)==0):
return None
res = lists[0]
for i in range(1, len(lists)):
res = EventList.__merge_two_eventlist_time(res,lists[i])
return res
@staticmethod
def __merge_two_eventlist_time(list1, list2, duration_threshold_sec:float=1):
        ''' This function takes two EventList lists and merges them together, taking the overlap between their annotations into account.
        This function does not perform any aggregation on the labels. Instead, each event in the returned list carries a vectorized label
        merging the labels of list1 and list2 respectively. The labels of list1 and list2 can also have been vectorized previously by this function.
        Hence, the length of the label vector of the returned list equals the sum of the lengths of the two label vectors. In case of a missing label for some
        events, the label vector takes None for the corresponding annotator.
        It does not change the inputs.
        parameter:
        ---------
        list1 / list2: the two event lists to be merged
        duration_threshold_sec: the minimum accepted event duration. After the merging process, the events shorter than
        this threshold will be deleted.
        return
        ------
        an EventList resulting from the merging process
        '''
if(list1.has_none_end or list2.has_none_end):
raise ValueError("The events with 'None'-end are not supported in merging.")
if(list1.has_none_start or list2.has_none_start):
raise ValueError("The events with 'None'-start are not supported in merging.")
if(list1.has_overlap or list2.has_overlap):
raise ValueError("The events with intera overlap are not supported in merging.")
merged = EventList.__merge_two_eventlist_label(list1, list2)
merged.sort()
res = EventList()
while(True):
if(len(merged)==0):
break
current_event = merged[0]
if(len(merged)==1):
res.append(current_event)
break
next_event = merged[1]
if(current_event.has_overlap(next_event, False)):
merged.remove(current_event)
merged.remove(next_event)
ls = EventList.__split_event(current_event, next_event)
current_event = ls[0]
next_event = ls[-1]
merged = ls + merged
res.append(current_event)
merged.remove(current_event)
res = EventList([r for r in res if r.duration>=duration_threshold_sec])
return res
@staticmethod
def __split_event0(event1:Event, event2:Event):
        ''' It takes two events and checks their overlap. In case of overlap it makes a third event indicating the overlap period.
        It returns an EventList that has 2 (in case of no overlap) or 3 (in case of overlap) events. The labels of the events must
        be previously vectorized to None-form (using the 'make_label_list' function). In case of overlap, first copies of event1/2
        are made, then the start/end are corrected, and next they are added to the returned list.
        It does not change the inputs.
        return
        ------
        an EventList object having 2/3 events
        '''
event1 = event1.copy_shallow()
event2 = event2.copy_shallow()
res = EventList()
if(event1.start > event2.start):
(event1, event2) = (event2, event1)
res.append(event1)
d = event2.start - event1.end
if(d<0): #has overlap
newEvent = Event(event2.start, event1.end, EventList.__merge_labels(event1.label, event2.label), None,event1.annotation)
event1.end = newEvent.start
event2.start = newEvent.end
res.append(newEvent)
res.append(event2)
return res
@staticmethod
def __split_event(event1:Event, event2:Event):
        ''' It takes two events and checks their overlap. In case of overlap it makes a third event indicating the overlap period.
        It returns an EventList that has 2 (in case of no overlap) or 3 (in case of overlap) events.
        The labels of the events must be previously vectorized to None-form (using the 'make_label_list' function). In case of overlap, first copies of event1/2
        are made, then the start/end are corrected, and next they are added to the returned list.
        It does not change the inputs.
        return
        ------
        an EventList object having 2/3 events
        '''
utimes = list(set([event1.start, event1.end, event2.start, event2.end]))
utimes.sort()
res = EventList([])
for i in range(0, len(utimes)-1):
st = utimes[i]
en = utimes[i+1]
lbl1 = EventList._get_label_if_overlap(event1, st,en)
lbl2 = EventList._get_label_if_overlap(event2, st,en)
lbl = EventList.__merge_labels(lbl1, lbl2)
res.append(Event(st, en,lbl,None, event1.annotation))
return res
@staticmethod
def _get_label_if_overlap(event:Event, start:float, end:float):
'''It returns the event label if it has overlap with the given range, otherwise None.'''
if(event.has_overlap_range(start, end, False)):
return event.label
else:
return None
@staticmethod
def __merge_labels(label1:list, label2:list):
        '''It merges two vectorized event labels. Their non-None entries must not overlap.
It does not change the inputs.
'''
if(label1 is None):
return label2
if(label2 is None):
return label1
N = len(label1)
assert(N == len(label2))
res = EventList()
for i in range(N):
res.append(EventList.__take_not_None(label1[i],label2[i]))
return res
@staticmethod
def __take_not_None(val1, val2):
        '''It returns val1 if it is not None, otherwise val2 (so it returns None only when both values are None).'''
if(val1 is not None):
return val1
else:
return val2
@staticmethod
def __merge_two_eventlist_label(list1, list2):
        ''' It takes two EventList lists and merges all of their events together without taking any overlap into account.
        If the labels of any of the input lists have not been vectorized yet, it is done here.
        The length of the label vector of the returned list equals the sum of the lengths of the two label vectors. In case of a missing label for some
        events, the label vector takes None for the corresponding annotator.
        It does not change the inputs. The input lists must have at least one event.
        return
        ------
        an EventList resulting from the merging process. It has not been sorted.
        '''
assert(len(list1)>0)
assert(len(list2)>0)
list1 = list1.copy_shallow()
list2 = list2.copy_shallow()
if(isinstance(list1[0].label,str)):
l1 = 1
elif(isinstance(list1[0].label,list)):
l1 = len(list1[0].label)
else:
l1 = 1
if(isinstance(list2[0].label,str)):
l2 = 1
elif(isinstance(list2[0].label,list)):
l2 = len(list2[0].label)
else:
l2 = 1
L = l1+l2
EventList.__vetorize_label(list1, L, [0,l1])
EventList.__vetorize_label(list2, L, [l1,L])
merged = EventList(list1 + list2)
return merged
@staticmethod
def __vetorize_label(eventlist, label_list_len:int, label_position):
        '''It converts the labels of the eventlist into a vector whose extra elements are None. It is applied directly to the given list.
        Parameters:
        -----------
        eventlist: the list in which the labels should be vectorized
        label_list_len: the output length of the label vector
        label_position: a list (or int) with two numbers indicating the start and end (python end: n+1) index of the vector that corresponds to this event.
        If it is an int, it is interpreted as [label_position, label_position + 1].
        It returns nothing.'''
if(not isinstance(label_position,list)):
label_position = NoneableList([label_position, label_position+1])
for event in eventlist:
nonelist = NoneableList([None]*label_list_len)
if(not isinstance(event.label, list)):
event.label = NoneableList([event.label])
nonelist[label_position[0]:label_position[1]] = event.label
event.label = nonelist
def aggregate(
self,
label_tied_order:list = None,
minimum_duration_threshold_sec:float = 1,
confidence_ratio:float = 0,
remove_Nones:bool = True,
combine_equals:bool = True,
):
'''
        This function aggregates all annotations into one using majority voting.
parameters:
-----------
label_tied_order: is a list of label string defining the priority of labels if the aggregation is tied. If it is None, the tied annotations are broken randomly.
minimum_duration_threshold_sec: the events smaller than this thresholds will be set to None after the aggregation.
        confidence_ratio: the events having less agreement than this ratio will be set to None after the aggregation.
remove_Nones: If it is true, the events that have None label after the aggregation, will be removed.
combine_equals: if it is True, the consecutive labels that are exactly following each other in time domain (end1 == start2) and
having equal labels will be merged together. In this case, their confidences must not be necessarily equal. The confidence of the merged event
is resulted from a weighted averaging of the confidences of the two events proportional to their durations.
return:
-------
eventList
'''
event_list = majority_voting(self, label_tied_order)
event_list.set_None_short_events(minimum_duration_threshold_sec)
event_list.set_None_low_confident_events(confidence_ratio)
if(combine_equals):
event_list = event_list.combine_equals()
if(remove_Nones):
event_list = event_list.remove_Unknowns()
return event_list
def set_None_short_events(self, minimum_duration_threshold_sec:float):
        ''' This function sets the label of the events that are shorter than the given minimum_duration_threshold_sec to None and returns nothing.'''
for event in self:
if event.duration < minimum_duration_threshold_sec:
event.label = None
event.label_confidence = None
def set_None_low_confident_events(self, confidence_ratio:float):
        ''' This function sets the label of the events that have a confidence lower than the given confidence_ratio to None and returns nothing.'''
for event in self:
if (event.label_confidence is None) or (event.label_confidence < confidence_ratio):
event.label = None
event.label_confidence = None
def combine_equals(self, ignored_interval:float = 0):
'''
        This function merges consecutive events that exactly follow each other in the time domain (end1 == start2)
        and have equal labels. Their confidences do not have to be equal. The confidence of the merged event
        results from a weighted average of the confidences of the two events, proportional to their durations.
        The list itself is shallow-copied, so merged events may be modified in place.
        parameters:
        ----------
        ignored_interval: the maximum interval (in seconds) between two equally labelled events that should be ignored when merging.
return:
-------
eventlist
'''
orig_list = self.copy_shallow()
res = EventList([])
if(len(orig_list)==0):
return res
for i in range(0,len(orig_list)-1):
if(
(orig_list[i].label == orig_list[i+1].label) and
((orig_list[i].end + ignored_interval) >= orig_list[i+1].start)
):
orig_list[i+1].start = orig_list[i].start
orig_list[i+1].label_confidence = self._get_averaged_confidence(orig_list[i],orig_list[i+1])
else:
res.append(orig_list[i])
res.append(orig_list[-1])
return res
@staticmethod
def _get_averaged_confidence(event1:Event, event2:Event)->float:
if(event1.label_confidence is None):
return event2.label_confidence
if(event2.label_confidence is None):
return event1.label_confidence
if(event1.duration == 0 and event2.duration == 0):
return (event1.label_confidence + event2.label_confidence)/2
sm = (event1.duration*event1.label_confidence) + (event2.duration*event2.label_confidence)
dv = (event1.duration + event2.duration)
return sm / dv
def remove_Unknowns(self, unknown_label:str=None):
'''
        This function removes the events with a None label and returns the remaining ones.
        It does not change the input list, but it returns the same event objects in a new EventList, so it does not deep-copy.
'''
if(unknown_label is not None):
for event in self:
if(event.label == unknown_label):
event.label = None
return EventList([event for event in self if (event.label is not None)])
def add_background_label(self, label:str, start:float = 0, end:float = None, annotation=None):
'''
        It adds events in all intervals (within the given start-end range) where no event was assigned by the annotators.
        It can be very useful in 2-class problems where only one label is annotated (e.g. seizures in seizure detection problems).
        This function will sort the events.
        It returns nothing.
        parameters:
        -----------
        label: the label of the background (e.g. non-seizure)
        start: the start of the recording, which should logically be 0 (except in some special use cases). If it is set to None, it will be set to the start of the first event.
        end: the end of the recording. If it is set to None, it will be set to the end of the last event.
annotation: the annotation object to which the background events belong
'''
if(len(self)==0):
if(end is None):
raise ValueError("The end is None and there is no event in the list to be set!")
self.append(Event(start, end, label, None, annotation))
return
self.sort()
if(start is None):
if(self[0].start is None):
raise ValueError("The start is None and the first event has a None start!")
start = self[0].start
if(end is None):
if(self[-1].end is None):
raise ValueError("The end is None and the last event has a None end!")
end = self[-1].end
extra = EventList()
if(start < self[0].start):
extra.append(Event(start, self[0].start, label, None, annotation))
if(end > self[-1].end):
extra.append(Event(self[-1].end, end, label, None, annotation))
for i in range(0, len(self)-1):
if(self[i].end < self[i+1].start):
extra.append(Event(self[i].end, self[i+1].start, label, None, annotation))
self.extend(extra)
self.sort()
```
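A short usage sketch of the merging and aggregation flow defined above, assuming two annotators labelled the same recording (the import paths follow the package layout shown in this repository; the event times and labels are made up):
```python
from databasemanager.classes.event import Event
from databasemanager.classes.eventlist import EventList

# two annotators with slightly different boundaries for the same event
ann1 = EventList([Event(10.0, 25.0, 'seizure', None, None),
                  Event(40.0, 55.0, 'seizure', None, None)])
ann2 = EventList([Event(12.0, 27.0, 'seizure', None, None)])

merged = EventList.merge_eventlist([ann1, ann2])    # labels become per-annotator vectors, overlaps are split
final = merged.aggregate(minimum_duration_threshold_sec=1,
                         confidence_ratio=0.5)      # majority voting plus clean-up
final.add_background_label('background', start=0, end=60)  # fill unlabelled gaps
for event in final:
    print(event)
```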
#### File: databasemanager/classes/event.py
```python
from databasemanager.classes.queryable import Queryable
from databasemanager.classes.eventextrainfo import EventExtraInfo
import math
class Event(Queryable):
#override copyabledoubleside
def reassign(self):
pass
''' Start, End, and Durations are in seconds but can be a float including ms.'''
_time_splitter = ':'
def __init__(self, start, end, label, extrainfo, annotation, label_confidence=1):
self.start = start
self.end = end
self.label = label
self.label_confidence = label_confidence
self.extrainfo = extrainfo
self.annotation = annotation
self.reassign()
@property
def stop(self):
return self.end
@property
def duration(self):
if(self.start is None):
return -math.inf
if(self.end is None):
return math.inf
return self.end - self.start
@property
def start_time(self):
if(self.start is None):
return 'None'
return Event._to_time_format(self.start)
@property
def end_time(self):
if(self.end is None):
return 'None'
return Event._to_time_format(self.end)
@property
def duration_time(self):
if(self.start is None):
return 'inf'
if(self.end is None):
return 'inf'
return Event._to_time_format(self.duration)
@staticmethod
def _to_time_format(sec):
if(sec is None):
return 'None'
if(sec is math.inf or sec is -math.inf):
return str(sec)
z = sec
h = int(z//3600)
z -= (h*3600)
m = int(z//60)
z -= (m*60)
s = int(z//1)
z -= s
ss = str(h) + Event._time_splitter + str(m)+Event._time_splitter + str(s)
if(z>0):
ss += Event._time_splitter + str(z)[0:3] #3 digits of ms if exists
return ss
def __str__(self):
conf_str = "({}%)".format(round(self.label_confidence*100)) if self.label_confidence is not None else ""
return "<Event: {}{} from {} ({}) to {} ({}) (dur: {} ({}))>".format(
str(self.label),
conf_str,
self.start_time,
self.start,
self.end_time,
self.end,
self.duration_time,
self.duration,
)
def __repr__(self):
return str(self)
def summary(self):
print(str(self))
@staticmethod
def make_event(event_tsv, annotation):
max_column = 4
min_column = 3
if(len(event_tsv)==0):
return None
if(len(event_tsv)<min_column):
print('??: '+str(event_tsv))
if(len(event_tsv)>max_column):
event_tsv = event_tsv[:(max_column-1)]+[','.join(event_tsv[(max_column-1):])]
if(event_tsv[0].lower() == 'none'):
start = None
elif(Event.__is_number(event_tsv[0])):
start = float(event_tsv[0])
elif(Event.__is_time_format(event_tsv[0])):
r = Event.__try_load_time_format(event_tsv[0])
start = r[0]*3600 + r[1]*60 + r[2]
else:
raise BaseException('the start should be in seconds or in ''h:m:s'' format while it is {} (file: {})'.format(event_tsv[0], annotation.name))
if(event_tsv[1].lower() == 'none'):
end = None
elif(Event.__is_number(event_tsv[1])):
end = float(event_tsv[1])
elif(Event.__is_time_format(event_tsv[1])):
r = Event.__try_load_time_format(event_tsv[1])
end = r[0]*3600 + r[1]*60 + r[2]
else:
raise BaseException('the end should be in seconds or in ''h:m:s'' format while it is {} (file: {})'.format(event_tsv[0], annotation.name))
if(event_tsv[2].lower() == 'none' or event_tsv[2].lower() == ''):
label = None
else:
label = event_tsv[2]
commentTag = (event_tsv[3] if (len(event_tsv)>3) else "")
extrainfo = EventExtraInfo(commentTag)
return Event(start, end, label, extrainfo, annotation)
@staticmethod
def __is_number(s):
try:
s = float(s) # for int, long and float
return True
except ValueError:
return False
@staticmethod
def __is_time_format(s):
r = Event.__try_load_time_format(s)
return (True if r is not None else False)
@staticmethod
def __try_load_time_format(s):
ss = s.split(Event._time_splitter)
try:
h = float(ss[0])
m = float(ss[1])
s = float(ss[2])
return (h,m,s)
except:
return None
def has_overlap(self, other_event, accept_end_end):
'''
It returns True if the event has overlap with the other.
parameters:
-----------
other_event: to be compared with
accept_end_end: to return true if end of one is exactly the start of the other
'''
return self.has_overlap_range(other_event.start, other_event.end, accept_end_end)
def has_overlap_range(self, start, stop, accept_end_end):
'''
It returns True if the event has overlap with the range.
parameters:
-----------
start: start time in seconds
stop: end time in seconds
accept_end_end: to return true if end of one is exactly the start of the other
'''
ma = max(self.start, start)
mi = min(self.end, stop)
if(ma == mi):
return accept_end_end
return (ma < mi)
```
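As a small illustration of the parsing above, `make_event` accepts the start and end columns either as plain seconds or in the h:m:s format handled by the time-format helpers (the row below is made up):
```python
from databasemanager.classes.event import Event

# one annotation row as read from a .tsv file: start, end, label, optional comment
row = ['0:02:30.5', '180', 'seizure', 'checked by second reviewer']
ev = Event.make_event(row, annotation=None)
print(ev.start, ev.end, ev.duration)   # 150.5 180.0 29.5
print(ev)                              # readable summary with both seconds and h:m:s
```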
#### File: databasemanager/classes/path.py
```python
import glob
import collections
from os import walk
from os.path import join
from pathlib import Path as PathLibClass
class Path():
'''
    This class organizes all paths and folders of the database.
    There are two ways of making a Path object:
    -- set the root, in which there is a 'Data' and a 'Datasets' folder.
    -- set data_path and datasets_path.
    In each case, the other argument(s) must be None.
    parameters:
    -----------
    root: the path of the root folder
    data_path: the path of the data folder (if root is not set)
    datasets_path: the path of the datasets folder (if root is not set)
    General rules of the database structure:
    subject folder name: [string]number. It must end with a number, e.g. a123, 123.
    recording file name: subjectName + '_r' + index, where the index starts from 1. It must start with the subject name and end with
    the '_r' recording suffix, e.g. a123_r1.edf, 123_r2.edf.
    annotation file name: recordingName + '_a' + index, where the index starts from 1. It must start with the corresponding recording
    file name and end with the '_a' annotation suffix, e.g. a123_r1_a1.tsv, 123_r2_a3.tsv.
'''
DATAFOLDER = 'Data'
DATASETSFOLDER = 'Datasets'
DATASETFILEEXTENSION = '.tsv'
ANNOTATIONFILEEXTENSION = '.tsv'
EXTRAFOLDER = 'Extra'
RECORDINGSUFFIX = '_r'
ANNOTATIONSUFFIX = '_a'
@classmethod
def _get_path(cls, path, has_session_folder:bool = False):
if(isinstance(path, cls)):
return path
elif(isinstance(path, str)):
return Path(path, has_session_folder=has_session_folder)
else:
raise BaseException('Path must be a string or an object of '+str(cls))
@classmethod
def get_defaulte_data_folder_fullpath(cls, root):
'''returns the full path of the 'Data' folder based on the 'root' attribute.'''
return join(root, cls.DATAFOLDER)
@classmethod
def get_defaulte_datasets_folder_fullpath(cls, root):
'''returns the full path of the 'Datasets' folder based on the 'root' attribute.'''
return join(root, cls.DATASETSFOLDER)
@classmethod
def get_defaulte_extra_folder_fullpath(cls, root):
'''returns the full path of the 'Extra' folder based on the 'root' attribute.'''
return join(root, cls.EXTRAFOLDER)
def __init__(self, root, data_path=None, datasets_path=None):
if(root is not None):
assert((data_path is None ) and (datasets_path is None))
self.root = root
self.__data_path = self.get_defaulte_data_folder_fullpath(root)
self.__datasets_path = self.get_defaulte_datasets_folder_fullpath(root)
else:
assert((data_path is not None ) and (datasets_path is not None))
self.__data_path = data_path
self.__datasets_path = datasets_path
self.root = None
@property
def datafolder_fullpath(self):
'''returns the full path of the 'Data' folder.'''
return self.__data_path
@property
def datasetsfolder_fullpath(self):
'''returns the full path of the 'Datasets' folder.'''
return self.__datasets_path
@property
def extrafolder_fullpath(self):
'''returns the full path of the 'Extra' folder if the root has been set.'''
if(self.root is None):
return None
        return self.get_defaulte_extra_folder_fullpath(self.root)
@property
def datasets_list_fullpath(self):
'''returns a list of all fullpath corresponding the existing datasets.'''
folderS = join(self.datasetsfolder_fullpath, "*" + Path.DATASETFILEEXTENSION)
files = [f for f in glob.glob(folderS, recursive=False)]
return files
@property
def datasets_list_names(self):
'''returns a list of all existing dataset names.'''
names = [PathLibClass(f).stem for f in self.datasets_list_fullpath]
return names
def get_dataset_fullpath(self, name):
'''returns the full path of a dataset based on the given dataset name.'''
return join(self.datasetsfolder_fullpath, name + self.DATASETFILEEXTENSION)
def get_subject_fullpath(self, name):
'''returns the full path of the subject data folder based on the subject name.'''
return join(self.datafolder_fullpath, name)
def get_annotation_fullpath(self, subject_name, annotation_name):
'''returns the full path of an annotation file.'''
p = self.get_subject_fullpath(subject_name)
return join(p, annotation_name + self.ANNOTATIONFILEEXTENSION)
def get_recording_fullpath(self, subject_name,recording_name_with_extension):
'''returns the full path of an recording file.'''
p = self.get_subject_fullpath(subject_name)
return join(p,recording_name_with_extension)
def get_all_subject_names(self):
'''returns a list of all folders (subject names) in the 'Data' folder.'''
return self.get_all_folder_names(self.datafolder_fullpath)
@staticmethod
def get_all_folder_names(dir):
res = []
for (_, dirs, _) in walk(dir, topdown=False):
for name in dirs:
res.append(name)
return res
def get_recordings_details(self, subject_name):
''' returns a sorted list of tuples as (fullpath,recname,extension,recordingSuffix)
e.g. ('c:\\data\\a1\\a1.edf', 'a1_r1','.edf',1)
the output is sorted by the recording suffix '_rx'.
parameters:
-----------
subjectName: the name of the target subject
'''
filepath = self.get_subject_fullpath(subject_name)
folderS = join(filepath, "*")
full = [(f,PathLibClass(f).stem, PathLibClass(f).suffix)
for f in glob.glob(folderS, recursive=False)
if PathLibClass(f).suffix.lower() != self.ANNOTATIONFILEEXTENSION.lower()]
compnames = []
for f in full:
fullpath = f[0]
recname = f[1]
extension = f[2]
ind0 = recname.rfind(self.RECORDINGSUFFIX)
if(ind0 == -1):
raise ValueError("All recording files must end with '_r' following the recording index, e.g. 'sub1_r5.edf'. Error on: " + recname)
ind = ind0 + len(self.RECORDINGSUFFIX)
suffix = int(recname[ind:])
compnames.append((fullpath, recname, extension, suffix))
compnames.sort(key = lambda comp: comp[3])
return compnames
def get_annotations_details(self, subject_name, recording_name):
''' returns a sorted list of tuples as (fullpath,annotationname,annotationsuffix) where a is the fullpath, b is the real file name, and c is the annotator suffix.
Note that if the annotation does not end with a number it is supposed to end with '1' as the first annotation file.
e.g. ('c:\\amir1.tsv', 'amir1a.tsv', 'amir1a2.tsv')
It is sorted by the annotation number of the sorting file names.
parameters:
-----------
subjectName: the name of the target subject
recordingName: the name of the real recording file name
'''
recs = self.get_recordings_details(subject_name)
rec = [c for c in recs if c[1].lower() == recording_name.lower()]
assert(len(rec)==1)
rec = rec[0]
f = self.get_subject_fullpath(subject_name)
folderS = join(f, "*")
full = [(f,PathLibClass(f).stem)
for f in glob.glob(folderS, recursive=False)
if PathLibClass(f).suffix.lower() == self.ANNOTATIONFILEEXTENSION.lower()]
compnames = []
for f in full:
fullpath = f[0]
annotname = f[1]
ind0 = annotname.rfind(self.ANNOTATIONSUFFIX)
if(ind0 == -1):
raise ValueError("All annotation files must end with '_a' following the annotation index, e.g. 'sub1_r5_a1.edf'. Error on: " + annotname)
recnameofannot = annotname[:ind0]
if(recnameofannot != recording_name):
continue
ind = ind0 + len(self.ANNOTATIONSUFFIX)
suff = int(annotname[ind:])
compnames.append((fullpath, annotname, suff))
compnames.sort(key = lambda comp: comp[2])
return compnames
```
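Putting the conventions enforced above together, a minimal on-disk layout and the corresponding Path calls might look like the following sketch (the drive letter and file names are illustrative):
```python
# Expected layout (illustrative):
#
#   A:\db\toyDB\
#     Data\
#       sub1\
#         sub1_r1.edf        # recording 1 of subject 'sub1'
#         sub1_r1_a1.tsv     # annotation 1 of that recording
#         sub1_r1_a2.tsv     # a second annotator
#     Datasets\
#       all.tsv              # dataset definition file
from databasemanager.classes.path import Path

p = Path(r'A:\db\toyDB')                    # root mode: Data/ and Datasets/ are derived from it
print(p.datasets_list_names)                # ['all']
print(p.get_subject_fullpath('sub1'))       # ...\Data\sub1
print(p.get_recordings_details('sub1'))     # [(fullpath, 'sub1_r1', '.edf', 1)]
print(p.get_annotations_details('sub1', 'sub1_r1'))  # [(fullpath, 'sub1_r1_a1', 1), (fullpath, 'sub1_r1_a2', 2)]
```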
#### File: databasemanager/classes/subject.py
```python
from pathlib import Path as PathLibClass
from databasemanager.classes.path import Path
from databasemanager.classes.recordingmaker import RecordingMaker
from databasemanager.classes.MNEfetchableinterface import MNEFetchableInterface
from databasemanager.classes.queryable import Queryable
from databasemanager.classes.listextension import ListExtension
from databasemanager.operators.operatorbase import OperatorBase
from databasemanager.operators.resampler import Resampler
from databasemanager.operators.montagemaker import MontageMaker
from databasemanager.operators.rereferencer import Rereferencer
class Subject(MNEFetchableInterface, Queryable):
# override MNEFetchableInterface.fetch_mne_list()
def fetch_mne_list(self, set_annotations: bool = True):
mne_list = []
temp_none = [mne_list.extend(recording.fetch_mne_list(set_annotations)) for recording in self.recordings]
return mne_list
#override copyabledoubleside
def reassign(self):
if(self.recordings is not None):
for rec in self.recordings:
rec.subject = self
rec.reassign()
def __init__(self, path, name, annotation_warning=True):
if(isinstance(path, Path)):
self.path = path
else:
raise BaseException('Path must be an object of Path')
self.name = name
self.recordings = self.__load_recordings()
self.annotation_warning = annotation_warning
self.reassign()
def add_operator(self, op:OperatorBase):
'''
It gets an operator and assigns it to all of its recordings.
'''
assert(isinstance(op, OperatorBase))
for r in self.recordings:
r.add_operator(op.copy())
def __load_recordings(self):
recording_files = self.path.get_recordings_details(self.name)
recordings = []
for i,r in enumerate(recording_files):
rec_file_name = r[1]
rec_file_extension = r[2]
anns = self.path.get_annotations_details(self.name, rec_file_name)
if(anns is None or len(anns)==0):
print('Warning: {0} has no annotation.'.format(rec_file_name))
else:
annotation_list = zip([a[1] for a in anns],[a[2] for a in anns])
rec = RecordingMaker.make(self.path, self.name, rec_file_name,rec_file_extension, annotation_list)
if(rec.has_content):
recordings.append(rec)
return recordings
def __len__(self):
if(self.recordings is None):
return 0
return len(self.recordings)
@property
def number_of_recordings(self):
return len(self)
@property
def number_of_annotations(self):
return sum(rec.number_of_annotations for rec in self.recordings)
@property
def number_of_events(self):
return sum(rec.number_of_events for rec in self.recordings)
@property
def duration_sec(self)->float:
dur_sec = [rec.duration_sec for rec in self.recordings]
return sum(dur_sec)
@property
def fs(self):
if(self.number_of_recordings==0):
return None
if(self.number_of_recordings==1):
return self.recordings[0].fs
fs = ListExtension()
for rec in self.recordings:
fs.extend_append(rec.fs,'ignore')
return fs.similar_in_list()
@property
def number_of_channels(self):
if(self.number_of_recordings==0):
return None
if(self.number_of_recordings==1):
return self.recordings[0].number_of_channels
ch = ListExtension()
for rec in self.recordings:
ch.extend_append(rec.number_of_channels,'ignore')
return ch.similar_in_list()
@property
def fs_output(self):
'''It returns the sampling frequency of the loaded data and can be affected by resampling operators (the last one).'''
fs_output = ListExtension([r.fs_output for r in self.recordings])
return fs_output.similar_in_list()
@property
def number_of_channels_output(self):
'''It returns the number of channels of the loaded data and can be affected by montagmaker operators (the last one).'''
res = ListExtension([rec.number_of_channels_output for rec in self.recordings])
return res.similar_in_list()
@property
def unique_labels(self):
if(self.recordings is None):
return None
lbl = ListExtension()
non = [lbl.extend_append(rec.unique_labels,'add') for rec in self.recordings]
return set(lbl)
def __str__(self):
return "<Subject {0} with {1} recordings.>".format(self.name, len(self))
def summary(self):
print(str(self))
```
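For context, a Subject is normally obtained from a loaded dataset (see src/example_plotter.py further below) and then inspected or fetched as MNE objects. A short usage sketch, assuming `ds` was loaded as in that script:
```python
# assuming: ds = Database(root).load_dataset('all')  as in src/example_plotter.py below
subject = ds.subjects[0]
subject.summary()                                 # "<Subject ... with N recordings.>"
print(subject.number_of_recordings,
      subject.number_of_annotations,
      subject.number_of_events)
print(subject.fs, subject.number_of_channels)     # single values when consistent across recordings
print(subject.unique_labels)                      # set of labels over all annotations
raws = subject.fetch_mne_list()                   # one MNE Raw per recording, annotations attached
```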
#### File: databasemanager/operators/filterbase.py
```python
import numpy as np
from abc import ABC, abstractmethod, abstractproperty
from databasemanager.operators.operatorbase import OperatorBase
from databasemanager.operators.interfaces.fsneededinterface import FsNeededInterface
from databasemanager.operators.interfaces.timeaxisneededinterface import TimeAxisNeededInterface
from databasemanager.operators.filtercorrector import FilterCorrector
class FilterBase(OperatorBase, FsNeededInterface, TimeAxisNeededInterface, ABC):
#override from FsNeededInterface.set_fs()
def set_fs(self, fs):
self.fs = fs
if(self.axis is not None):
self.set_filter()
#override from TimeAxisNeededInterface.set_axis()
def set_axis(self, axis):
self.axis = axis
if(self.fs is not None):
self.set_filter()
def __init__(self, output_type:str='valid'):
'''
output_type: {'valid', 'same'} (case-insensitive)
        -- same: returns the output with the same size as the input. Depending on the filter approach it may have edge distortion; these distorted samples are reachable via correction_samples.
        -- valid: only returns the output samples that are valid after filtering. In this case the output size is smaller (x.shape - correction_samples) than the input size.
'''
self.output_type = output_type.lower()
self.corrector = None
self.axis = None
self.fs = None
# override Operation.apply(x)
def apply(self, x):
return self.filter(x)
def filter(self, x:np.ndarray):
y = self._filter_raw(x)
if(self.corrector is not None):
y = self.corrector.apply(y)
return y
def set_filter(self):
self._init_filter()
if(self.output_type == 'valid'):
self.corrector = FilterCorrector(self)
self.corrector.set_axis(self.axis)
@abstractmethod
def _filter_raw(self, x:np.ndarray):
pass
@abstractmethod
def _init_filter(self):
pass
@abstractproperty
def filter_length(self):
pass
```
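FilterBase defers three members to its subclasses: `_init_filter`, `_filter_raw`, and `filter_length`. A minimal moving-average subclass illustrating that contract (this class is not part of the repository; edge handling is left to FilterCorrector exactly as described in the base-class docstring):
```python
import numpy as np
from databasemanager.operators.filterbase import FilterBase

class MovingAverageFilter(FilterBase):
    """Toy FIR smoother: averages `length_sec` seconds of samples along the time axis."""
    def __init__(self, length_sec: float, output_type: str = 'valid'):
        super().__init__(output_type)
        self.length_sec = length_sec
        self._kernel = None

    def _init_filter(self):
        # called once fs and axis are both known (see set_fs / set_axis above)
        n = max(1, int(round(self.length_sec * self.fs)))
        self._kernel = np.ones(n) / n

    def _filter_raw(self, x: np.ndarray):
        # convolve along the configured time axis; 'same' keeps the input length,
        # and FilterCorrector trims the edges when output_type == 'valid'
        return np.apply_along_axis(lambda v: np.convolve(v, self._kernel, mode='same'),
                                   self.axis, x)

    @property
    def filter_length(self):
        return len(self._kernel) if self._kernel is not None else 0
```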
#### File: databasemanager/operators/operatorlist.py
```python
from databasemanager.operators.operatorbase import OperatorBase
from databasemanager.operators.filterbase import FilterBase
from databasemanager.operators.filtercorrector import FilterCorrector
from databasemanager.classes.virtuallist import VirtualList
class OperatorList(VirtualList):
def __init__(self, master_list):
super().__init__()
self.init_virtual_list(master_list, OperatorBase)
def summary(self):
for op in self:
print(str(op))
```
#### File: Databasemanager_Signalplotter/src/example_plotter.py
```python
from database_explorer_plotter.extra_functions import plot_browser
from databasemanager import *
root = "A:\\db\\toyDB"
data_root = "A:\\db\\toyDB\\data"
db = Database(root)
ds = db.load_dataset('all')
recording = ds.subjects[1].recordings[0]
def callback():
print("Callback working!")
plot_browser(recording = recording, window=30, start=0, y=None, title=None,fs=1,sens=None,channel_names=None, callback=callback, verbose = True)
```
#### File: src/qt_designer/additional_qtclasses.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class QLabel_Clickable(QtWidgets.QLabel):
clicked=QtCore.pyqtSignal()
def __init__(self, parent=None):
super().__init__(parent)
self.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
def mousePressEvent(self, ev):
self.clicked.emit()
class lineEdit_autocomplete(QtWidgets.QLineEdit):
model = QtCore.QStringListModel()
completer = QtWidgets.QCompleter()
def start(self, subject_names):
self.model.setStringList(subject_names)
self.completer.setModel(self.model)
self.setCompleter(self.completer)
class lineEdit_autocomplete2(QtWidgets.QLineEdit):
model = QtCore.QStringListModel()
completer = QtWidgets.QCompleter()
def start(self, subject_names):
self.model.setStringList(subject_names)
self.completer.setModel(self.model)
self.setCompleter(self.completer)
```
#### File: src/qt_designer/base_GUI.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
import os
class Ui_DB_explorer(object):
def setupUi(self, DB_explorer):
icon = QtGui.QIcon()
plg_dir = os.path.dirname(__file__)
icon.addPixmap(QtGui.QPixmap(plg_dir+"\\heartbeat.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
DB_explorer.setWindowIcon(icon)
DB_explorer.setObjectName("DB_explorer")
DB_explorer.resize(900, 450)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(DB_explorer.sizePolicy().hasHeightForWidth())
DB_explorer.setSizePolicy(sizePolicy)
DB_explorer.setMinimumSize(QtCore.QSize(900, 450))
self.centralwidget = QtWidgets.QWidget(DB_explorer)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout_8 = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.Dataset_label = QLabel_Clickable(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Dataset_label.sizePolicy().hasHeightForWidth())
self.Dataset_label.setSizePolicy(sizePolicy)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.Dataset_label.setPalette(palette)
font = QtGui.QFont()
font.setUnderline(True)
self.Dataset_label.setFont(font)
self.Dataset_label.setOpenExternalLinks(True)
self.Dataset_label.setObjectName("Dataset_label")
self.horizontalLayout_4.addWidget(self.Dataset_label)
self.label_11 = QtWidgets.QLabel(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_11.sizePolicy().hasHeightForWidth())
self.label_11.setSizePolicy(sizePolicy)
self.label_11.setMinimumSize(QtCore.QSize(1, 20))
self.label_11.setText("")
self.label_11.setObjectName("label_11")
self.horizontalLayout_4.addWidget(self.label_11)
self.verticalLayout_3.addLayout(self.horizontalLayout_4)
self.groupBox_3 = QtWidgets.QGroupBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_3.sizePolicy().hasHeightForWidth())
self.groupBox_3.setSizePolicy(sizePolicy)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.groupBox_3.setPalette(palette)
font = QtGui.QFont()
font.setUnderline(False)
self.groupBox_3.setFont(font)
self.groupBox_3.setObjectName("groupBox_3")
self.gridLayout = QtWidgets.QGridLayout(self.groupBox_3)
self.gridLayout.setObjectName("gridLayout")
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setSizeConstraint(QtWidgets.QLayout.SetMinAndMaxSize)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.label_10 = QtWidgets.QLabel(self.groupBox_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_10.sizePolicy().hasHeightForWidth())
self.label_10.setSizePolicy(sizePolicy)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.label_10.setPalette(palette)
font = QtGui.QFont()
font.setUnderline(False)
self.label_10.setFont(font)
self.label_10.setObjectName("label_10")
self.horizontalLayout_5.addWidget(self.label_10)
self.lineEdit = lineEdit_autocomplete(self.groupBox_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEdit.sizePolicy().hasHeightForWidth())
self.lineEdit.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setUnderline(False)
self.lineEdit.setFont(font)
self.lineEdit.setObjectName("lineEdit")
self.horizontalLayout_5.addWidget(self.lineEdit)
self.verticalLayout.addLayout(self.horizontalLayout_5)
self.subject_list = QtWidgets.QListWidget(self.groupBox_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.subject_list.sizePolicy().hasHeightForWidth())
self.subject_list.setSizePolicy(sizePolicy)
self.subject_list.setMinimumSize(QtCore.QSize(1, 50))
font = QtGui.QFont()
font.setUnderline(False)
self.subject_list.setFont(font)
self.subject_list.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustIgnored)
self.subject_list.setObjectName("subject_list")
self.verticalLayout.addWidget(self.subject_list)
self.verticalLayout_2.addLayout(self.verticalLayout)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.verticalLayout_2.addLayout(self.horizontalLayout_6)
self.gridLayout.addLayout(self.verticalLayout_2, 0, 0, 1, 1)
self.verticalLayout_3.addWidget(self.groupBox_3)
self.horizontalLayout_8.addLayout(self.verticalLayout_3)
spacerItem = QtWidgets.QSpacerItem(10, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_8.addItem(spacerItem)
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
self.verticalLayout_5.setObjectName("verticalLayout_5")
spacerItem1 = QtWidgets.QSpacerItem(20, 26, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.verticalLayout_5.addItem(spacerItem1)
self.groupBox_4 = QtWidgets.QGroupBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_4.sizePolicy().hasHeightForWidth())
self.groupBox_4.setSizePolicy(sizePolicy)
self.groupBox_4.setObjectName("groupBox_4")
self.gridLayout_2 = QtWidgets.QGridLayout(self.groupBox_4)
self.gridLayout_2.setObjectName("gridLayout_2")
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.label_9 = QtWidgets.QLabel(self.groupBox_4)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_9.sizePolicy().hasHeightForWidth())
self.label_9.setSizePolicy(sizePolicy)
self.label_9.setMaximumSize(QtCore.QSize(16777215, 19))
self.label_9.setObjectName("label_9")
self.horizontalLayout_7.addWidget(self.label_9)
self.lineEdit_2 = lineEdit_autocomplete2(self.groupBox_4)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEdit_2.sizePolicy().hasHeightForWidth())
self.lineEdit_2.setSizePolicy(sizePolicy)
self.lineEdit_2.setObjectName("lineEdit_2")
self.horizontalLayout_7.addWidget(self.lineEdit_2)
self.verticalLayout_4.addLayout(self.horizontalLayout_7)
self.recordings_list = QtWidgets.QListWidget(self.groupBox_4)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.recordings_list.sizePolicy().hasHeightForWidth())
self.recordings_list.setSizePolicy(sizePolicy)
self.recordings_list.setMinimumSize(QtCore.QSize(1, 50))
self.recordings_list.setObjectName("recordings_list")
self.verticalLayout_4.addWidget(self.recordings_list)
self.gridLayout_2.addLayout(self.verticalLayout_4, 0, 0, 1, 1)
self.verticalLayout_5.addWidget(self.groupBox_4)
self.horizontalLayout_8.addLayout(self.verticalLayout_5)
spacerItem2 = QtWidgets.QSpacerItem(10, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_8.addItem(spacerItem2)
self.verticalLayout_9 = QtWidgets.QVBoxLayout()
self.verticalLayout_9.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
self.verticalLayout_9.setObjectName("verticalLayout_9")
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.verticalLayout_9.addLayout(self.horizontalLayout_9)
self.verticalLayout_8 = QtWidgets.QVBoxLayout()
self.verticalLayout_8.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setMinimumSize(QtCore.QSize(0, 0))
self.groupBox.setObjectName("groupBox")
self.verticalLayout_10 = QtWidgets.QVBoxLayout(self.groupBox)
self.verticalLayout_10.setObjectName("verticalLayout_10")
self.annotations_list = QtWidgets.QListWidget(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.annotations_list.sizePolicy().hasHeightForWidth())
self.annotations_list.setSizePolicy(sizePolicy)
self.annotations_list.setMinimumSize(QtCore.QSize(0, 50))
self.annotations_list.setMaximumSize(QtCore.QSize(16777215, 100))
self.annotations_list.setObjectName("annotations_list")
self.verticalLayout_10.addWidget(self.annotations_list)
self.verticalLayout_8.addWidget(self.groupBox)
self.groupBox_2 = QtWidgets.QGroupBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_2.sizePolicy().hasHeightForWidth())
self.groupBox_2.setSizePolicy(sizePolicy)
self.groupBox_2.setObjectName("groupBox_2")
self.verticalLayout_11 = QtWidgets.QVBoxLayout(self.groupBox_2)
self.verticalLayout_11.setObjectName("verticalLayout_11")
self.label = QtWidgets.QLabel(self.groupBox_2)
self.label.setObjectName("label")
self.verticalLayout_11.addWidget(self.label)
self.verticalLayout_7 = QtWidgets.QVBoxLayout()
self.verticalLayout_7.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout_7.setContentsMargins(1, 1, 1, 1)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.verticalLayout_6 = QtWidgets.QVBoxLayout()
self.verticalLayout_6.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.horizontalLayout_10 = QtWidgets.QHBoxLayout()
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
self.verticalLayout_6.addLayout(self.horizontalLayout_10)
self.events_list = QtWidgets.QListWidget(self.groupBox_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.events_list.sizePolicy().hasHeightForWidth())
self.events_list.setSizePolicy(sizePolicy)
self.events_list.setMinimumSize(QtCore.QSize(0, 50))
self.events_list.setObjectName("events_list")
self.verticalLayout_6.addWidget(self.events_list)
self.verticalLayout_7.addLayout(self.verticalLayout_6)
self.pushButton_8 = QtWidgets.QPushButton(self.groupBox_2)
self.pushButton_8.setObjectName("pushButton_8")
self.verticalLayout_7.addWidget(self.pushButton_8)
self.verticalLayout_11.addLayout(self.verticalLayout_7)
self.verticalLayout_8.addWidget(self.groupBox_2)
self.verticalLayout_9.addLayout(self.verticalLayout_8)
self.horizontalLayout_8.addLayout(self.verticalLayout_9)
DB_explorer.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(DB_explorer)
self.menubar.setGeometry(QtCore.QRect(0, 0, 900, 25))
self.menubar.setObjectName("menubar")
DB_explorer.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(DB_explorer)
self.statusbar.setObjectName("statusbar")
DB_explorer.setStatusBar(self.statusbar)
self.retranslateUi(DB_explorer)
QtCore.QMetaObject.connectSlotsByName(DB_explorer)
def retranslateUi(self, DB_explorer):
_translate = QtCore.QCoreApplication.translate
DB_explorer.setWindowTitle(_translate("DB_explorer", "Database explorer"))
self.Dataset_label.setText(_translate("DB_explorer", "Dataset:"))
self.groupBox_3.setTitle(_translate("DB_explorer", "Subject"))
self.label_10.setText(_translate("DB_explorer", "Find:"))
self.groupBox_4.setTitle(_translate("DB_explorer", "Recordings"))
self.label_9.setText(_translate("DB_explorer", "Find:"))
self.groupBox.setTitle(_translate("DB_explorer", "Annotations"))
self.groupBox_2.setTitle(_translate("DB_explorer", "Events"))
self.label.setText(_translate("DB_explorer", "label start-end [s]"))
self.pushButton_8.setText(_translate("DB_explorer", "Temporal Profile"))
from qt_designer.additional_qtclasses import QLabel_Clickable, lineEdit_autocomplete, lineEdit_autocomplete2
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
DB_explorer = QtWidgets.QMainWindow()
ui = Ui_DB_explorer()
ui.setupUi(DB_explorer)
DB_explorer.show()
sys.exit(app.exec_())
```
#### File: src/qt_designer/dataset_selector.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
import os
class Ui_dataset(object):
def setupUi(self, dataset):
icon = QtGui.QIcon()
plg_dir = os.path.dirname(__file__)
icon.addPixmap(QtGui.QPixmap(os.path.join(plg_dir, "heartbeat.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
dataset.setWindowIcon(icon)
dataset.setObjectName("dataset")
dataset.resize(190, 170)
dataset.setMinimumSize(QtCore.QSize(190, 170))
self.centralwidget = QtWidgets.QWidget(dataset)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.listWidget = QtWidgets.QListWidget(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.listWidget.sizePolicy().hasHeightForWidth())
self.listWidget.setSizePolicy(sizePolicy)
self.listWidget.setMinimumSize(QtCore.QSize(1, 1))
self.listWidget.setObjectName("listWidget")
self.horizontalLayout.addWidget(self.listWidget)
dataset.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(dataset)
self.menubar.setGeometry(QtCore.QRect(0, 0, 190, 25))
self.menubar.setObjectName("menubar")
dataset.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(dataset)
self.statusbar.setObjectName("statusbar")
dataset.setStatusBar(self.statusbar)
self.retranslateUi(dataset)
QtCore.QMetaObject.connectSlotsByName(dataset)
def retranslateUi(self, dataset):
_translate = QtCore.QCoreApplication.translate
dataset.setWindowTitle(_translate("dataset", "Dataset_selector"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
dataset = QtWidgets.QMainWindow()
ui = Ui_dataset()
ui.setupUi(dataset)
dataset.show()
sys.exit(app.exec_())
```
#### File: signalplotter/qt/makePyUI.py
```python
def makeUI(uiNames):
import sys, os
print('Check the pwd first; it must be at .../SignalPlotter/qt.')
print(os.getcwd())
p0 = os.path.dirname(sys.executable)
for uiName in (uiNames):
print('===== for: ',uiName,' ======')
p1 = '"'+p0+'\Scripts\pyuic5.exe'+'" '
p1 += ' -x "' + uiName + '.ui"'
p1 += ' -o "' + uiName + '.py"'
print(p1)
import subprocess
res = subprocess.call(p1) != 0
print('Done.')
print('Is there any error: ', res)
uiNames = ['plotter_uiDesign']
makeUI(uiNames)
# %%
```
#### File: signalplotter/qt/plotter_ui_Thread.py
```python
from threading import Thread
import threading
class plotter_ui(Thread):
def __init__(self, MainWindow, x, title=None,fs=1, sens=None, channelNames=None):
Thread.__init__(self)
self.lock = threading.Lock()
self.MainWindow = MainWindow
self.x = x
self.title = title
self.fs = fs
self.sens = sens
self.channelNames = channelNames
def run(self):
# Note: as written this re-instantiates the same Thread subclass rather than a separate
# Qt plotter UI class; presumably the original module imports the actual plotter widget
# class, which this thread wrapper shadows by using the same name.
self.plotter_ui = plotter_ui(self.MainWindow, self.x, self.title, self.fs, self.sens, self.channelNames)
``` |
{
"source": "joway/dockerfiles",
"score": 3
} |
#### File: resource_limit/multi_container/multi_proc.py
```python
import os
from datetime import datetime
TASK_NUM = int(os.environ.get('TASK_NUM', 4))
def log(msg):
print("[%s] proc-%s" % (datetime.now(), msg))
def run():
proc = os.getpid()
while True:
v = 10 ** 10000000  # deliberately expensive computation to keep the CPU busy for the benchmark
log('%s ' % (proc,))
if __name__ == '__main__':
# p = Pool()
# for i in range(TASK_NUM):
# p.apply_async(run)
# p.close()
# p.join()
run()
``` |
{
"source": "joway/git-lint",
"score": 2
} |
#### File: git-lint/gitlint/git.py
```python
import os.path
import subprocess
import gitlint.utils as utils
def repository_root():
"""Returns the root of the repository as an absolute path."""
try:
root = subprocess.check_output(['git', 'rev-parse', '--show-toplevel'],
stderr=subprocess.STDOUT).strip()
# Convert to unicode first
return root.decode('utf-8')
except subprocess.CalledProcessError:
return None
def last_commit():
"""Returns the SHA1 of the last commit."""
try:
root = subprocess.check_output(['git', 'rev-parse', 'HEAD'],
stderr=subprocess.STDOUT).strip()
# Convert to unicode first
return root.decode('utf-8')
except subprocess.CalledProcessError:
return None
def _remove_filename_quotes(filename):
"""Removes the quotes from a filename returned by git status."""
if filename.startswith('"') and filename.endswith('"'):
return filename[1:-1]
return filename
def modified_files(root, tracked_only=False, commit=None):
"""Returns a list of files that has been modified since the last commit.
Args:
root: the root of the repository, it has to be an absolute path.
tracked_only: exclude untracked files when True.
commit: SHA1 of the commit. If None, it will get the modified files in the
working copy.
Returns: a dictionary with the modified files as keys, and additional
information as value. In this case it adds the status returned by
git status.
"""
assert os.path.isabs(root), "Root has to be absolute, got: %s" % root
if commit:
return _modified_files_with_commit(root, commit)
# Convert to unicode and split
status_lines = subprocess.check_output([
'git', 'status', '--porcelain', '--untracked-files=all',
'--ignore-submodules=all']).decode('utf-8').split(os.linesep)
modes = ['M ', ' M', 'A ', 'AM', 'MM']
if not tracked_only:
modes.append(r'\?\?')
modes_str = '|'.join(modes)
modified_file_status = utils.filter_lines(
status_lines,
r'(?P<mode>%s) (?P<filename>.+)' % modes_str,
groups=('filename', 'mode'))
return dict((os.path.join(root, _remove_filename_quotes(filename)), mode)
for filename, mode in modified_file_status)
def _modified_files_with_commit(root, commit):
# Convert to unicode and split
status_lines = subprocess.check_output(
['git', 'diff-tree', '-r', '--root', '--no-commit-id', '--name-status',
commit]).decode('utf-8').split(os.linesep)
modified_file_status = utils.filter_lines(
status_lines,
r'(?P<mode>A|M)\s(?P<filename>.+)',
groups=('filename', 'mode'))
# We need to add a space to the mode, so to be compatible with the output
# generated by modified files.
return dict((os.path.join(root, _remove_filename_quotes(filename)),
mode + ' ') for filename, mode in modified_file_status)
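# For reference, `git diff-tree -r --name-status <commit>` emits one line per changed
# path, e.g. "M\tgitlint/git.py" or "A\tnew_file.py"; the
# (?P<mode>A|M)\s(?P<filename>.+) pattern above splits that into mode and filename.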
def modified_lines(filename, extra_data, commit=None):
"""Returns the lines that have been modifed for this file.
Args:
filename: the file to check.
extra_data: is the extra_data returned by modified_files. Additionally, a
value of None means that the file was not modified.
commit: the complete sha1 (40 chars) of the commit. Note that specifying
this value will only work (100%) when commit == last_commit (with
respect to the currently checked out revision), otherwise, we could miss
some lines.
Returns: a list of lines that were modified, or None in case all lines are
new.
"""
if extra_data is None:
return []
if extra_data not in ('M ', ' M', 'MM'):
return None
if commit is None:
commit = '0' * 40
commit = commit.encode('utf-8')
# Split as bytes, as the output may have some non unicode characters.
blame_lines = subprocess.check_output(
['git', 'blame', '--porcelain', filename]).split(
os.linesep.encode('utf-8'))
modified_line_numbers = utils.filter_lines(
blame_lines,
commit + br' (?P<line>\d+) (\d+)',
groups=('line',))
return list(map(int, modified_line_numbers))
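# For reference, each group header emitted by `git blame --porcelain` has the form
# "<sha1> <orig-line> <final-line> [<group-size>]"; lines that are modified but not
# yet committed are attributed to an all-zero sha1, which is why `commit` defaults
# to forty zeros above.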
``` |
{
"source": "joway/kubernetes-network-benchmark",
"score": 2
} |
#### File: kubernetes-network-benchmark/bench/server.py
```python
import time
from lemon.app import Lemon
from lemon.context import Context
async def handler(ctx: Context):
server_recv = int(time.time() * 1000)
# do something
server_resp = int(time.time() * 1000)
ctx.body = {
'server_recv': server_recv,
'server_resp': server_resp,
}
app = Lemon()
app.use(handler)
app.listen(port=9999)
``` |
{
"source": "joway/Leetcode",
"score": 4
} |
#### File: array/easy/maxProfit.py
```python
class Solution(object):
"""
Problem:
https://leetcode.com/problems/best-time-to-buy-and-sell-stock/
Example:
Input: [7, 1, 5, 3, 6, 4]
Output: 5
"""
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
profit = 0
if not prices:
return profit
min_so_far = prices[0]
for i in range(1, len(prices)):
profit = max(profit, prices[i] - min_so_far)
min_so_far = min(prices[i], min_so_far)
return profit
if __name__ == '__main__':
prices = [7,1,5,3,6,4]
result = Solution().maxProfit(prices)
print(result)
```
#### File: array/easy/moveZeroes.py
```python
class Solution(object):
"""
Problem:
https://leetcode.com/problems/move-zeroes/
Example:
given nums = [0, 1, 0, 3, 12],
after calling your function, nums should be [1, 3, 12, 0, 0]
"""
def moveZeroes(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
cur = 0
non_zero = len(nums) - 1
while cur < non_zero:
if nums[cur] == 0:
for i in range(cur + 1, non_zero + 1):
nums[i - 1] = nums[i]
nums[non_zero] = 0
non_zero -= 1
else:
cur += 1
if __name__ == '__main__':
nums = [0, 1, 0, 3, 12]
result = Solution().moveZeroes(nums)
print(nums)
```
#### File: array/easy/plusOne.py
```python
class Solution(object):
"""
Problem:
https://leetcode.com/problems/plus-one/
Example:
input: [1,2,9,9]
output: [1,3,0,0]
"""
def plusOne(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
carry = 1
remainder = 0
for i in range(len(digits) - 1, -1, -1):
r = digits[i] + carry + remainder
if r >= 10:
carry = 1
remainder = r % 10
else:
carry = 0
remainder = 0
digits[i] = r % 10
if carry and digits[0] == 0:
digits = [1] + digits
return digits
if __name__ == '__main__':
digits = [9]
result = Solution().plusOne(digits)
print(result)
```
#### File: array/easy/removeDuplicates.py
```python
class Solution(object):
"""
Problem:
https://leetcode.com/problems/remove-duplicates-from-sorted-array/
Example:
nums = [1,1,2]
>>
Your function should return length = 2, with the first two elements of
nums being 1 and 2 respectively. It doesn't matter what you leave
beyond the new length.
"""
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums[:] = sorted(list(set(nums)))
return len(nums)
if __name__ == '__main__':
nums = [1,1,2]
result = Solution().removeDuplicates(nums)
print(nums)
print(result)
``` |
{
"source": "joway/lemon",
"score": 2
} |
#### File: lemon/examples/catch_error.py
```python
from lemon.app import Lemon
from lemon.context import Context
async def err_middleware(ctx: Context, nxt):
ctx.body = {
'msg': 'err_middleware'
}
try:
await nxt()
except Exception:
ctx.body = {
'msg': 'error handled'
}
async def middleware_exception(ctx: Context):
raise Exception('error')
app = Lemon(debug=True)
app.use(err_middleware)
app.use(middleware_exception)
app.listen()
```
#### File: lemon/examples/cookies.py
```python
from lemon.app import Lemon
from lemon.context import Context
async def handle(ctx: Context):
my_cookie = ctx.req.cookies.get('my_cookie')
ctx.body = {
'my_cookie': my_cookie,
}
app = Lemon(debug=True)
app.use(handle)
app.listen()
```
#### File: lemon/examples/raw_uvicorn.py
```python
import uvicorn
class EchoBody():
def __init__(self, scope):
self.scope = scope
async def read_body(self, receive):
"""
Read and return the entire body from an incoming ASGI message.
"""
body = b''
more_body = True
while more_body:
message = await receive()
body += message.get('body', b'')
more_body = message.get('more_body', False)
return body
async def __call__(self, receive, send):
body = await self.read_body(receive)
await send({
'type': 'http.response.start',
'status': 200,
'headers': [
[b'content-type', b'text/plain'],
]
})
await send({
'type': 'http.response.body',
'body': body,
})
uvicorn.run(EchoBody, host='127.0.0.1', port='9998')
```
#### File: lemon/examples/router.py
```python
from lemon.app import Lemon
from lemon.context import Context
from lemon.router import SimpleRouter
async def middleware(ctx: Context, nxt):
ctx.body = {
'msg': 'hello lemon'
}
await nxt()
async def handler1(ctx: Context):
ctx.body['ack'] = 'yeah !'
async def handler2(ctx: Context):
ctx.body['ack'] = 'yeah !'
ctx.body['data'] = ctx.req.json
app = Lemon(debug=True)
router = SimpleRouter()
router.get('/handler1', middleware, handler1)
router.post('/handler2', middleware, handler2)
app.use(router.routes())
app.listen()
```
#### File: lemon/examples/throw.py
```python
from lemon.app import Lemon
from lemon.context import Context
async def middleware(ctx: Context):
ctx.throw(status=403, body={
'msg': '403'
})
app = Lemon(debug=True)
app.use(middleware)
app.listen()
```
#### File: lemon/lemon/asgi.py
```python
import typing
from werkzeug.datastructures import ImmutableMultiDict
from lemon.parsers import parse_http_body
from lemon.request import Request, HttpHeaders
class ASGIRequest:
def __init__(self, scope):
self.scope = scope
async def __call__(self, receive: typing.Callable, send: typing.Callable):
# receive body
body = b''
more_body = True
while more_body:
message = await receive()
body += message.get('body', b'')
more_body = message.get('more_body', False)
# parse headers
http_headers = HttpHeaders(raw_headers=self.scope['headers'])
# parse data
data = parse_http_body(headers=http_headers, body=body) if body else ImmutableMultiDict()
# create request
return Request(
http_version=self.scope['http_version'],
method=self.scope['method'],
scheme=self.scope['scheme'],
path=self.scope['path'],
query_string=self.scope['query_string'].decode('utf-8'),
headers=http_headers,
body=body,
data=data,
client=self.scope['client'],
server=self.scope['server'],
)
```
#### File: lemon/lemon/context.py
```python
import typing
from lemon.exception import GeneralException
from lemon.request import Request
from lemon.response import Response
class Context:
"""The Context object store the current request and response .
Your can get all information by use ctx in your handler function .
"""
def __init__(self) -> None:
self.req: typing.Optional[Request] = None
self.res: Response = Response()
# store middleware communication message
self.state: dict = {}
self.params: typing.Optional[dict] = None
def __setattr__(self, key, value) -> None:
# alias
if key == 'body':
self.res.body = value
elif key == 'status':
self.res.status = value
else:
self.__dict__[key] = value
def __getattr__(self, item) -> typing.Any:
# alias
if item == 'body':
return self.res.body
elif item == 'status':
return self.res.status
return self.__dict__[item]
@staticmethod
def throw(status: int, body: typing.Union[str, dict] = None) -> None:
"""Throw the status and response body"""
raise GeneralException(status=status, body=body)
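# Example (hypothetical handler usage): assigning `ctx.body = {...}` or `ctx.status = 404`
# is forwarded to ctx.res by __setattr__ above, and reading `ctx.body` / `ctx.status`
# reads the values back from ctx.res via __getattr__.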
```
#### File: lemon/lemon/middleware.py
```python
import re
import traceback
import typing
from lemon.config import settings
from lemon.context import Context
from lemon.exception import GeneralException
async def exception_middleware(ctx: Context, nxt: typing.Callable) -> typing.Any:
"""Catch the final exception"""
try:
return await nxt()
except GeneralException as e:
ctx.body = e.body
ctx.status = e.status
if settings.LEMON_DEBUG:
traceback.print_exc()
except Exception as e:
ctx.status = 500
ctx.body = ctx.body or {
'error': 'unknown error',
}
traceback.print_exc()
async def cors_middleware(ctx: Context, nxt: typing.Callable):
# settings
LEMON_CORS_ORIGIN_WHITELIST = settings.LEMON_CORS_ORIGIN_WHITELIST
LEMON_CORS_ORIGIN_REGEX_WHITELIST = settings.LEMON_CORS_ORIGIN_REGEX_WHITELIST
LEMON_CORS_ORIGIN_ALLOW_ALL = settings.LEMON_CORS_ORIGIN_ALLOW_ALL
LEMON_CORS_ALLOW_METHODS = settings.LEMON_CORS_ALLOW_METHODS
LEMON_CORS_ALLOW_HEADERS = settings.LEMON_CORS_ALLOW_HEADERS
LEMON_CORS_EXPOSE_HEADERS = settings.LEMON_CORS_EXPOSE_HEADERS
LEMON_CORS_ALLOW_CREDENTIALS = settings.LEMON_CORS_ALLOW_CREDENTIALS
LEMON_CORS_MAX_AGE = settings.LEMON_CORS_MAX_AGE
if ctx.req is None:
return
headers = ctx.req.headers
origin = headers.get('origin', None)
origin_header = origin
# pass request
if origin is None:
return await nxt()
if LEMON_CORS_ORIGIN_ALLOW_ALL:
origin_header = '*'
# preflight request
if ctx.req.method == 'OPTIONS':
acrm = headers.get('access-control-request-method', None)
acrh = headers.get('access-control-request-headers', None)
if acrm is None:
return await nxt()
matched = False
if LEMON_CORS_ORIGIN_ALLOW_ALL:
matched = True
else:
for domain in LEMON_CORS_ORIGIN_WHITELIST:
if domain == origin:
matched = True
for domain_pattern in LEMON_CORS_ORIGIN_REGEX_WHITELIST:
if re.match(domain_pattern, origin):
matched = True
if matched is False:
ctx.status = 200
return
ctx.res.headers['access-control-allow-origin'] = origin_header
if LEMON_CORS_ALLOW_CREDENTIALS:
ctx.res.headers['access-control-allow-credentials'] = 'true'
ctx.res.headers['access-control-max-age'] = LEMON_CORS_MAX_AGE
ctx.res.headers['access-control-allow-methods'] = ','.join(LEMON_CORS_ALLOW_METHODS)
ctx.res.headers['access-control-allow-headers'] = ','.join(LEMON_CORS_ALLOW_HEADERS) or acrh
# stop request
ctx.status = 204
return
# cross origin request
ctx.res.headers['access-control-allow-origin'] = origin_header
if LEMON_CORS_ALLOW_CREDENTIALS:
ctx.res.headers['access-control-allow-credentials'] = 'true'
if LEMON_CORS_EXPOSE_HEADERS:
ctx.res.headers['access-control-expose-headers'] = ','.join(LEMON_CORS_EXPOSE_HEADERS)
return await nxt()
```
#### File: lemon/lemon/request.py
```python
import typing
from urllib.parse import parse_qs
from werkzeug.http import parse_cookie
from lemon.const import MIME_TYPES
class HttpHeaders(dict):
def __init__(self, raw_headers=None, *args, **kwargs):
super(HttpHeaders, self).__init__(*args, **kwargs)
if raw_headers:
for h in raw_headers:
self.__setitem__(h[0].decode(), h[1].decode())
def __setitem__(self, key: str, value):
return super(HttpHeaders, self).__setitem__(key.lower(), str(value))
def __getitem__(self, key: str):
return super(HttpHeaders, self).__getitem__(key.lower())
def set(self, key: str, value):
return self.__setitem__(key, value)
def to_raw(self):
raw_headers = []
for k in self:
raw_headers.append([
k.encode(), self[k].encode(),
])
return raw_headers
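# Example (hypothetical usage): keys are lower-cased on insertion, so lookups are
# case-insensitive with respect to the original header casing:
#   h = HttpHeaders(raw_headers=[[b'Content-Type', b'application/json']])
#   h['content-type']   # -> 'application/json'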
class Request:
"""The Request object store the current request's fully information
Example usage:
ctx.req
"""
def __init__(
self,
http_version: str,
method: str,
scheme: str,
path: str,
query_string: str,
headers: HttpHeaders,
body: bytes,
data: typing.Optional[typing.Dict],
client: tuple,
server: tuple,
) -> None:
self.http_version = http_version
self.method = method.upper()
self.scheme = scheme
self.path = path
self.query_string = query_string
self.headers = headers
self.body = body
self.data = data
self.client = client
self.server = server
# for cache
self._json = None
self._query: typing.Optional[dict] = None
@property
def protocol(self) -> str:
"""http or https
"""
return self.scheme
@property
def secure(self) -> bool:
"""is using https protocol
"""
return self.scheme == 'https'
@property
def host(self) -> str:
"""HTTP_HEADERS['Host']
"""
return self.headers.get('host', '')
@property
def content_type(self) -> str:
"""HTTP_HEADERS['Content-Type']
"""
return self.headers.get('content-type', MIME_TYPES.TEXT_PLAIN)
@property
def query(self) -> dict:
if self._query is None:
_q = parse_qs(self.query_string)
self._query = {k: _q[k][0] for k in _q}
return self._query
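# Example (hypothetical): a query_string of 'page=2&size=10' is parsed by parse_qs
# into {'page': ['2'], 'size': ['10']}; only the first value per key is kept, so
# `ctx.req.query` would be {'page': '2', 'size': '10'}.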
@property
def form(self) -> typing.Optional[typing.Any]:
return self.data
@property
def cookies(self) -> dict:
return parse_cookie(self.headers.get('cookie'))
```
#### File: lemon/lemon/response.py
```python
import json
from lemon.const import MIME_TYPES, CHARSETS
from lemon.request import HttpHeaders
class Response:
def __init__(
self, status: int = 200, headers: HttpHeaders = None,
body: str = None, content_type: str = MIME_TYPES.APPLICATION_JSON,
charset: str = CHARSETS.UTF8,
) -> None:
self.status = status
self.headers = headers if headers else HttpHeaders()
self.body = body or ''
self.content_type = content_type
self.charset = charset
@property
def raw_content_type(self):
content_type = '{type}; {charset}'.format(
type=self.content_type, charset=self.charset
)
return [b'content-type', content_type.encode()]
@property
def raw_headers(self):
content_type_header = self.raw_content_type
_raw_headers = self.headers.to_raw()
_raw_headers.append(content_type_header)
return _raw_headers
@property
def raw_body(self) -> bytes:
_raw_body = b''
if isinstance(self.body, dict):
_raw_body = json.dumps(self.body).encode()
elif isinstance(self.body, str):
_raw_body = self.body.encode()
elif isinstance(self.body, bytes):
_raw_body = self.body
return _raw_body
```
#### File: lemon/tests/test_cors.py
```python
import pytest
from lemon.app import Lemon
from lemon.const import HTTP_METHODS
from lemon.context import Context
from tests import BasicHttpTestCase
@pytest.mark.asyncio
class TestCors(BasicHttpTestCase):
async def test_cors_simple_request(self):
async def handle(ctx: Context):
ctx.body = {
'ack': 'ok',
}
self.app.use(handle)
# GET
req = await self.asgi_request(
self.app,
HTTP_METHODS.GET, '/',
headers=[
[b'origin', b'http://a.com'],
]
)
assert req.headers['access-control-allow-origin'] == 'http://a.com'
# POST
req = await self.asgi_request(
self.app,
HTTP_METHODS.POST, '/',
headers=[
[b'origin', b'http://a.com'],
]
)
assert req.headers['access-control-allow-origin'] == 'http://a.com'
# HEAD
req = await self.asgi_request(
self.app,
HTTP_METHODS.HEAD, '/',
headers=[
[b'origin', b'http://a.com'],
]
)
assert req.headers['access-control-allow-origin'] == 'http://a.com'
async def test_cors_preflight_request(self):
async def handle(ctx: Context):
ctx.body = {
'ack': 'ok',
}
app = Lemon(config={
'LEMON_CORS_ENABLE': True,
'LEMON_CORS_ALLOW_METHODS': ['GET', 'POST'],
'LEMON_CORS_ALLOW_CREDENTIALS': True,
'LEMON_CORS_ORIGIN_WHITELIST': [
'http://a.com',
],
'LEMON_CORS_ORIGIN_REGEX_WHITELIST': [
'http://b.com',
],
}, debug=True)
app.use(handle)
# EMPTY OPTIONS
req = await self.asgi_request(
app,
HTTP_METHODS.OPTIONS, '/',
headers=[
[b'origin', b'http://a.com'],
]
)
assert req.status_code == 200
assert req.json()['ack'] == 'ok'
req = await self.asgi_request(
app,
HTTP_METHODS.OPTIONS, '/',
headers=[
[b'origin', b'http://a.com'],
[b'access-control-request-method', b'POST'],
[b'access-control-request-headers', b'X-PINGOTHER, Content-Type'],
]
)
assert req.status_code == 204
assert req.headers['access-control-allow-origin'] == 'http://a.com'
assert req.headers['access-control-allow-methods'] == 'GET,POST'
assert req.headers['access-control-allow-headers'] == 'X-PINGOTHER, Content-Type'
req = await self.asgi_request(
app,
HTTP_METHODS.OPTIONS, '/',
headers=[
[b'origin', b'http://b.com'],
[b'access-control-request-method', b'POST'],
[b'access-control-request-headers', b'X-PINGOTHER, Content-Type'],
]
)
assert req.status_code == 204
assert req.headers['access-control-allow-origin'] == 'http://b.com'
assert req.headers['access-control-allow-methods'] == 'GET,POST'
assert req.headers['access-control-allow-headers'] == 'X-PINGOTHER, Content-Type'
req = await self.asgi_request(
app,
HTTP_METHODS.OPTIONS, '/',
headers=[
[b'origin', b'http://c.com'],
[b'access-control-request-method', b'POST'],
[b'access-control-request-headers', b'X-PINGOTHER, Content-Type'],
]
)
assert req.status_code == 200
async def test_cors_config(self):
async def handle(ctx: Context):
ctx.body = {
'ack': 'ok',
}
app = Lemon(config={
'LEMON_CORS_ENABLE': True,
'LEMON_CORS_ALLOW_METHODS': ['GET', 'POST'],
'LEMON_CORS_ALLOW_HEADERS': ['allow_header'],
'LEMON_CORS_EXPOSE_HEADERS': ['test_header'],
'LEMON_CORS_ALLOW_CREDENTIALS': True,
'LEMON_CORS_ORIGIN_WHITELIST': [
'http://a.com',
],
'LEMON_CORS_ORIGIN_REGEX_WHITELIST': [
r'^(https?://)?(\w+\.)?b\.com$',
],
'LEMON_CORS_MAX_AGE': 8640,
}, debug=True)
app.use(handle)
# preflight
req = await self.asgi_request(
app,
HTTP_METHODS.OPTIONS, '/',
headers=[
[b'origin', b'http://a.com'],
[b'access-control-request-method', b'POST'],
[b'access-control-request-headers', b'X-PINGOTHER, Content-Type'],
]
)
assert req.headers['access-control-allow-origin'] == 'http://a.com'
assert req.headers['access-control-allow-methods'] == 'GET,POST'
assert req.headers['access-control-allow-headers'] == 'allow_header'
assert req.headers['access-control-allow-credentials'] == 'true'
assert req.headers['access-control-max-age'] == '8640'
assert req.status == 204
req = await self.asgi_request(
app,
HTTP_METHODS.POST, '/',
headers=[
[b'origin', b'http://a.com'],
[b'x-pingother', b'xxx'],
]
)
assert req.headers['access-control-allow-origin'] == 'http://a.com'
assert req.headers['access-control-allow-credentials'] == 'true'
assert req.headers['access-control-expose-headers'] == 'test_header'
assert req.status == 200
async def test_cors_not_allowed_request(self):
async def handle(ctx: Context):
ctx.body = {
'ack': 'ok',
}
app = Lemon(config={
'LEMON_CORS_ENABLE': True,
'LEMON_CORS_ALLOW_METHODS': ['GET', 'POST'],
'LEMON_CORS_ORIGIN_WHITELIST': [
'http://a.com',
],
'LEMON_CORS_MAX_AGE': 8640,
}, debug=True)
app.use(handle)
req = await self.asgi_request(
app,
HTTP_METHODS.OPTIONS, '/',
headers=[
[b'origin', b'https://b.com'],
[b'access-control-request-method', b'POST'],
[b'access-control-request-headers', b'X-PINGOTHER, Content-Type'],
]
)
assert 'access-control-allow-origin' not in req.headers
assert req.status == 200
``` |
{
"source": "joway/net_scripts",
"score": 3
} |
#### File: joway/net_scripts/hznu.py
```python
import requests
from helpers import get_lan_ip, str_to_base64
from local_settings import HZNU_STU_NUM, HZNU_STU_PWD
api = 'http://172.31.1.30/include/auth_action.php'
IP = get_lan_ip()[:-1] + '1'  # drops the last character of the LAN IP and appends '1'; assumes a single-digit final octet
def init_data(username, password):
return {
'action': 'login',
'username': username,
'password': '<PASSWORD>' + str_to_base64(password),  # '<PASSWORD>' is a redacted prefix placeholder
'ac_id': '9',
'user_ip': IP,
'nas_ip': '',
'user_mac': '',
'url': '',
'ajax': 1,
}
data = init_data(HZNU_STU_NUM, HZNU_STU_PWD)
resp = requests.post(api, data=data)
if 'login_ok' in resp.content.decode():
print('Login success')
else:
print(resp.content.decode())
``` |