id | content
---|---
69867 |
import logging
import os
import sys
import pandas as pd
import torch
class Batch_evaluate:
'''
A class to evaluate a function in several batches to save memory.
Sometimes out = func(x) requires too much memory to compute at once; this class divides x into several batches and combines the outputs.
'''
def __init__(self, batch_size=None):
'''
batch_size: the size of each batch; `None` means start from x.shape[0] and halve on out-of-memory errors
'''
self.batch_size = batch_size
def __call__(self, func, x):
'''
Evaluate a function with automatic batch size.
Args:
func: a function or a callable object
x: input of function `func`
Returns:
output of the function `func`; multiple outputs (tuples) are supported
'''
while True:
try:
if self.batch_size is None:
out = func(x)
else:
area_list = x.split(self.batch_size)
out_list = [func(area_batch) for area_batch in area_list]
if isinstance(out_list[0], torch.Tensor): # return a single output
out = torch.cat(out_list)
else: # return multiple outputs
out = tuple([torch.cat(o) for o in zip(*out_list)])
break
except RuntimeError as e:
if 'out of memory' in e.args[0]:
if self.batch_size is None:
self.batch_size = x.shape[0] // 2
else:
self.batch_size //= 2
print(f'{__name__} out of memory, change batch size to {self.batch_size}')
else:
raise e
return out
def get_logger(args):
logger = logging.getLogger(__name__)
file_handler = logging.FileHandler(os.path.join(args.log_root, args.log_name), mode='w')
file_handler.setLevel(logging.INFO)
logger.addHandler(file_handler) # comment this out if you do not want log file
print_handler = logging.StreamHandler(sys.stdout)
print_handler.setLevel(logging.DEBUG)
logger.addHandler(print_handler)
logger.setLevel(logging.DEBUG)
return logger
def unique(inputs, number=None):
"""
Select top `number` unique vectors.
torch.unique has multiple weird behaviors, such as
(1) it cannot return tensors in order of occurrence (results must be sorted),
(2) two float tensors that are close may be regarded as the same (so one of them is eliminated), even though they differ.
Therefore, we use this function to select unique tensors while preserving the order of occurrence.
Args:
inputs: torch.Tensor, size = (dim1, n_bar)
input vectors
number: int
number of unique tensors needed
Returns:
out: torch.Tensor, size = (number, n_bar)
"""
inputs_unique, idx = torch.unique(inputs, return_inverse=True, dim=0)
idx_unique = pd.unique(idx.cpu().numpy())
out = inputs_unique[idx_unique[:number]]
return out
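
# Usage sketch: a minimal smoke test of Batch_evaluate and unique on small CPU
# tensors (the lambda stands in for any memory-hungry callable; nothing here is
# CUDA-specific, so the out-of-memory fallback path is simply never triggered).
if __name__ == '__main__':
    evaluator = Batch_evaluate()
    x = torch.randn(10, 3)
    doubled = evaluator(lambda t: t * 2, x)             # whole input fits, no batching needed
    top2 = unique(torch.cat([x[:2], x[:2]]), number=2)  # duplicates removed, first-occurrence order kept
    print(doubled.shape, top2.shape)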
69869 |
import torch
import torch.nn.functional as F
def compas_robustness_loss(x, aggregates, concepts, relevances):
"""Computes Robustness Loss for the Compas data
Formulated by Alvarez-Melis & Jaakkola (2018)
[https://papers.nips.cc/paper/8003-towards-robust-interpretability-with-self-explaining-neural-networks.pdf]
The loss formulation is specific to the data format
The concept dimension is always 1 for this project by design
Parameters
----------
x : torch.tensor
Input as (batch_size x num_features)
aggregates : torch.tensor
Aggregates from SENN as (batch_size x num_classes x concept_dim)
concepts : torch.tensor
Concepts from Conceptizer as (batch_size x num_concepts x concept_dim)
relevances : torch.tensor
Relevances from Parameterizer as (batch_size x num_concepts x num_classes)
Returns
-------
robustness_loss : torch.tensor
Robustness loss as the Frobenius norm of a (batch_size x num_classes x num_features) tensor
"""
batch_size = x.size(0)
num_classes = aggregates.size(1)
grad_tensor = torch.ones(batch_size, num_classes).to(x.device)
J_yx = torch.autograd.grad(outputs=aggregates, inputs=x, \
grad_outputs=grad_tensor, create_graph=True, only_inputs=True)[0]
# bs x num_features -> bs x num_features x num_classes
J_yx = J_yx.unsqueeze(-1)
# J_hx = Identity Matrix; h(x) is identity function
robustness_loss = J_yx - relevances
return robustness_loss.norm(p='fro')
def mnist_robustness_loss(x, aggregates, concepts, relevances):
"""Computes Robustness Loss for MNIST data
Formulated by Alvarez-Melis & Jaakkola (2018)
[https://papers.nips.cc/paper/8003-towards-robust-interpretability-with-self-explaining-neural-networks.pdf]
The loss formulation is specific to the data format
The concept dimension is always 1 for this project by design
Parameters
----------
x : torch.tensor
Input as (batch_size x num_features)
aggregates : torch.tensor
Aggregates from SENN as (batch_size x num_classes x concept_dim)
concepts : torch.tensor
Concepts from Conceptizer as (batch_size x num_concepts x concept_dim)
relevances : torch.tensor
Relevances from Parameterizer as (batch_size x num_concepts x num_classes)
Returns
-------
robustness_loss : torch.tensor
Robustness loss as the Frobenius norm of a (batch_size x num_classes x num_features) tensor
"""
# concept_dim is always 1
concepts = concepts.squeeze(-1)
aggregates = aggregates.squeeze(-1)
batch_size = x.size(0)
num_concepts = concepts.size(1)
num_classes = aggregates.size(1)
# Jacobian of aggregates wrt x
jacobians = []
for i in range(num_classes):
grad_tensor = torch.zeros(batch_size, num_classes).to(x.device)
grad_tensor[:, i] = 1.
j_yx = torch.autograd.grad(outputs=aggregates, inputs=x, \
grad_outputs=grad_tensor, create_graph=True, only_inputs=True)[0]
# bs x 1 x 28 x 28 -> bs x 784 x 1
jacobians.append(j_yx.view(batch_size, -1).unsqueeze(-1))
# bs x num_features x num_classes (bs x 784 x 10)
J_yx = torch.cat(jacobians, dim=2)
# Jacobian of concepts wrt x
jacobians = []
for i in range(num_concepts):
grad_tensor = torch.zeros(batch_size, num_concepts).to(x.device)
grad_tensor[:, i] = 1.
j_hx = torch.autograd.grad(outputs=concepts, inputs=x, \
grad_outputs=grad_tensor, create_graph=True, only_inputs=True)[0]
# bs x 1 x 28 x 28 -> bs x 784 x 1
jacobians.append(j_hx.view(batch_size, -1).unsqueeze(-1))
# bs x num_features x num_concepts
J_hx = torch.cat(jacobians, dim=2)
# bs x num_features x num_classes
robustness_loss = J_yx - torch.bmm(J_hx, relevances)
return robustness_loss.norm(p='fro')
def BVAE_loss(x, x_hat, z_mean, z_logvar):
""" Calculate Beta-VAE loss as in [1]
Parameters
----------
x : torch.tensor
input data to the Beta-VAE
x_hat : torch.tensor
input data reconstructed by the Beta-VAE
z_mean : torch.tensor
mean of the latent distribution of shape
(batch_size, latent_dim)
z_logvar : torch.tensor
diagonal log variance of the latent distribution of shape
(batch_size, latent_dim)
Returns
-------
(recon_loss, kl_loss) : tuple of torch.tensor
reconstruction loss and KL divergence loss, returned separately so the
caller can weight the KL term by beta when combining them
References
----------
[1] Higgins, Irina, et al. "beta-vae: Learning basic visual concepts with
a constrained variational framework." (2016).
"""
# recon_loss = F.binary_cross_entropy(x_hat, x.detach(), reduction="mean")
recon_loss = F.mse_loss(x_hat, x.detach(), reduction="mean")
kl_loss = kl_div(z_mean, z_logvar)
return recon_loss, kl_loss
def mse_l1_sparsity(x, x_hat, concepts, sparsity_reg):
"""Sum of Mean Squared Error and L1 norm weighted by sparsity regularization parameter
Parameters
----------
x : torch.tensor
Input data to the encoder.
x_hat : torch.tensor
Reconstructed input by the decoder.
concepts : torch.Tensor
Concept (latent code) activations.
sparsity_reg : float
Regularizer (xi) for the sparsity term.
Returns
-------
loss : torch.tensor
Concept loss
"""
return F.mse_loss(x_hat, x.detach()) + sparsity_reg * torch.abs(concepts).sum()
def kl_div(mean, logvar):
"""Computes KL Divergence between a given normal distribution
and a standard normal distribution
Parameters
----------
mean : torch.tensor
mean of the normal distribution of shape (batch_size x latent_dim)
logvar : torch.tensor
diagonal log variance of the normal distribution of shape (batch_size x latent_dim)
Returns
-------
loss : torch.tensor
KL Divergence loss computed in a closed form solution
"""
batch_loss = 0.5 * (mean.pow(2) + logvar.exp() - logvar - 1).mean(dim=0)
loss = batch_loss.sum()
return loss
def zero_loss(*args, **kwargs):
"""Dummy loss that always returns zero.
Parameters
----------
args : list
Can take any number of positional arguments (without using them).
kwargs : dict
Can take any number of keyword arguments (without using them).
Returns
-------
loss : torch.tensor
torch.tensor(0)
"""
return torch.tensor(0)
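
# Usage sketch: a quick shape/value check of the VAE losses with illustrative
# tensor sizes (batch of 4, latent dim 8); with a zero-mean, unit-variance
# posterior the KL term is exactly zero.
if __name__ == '__main__':
    x = torch.rand(4, 1, 28, 28)
    x_hat = torch.rand(4, 1, 28, 28)
    z_mean = torch.zeros(4, 8)
    z_logvar = torch.zeros(4, 8)
    recon_loss, kl_loss = BVAE_loss(x, x_hat, z_mean, z_logvar)
    beta = 4.0
    total = recon_loss + beta * kl_loss
    print(total.item(), kl_loss.item())  # kl_loss.item() == 0.0 here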
69878 |
import pandas as pd
import bayesianpy
from bayesianpy.network import Builder as builder
import logging
import os
import numpy as np
import scipy.stats as ss
import matplotlib.pyplot as plt
import seaborn as sns
def main():
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
bayesianpy.jni.attach(logger)
db_folder = bayesianpy.utils.get_path_to_parent_dir(__file__)
iris = pd.read_csv(os.path.join(db_folder, "data/iris.csv"), index_col=False)
network = bayesianpy.network.create_network()
cluster = builder.create_cluster_variable(network, 4)
node = builder.create_multivariate_continuous_node(network, iris.drop('iris_class',axis=1).columns.tolist(), "joint")
builder.create_link(network, cluster, node)
class_variable = builder.create_discrete_variable(network, iris, 'iris_class', iris['iris_class'].unique())
builder.create_link(network, cluster, class_variable)
head_variables = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
with bayesianpy.data.DataSet(iris, db_folder, logger) as dataset:
model = bayesianpy.model.NetworkModel(network, logger)
model.train(dataset)
queries = [bayesianpy.model.QueryConditionalJointProbability(
head_variables=[v],
tail_variables=['iris_class']) for v in head_variables]
(engine, _, _) = bayesianpy.model.InferenceEngine(network).create()
query = bayesianpy.model.SingleQuery(network, engine, logger)
results = query.query(queries, aslist=True)
jd = bayesianpy.visual.JointDistribution()
fig = plt.figure(figsize=(10,10))
for i, r in enumerate(list(results)):
ax = fig.add_subplot(2, 2, i+1)
jd.plot_distribution_with_variance(ax, iris, queries[i].get_head_variables(), r)
plt.show()
if __name__ == "__main__":
main()
69882 |
from core.models import ProviderType
from api.v2.serializers.details import ProviderTypeSerializer
from api.v2.views.base import AuthModelViewSet
class ProviderTypeViewSet(AuthModelViewSet):
"""
API endpoint that allows provider types to be viewed (read-only).
"""
queryset = ProviderType.objects.all()
serializer_class = ProviderTypeSerializer
http_method_names = ['get', 'head', 'options', 'trace']
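
# Routing sketch: how a read-only viewset like this is typically exposed through
# a DRF router. The URL prefix below is illustrative, not taken from the
# project's actual api/v2 URL configuration.
from rest_framework import routers

router = routers.DefaultRouter()
router.register(r'providertypes', ProviderTypeViewSet)
urlpatterns = router.urls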
69883 |
import dash_bootstrap_components as dbc
from dash import html
from .util import make_subheading
form = html.Div(
[
make_subheading("Form", "form"),
dbc.Form(
[
html.Div(
[
dbc.Label("Username"),
dbc.Input(
placeholder="Enter your username",
type="text",
),
dbc.FormText(
[
"Can't remember your username? ",
html.A(
"Click here.",
href="#",
className="text-muted",
style={"textDecoration": "underline"},
),
]
),
]
),
html.Div(
[
dbc.Label("Username"),
dbc.Input(
placeholder="Enter your password",
type="password",
),
dbc.FormText(
[
"Can't remember your password? ",
html.A(
"Click here.",
href="#",
className="text-muted",
style={"textDecoration": "underline"},
),
]
),
]
),
]
),
],
className="mb-4",
)
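
# Preview sketch: mounting this layout fragment in a small standalone app,
# assuming the Bootstrap stylesheet is loaded via dash-bootstrap-components'
# bundled theme (the original project wires this up elsewhere).
if __name__ == "__main__":
    from dash import Dash

    app = Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
    app.layout = form
    app.run_server(debug=True)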
69909 |
import pytest
from django.contrib.auth import get_user_model
from rest_framework.reverse import reverse
from boards.models import Column, Board, Task
User = get_user_model()
@pytest.fixture
def board(create_user):
user = create_user()
uni_board = Board.objects.create(name="University", owner=user)
uni_board.members.add(user)
return uni_board
@pytest.fixture
def col_backlog(board):
return Column.objects.create(board=board, title="Backlog", column_order=1)
@pytest.fixture
def col_done(board):
return Column.objects.create(board=board, title="Done", column_order=2)
def test_order_columns(api_client_with_credentials, col_backlog, col_done):
"""
Order columns:
Backlog, Done -> Done, Backlog
"""
response = api_client_with_credentials.post(
reverse("sort-column"), {"order": [col_done.id, col_backlog.id]}
)
col_backlog.refresh_from_db()
col_done.refresh_from_db()
assert response.status_code == 200
assert col_done.column_order == 1
assert col_backlog.column_order == 2
def test_order_tasks_same_column(
api_client_with_credentials, column_factory, task_factory
):
"""
Order tasks (in one column):
Task1, Task2, Task3 -> Task3, Task1, Task2
"""
column = column_factory()
task1 = task_factory(column=column, task_order=1)
task2 = task_factory(column=column, task_order=2)
task3 = task_factory(column=column, task_order=3)
# Initial state
column.refresh_from_db()
assert list(column.tasks.all()) == [task1, task2, task3]
response = api_client_with_credentials.post(
reverse("sort-task"),
{
"board": column.board.id,
"tasks": {column.id: [task3.id, task1.id, task2.id]},
"order": [task3.id, task1.id, task2.id],
},
)
assert response.status_code == 200
# State after ordering
column.refresh_from_db()
assert list(column.tasks.all()) == [task3, task1, task2]
def test_order_tasks_between_two_columns(
api_client_with_credentials, board_factory, column_factory, task_factory
):
"""
Order tasks between two columns:
Column1: Task1, Task2, Task3
Column2: Task4, Task5
After order:
Column1: Task1, Task3
Column2: Task4, Task2, Task5
"""
board = board_factory()
column1 = column_factory(board=board)
column2 = column_factory(board=board)
task1 = task_factory(column=column1, task_order=1)
task2 = task_factory(column=column1, task_order=2)
task3 = task_factory(column=column1, task_order=3)
task4 = task_factory(column=column2, task_order=4)
task5 = task_factory(column=column2, task_order=5)
# Initial state
column1.refresh_from_db()
column2.refresh_from_db()
assert list(column1.tasks.all()) == [task1, task2, task3]
assert list(column2.tasks.all()) == [task4, task5]
response = api_client_with_credentials.post(
reverse("sort-task"),
{
"board": column1.board.id,
"tasks": {
column1.id: [task1.id, task3.id],
column2.id: [task4.id, task2.id, task5.id],
},
"order": [task1.id, task3.id, task4.id, task2.id, task5.id],
},
)
assert response.status_code == 200
# State after ordering
column1.refresh_from_db()
column2.refresh_from_db()
assert list(column1.tasks.all()) == [task1, task3]
assert list(column2.tasks.all()) == [task4, task2, task5]
def test_invalid_move_atomic(
api_client_with_credentials, board_factory, column_factory, task_factory
):
board = board_factory()
col1 = column_factory(board=board)
col2 = column_factory(board=board)
col3 = column_factory(board=board)
col1_task = task_factory(column=col1, task_order=1)
col2_task = task_factory(column=col2, task_order=2)
response = api_client_with_credentials.post(
reverse("sort-task"),
{
"board": board.id,
"tasks": {
col1.id: [col1_task.id, col2_task.id],
col3.id: [col1_task.id, col2_task.id],
},
"order": [col1_task.id, col2_task.id],
},
)
assert response.status_code == 400
# State should remain the same
col1.refresh_from_db()
col2.refresh_from_db()
col3.refresh_from_db()
assert list(col1.tasks.all()) == [col1_task]
assert list(col2.tasks.all()) == [col2_task]
assert list(col3.tasks.all()) == []
def test_can_not_order_tasks_between_two_boards(
api_client_with_credentials, board_factory, column_factory, task_factory
):
board1 = board_factory()
board2 = board_factory()
board1_col = column_factory(board=board1)
board2_col = column_factory(board=board2)
board1_task = task_factory(column=board1_col, task_order=1)
board2_task = task_factory(column=board2_col, task_order=2)
response = api_client_with_credentials.post(
reverse("sort-task"),
{
"board": board1.id,
"tasks": {
board1_col.id: [],
board2_col.id: [board1_task.id, board2_task.id],
},
"order": [board1_task.id, board2_task.id],
},
)
assert response.status_code == 400
def test_order_duplicate(api_client_with_credentials, col_done):
response = api_client_with_credentials.post(
reverse("sort-column"), {"order": [col_done.id, col_done.id]}
)
assert response.status_code == 400
@pytest.mark.parametrize(
"post_data,expected_status_code",
[
({"order": [1, 2]}, 200),
({"order": [1, 1]}, 400),
({"order": [-1]}, 400),
({"order": "nope"}, 400),
({"order": {"asd"}}, 400),
({"other": "bad data"}, 400),
({}, 400),
],
)
def test_order_column_status_code(
post_data, expected_status_code, api_client_with_credentials, board
):
Column.objects.create(id=1, board=board, title="col1")
Column.objects.create(id=2, board=board, title="col2")
response = api_client_with_credentials.post(reverse("sort-column"), post_data)
assert response.status_code == expected_status_code
def test_board_list(api_client, steve, amy, leo):
uni_board = Board.objects.create(name="University", owner=steve)
uni_board.members.set([steve, amy])
get_board_list = lambda: api_client.get(reverse("board-list"))
# Not authenticated
response = get_board_list()
assert response.status_code == 401
# Owner can see his own boards
api_client.force_authenticate(user=steve)
response = get_board_list()
assert response.status_code == 200
assert len(response.data) == 1
# Members can see their own boards
api_client.force_authenticate(user=amy)
response = get_board_list()
assert response.status_code == 200
assert len(response.data) == 1
# Not part of any boards, can't see any
api_client.force_authenticate(user=leo)
response = get_board_list()
assert response.status_code == 200
assert len(response.data) == 0
def test_board_detail(api_client, steve, amy, leo):
uni_board = Board.objects.create(name="University", owner=steve)
uni_board.members.set([steve, amy])
get_uni_board_detail = lambda: api_client.get(
reverse("board-detail", kwargs={"pk": uni_board.id})
)
# Not authenticated
response = get_uni_board_detail()
assert response.status_code == 401
# Owner can see his own board
api_client.force_authenticate(user=steve)
response = get_uni_board_detail()
assert response.status_code == 200
assert response.data["name"] == "University"
# Member can see the board
api_client.force_authenticate(user=amy)
response = get_uni_board_detail()
assert response.status_code == 200
assert response.data["name"] == "University"
# Not part of the board, can't see it
api_client.force_authenticate(user=leo)
response = get_uni_board_detail()
assert response.status_code == 404
def test_board_delete(api_client, steve, amy, leo):
uni_board = Board.objects.create(name="University", owner=steve)
uni_board.members.set([steve, amy])
delete_uni_board = lambda: api_client.delete(
reverse("board-detail", kwargs={"pk": uni_board.id})
)
# Not authenticated
response = delete_uni_board()
assert response.status_code == 401
assert Board.objects.filter(id=uni_board.id).exists()
# Not part of the board, can't see it
api_client.force_authenticate(user=leo)
response = delete_uni_board()
assert response.status_code == 404
assert Board.objects.filter(id=uni_board.id).exists()
# Member can't delete the board
api_client.force_authenticate(user=amy)
response = delete_uni_board()
assert response.status_code == 403
assert Board.objects.filter(id=uni_board.id).exists()
# Owner can delete his own board
api_client.force_authenticate(user=steve)
response = delete_uni_board()
assert response.status_code == 204
assert not Board.objects.filter(id=uni_board.id).exists()
def test_board_create(api_client, steve, amy):
assert len(Board.objects.all()) == 0
create_board = lambda: api_client.post(reverse("board-list"), {"name": "Pets"})
# Not authenticated
response = create_board()
assert response.status_code == 401
assert len(Board.objects.all()) == 0
# Steve should be owner and member after creation
api_client.force_authenticate(user=steve)
response = create_board()
assert response.status_code == 201
assert len(Board.objects.all()) == 1
pets = Board.objects.get(name="Pets")
assert pets.owner == steve
assert list(pets.members.all()) == [steve]
# Amy should not see any boards
api_client.force_authenticate(user=amy)
response = api_client.get(reverse("board-list"))
assert response.status_code == 200
assert len(response.data) == 0
def test_board_invite_member(api_client, board_factory, steve, leo, amy):
board = board_factory(owner=steve)
board.members.set([leo, steve])
# Initially there are two members
assert len(board.members.all()) == 2
send_invite = lambda users_ids: api_client.post(
reverse("board-invite-member", kwargs={"pk": board.id}), {"users": users_ids}
)
# Not authenticated
response = send_invite([amy.id])
assert response.status_code == 401
assert len(board.members.all()) == 2
# Leo is not an owner and should not be able to invite others
api_client.force_authenticate(user=leo)
response = send_invite([amy.id])
assert response.status_code == 403
assert len(board.members.all()) == 2
# Steve as the owner should be able to successfully invite Amy
api_client.force_authenticate(user=steve)
response = send_invite([amy.id])
assert response.status_code == 200
assert len(board.members.all()) == 3
assert amy.id in list(map(lambda member: member.id, board.members.all()))
# Should handle adding an existing member
response = send_invite([steve.id])
assert response.status_code == 200
assert len(board.members.all()) == 3
# Should handle adding a non-existent user
response = send_invite([-1])
assert response.status_code == 400
assert len(board.members.all()) == 3
def test_board_remove_member(
api_client, board_factory, column_factory, task_factory, steve, leo, amy, mike
):
board = board_factory(owner=steve)
board.members.set([steve, leo, amy])
column = column_factory(board=board)
task = task_factory(column=column)
# Initially there are three members
assert len(board.members.all()) == 3
remove_member = lambda username: api_client.post(
reverse("board-remove-member", kwargs={"pk": board.id}), {"username": username}
)
# Not authenticated
response = remove_member(leo.username)
assert response.status_code == 401
assert len(board.members.all()) == 3
# Leo should not be able to remove Amy (Leo isn't the owner)
api_client.force_authenticate(user=leo)
response = remove_member(amy.username)
assert response.status_code == 403
assert len(board.members.all()) == 3
# Steve can't remove himself (the owner)
api_client.force_authenticate(user=steve)
response = remove_member(steve.username)
assert response.status_code == 400
assert len(board.members.all()) == 3
# Steve can't remove Mike (not a member of the board)
response = remove_member(mike.username)
assert response.status_code == 400
assert len(board.members.all()) == 3
# Steve can't remove a non-existent user
response = remove_member("notvalidusername")
assert response.status_code == 400
assert len(board.members.all()) == 3
# Steve can remove Leo, should also remove Leo from tasks
task.assignees.set([leo])
assert len(task.assignees.all()) == 1
response = remove_member(leo.username)
assert response.status_code == 200
assert len(board.members.all()) == 2
assert leo.id not in list(map(lambda member: member.id, board.members.all()))
assert len(task.assignees.all()) == 0
def test_update_task_title(api_client, task_factory, steve, amy):
task = task_factory(title="Landing page design")
board = task.column.board
board.members.set([steve])
new_title = "Admin page permissions"
update_title = lambda: api_client.patch(
reverse("task-detail", kwargs={"pk": task.id}), {"title": new_title}
)
# Not authenticated
response = update_title()
assert response.status_code == 401
# Amy not a member, doesn't know about the task
api_client.force_authenticate(user=amy)
response = update_title()
assert response.status_code == 404
# Steve is a board member, can update
api_client.force_authenticate(user=steve)
response = update_title()
task.refresh_from_db()
assert response.status_code == 200
assert task.title == new_title
def test_delete_task(api_client, task_factory, steve, amy):
task = task_factory()
board = task.column.board
board.members.set([steve])
delete_task = lambda: api_client.delete(
reverse("task-detail", kwargs={"pk": task.id})
)
# Not authenticated
response = delete_task()
assert response.status_code == 401
# Amy not a member, doesn't know about the task
api_client.force_authenticate(user=amy)
response = delete_task()
assert response.status_code == 404
# Steve is a board member, can delete
api_client.force_authenticate(user=steve)
response = delete_task()
assert response.status_code == 204
assert not Task.objects.filter(id=task.id).exists()
def test_update_column_title(api_client, column_factory, steve, amy):
column = column_factory(title="On Hold")
board = column.board
board.members.set([steve])
new_title = "Ready"
update_column_title = lambda: api_client.patch(
reverse("column-detail", kwargs={"pk": column.id}), {"title": new_title}
)
# Not authenticated
response = update_column_title()
assert response.status_code == 401
# Amy not a member, doesn't know about the column
api_client.force_authenticate(user=amy)
response = update_column_title()
assert response.status_code == 404
# Steve is a board member, can update
api_client.force_authenticate(user=steve)
response = update_column_title()
column.refresh_from_db()
assert response.status_code == 200
assert column.title == new_title
def test_create_column(api_client, board_factory, steve, amy):
board = board_factory(name="Internals")
board.members.set([steve])
column_data = {"title": "Send verification email on Register", "board": board.id}
create_column = lambda post_data: api_client.post(reverse("column-list"), post_data)
# Not authenticated
response = create_column(column_data)
assert response.status_code == 401
# Amy not a member
api_client.force_authenticate(user=amy)
response = create_column(column_data)
assert response.status_code == 400
assert response.data[0] == "Must be a member of the board!"
# Steve is a board member, can create
api_client.force_authenticate(user=steve)
response = create_column(column_data)
assert response.status_code == 201
assert Column.objects.filter(title=column_data["title"]).exists()
def test_create_task(api_client, column_factory, steve, amy):
column = column_factory(title="Blocked")
board = column.board
board.members.set([steve])
task_data = {
"title": "Send verification email on Regiser",
"description": "<p>Send a verification email when a new user registers. "
"Email template is provided by Dave.</p><p><br></p><p>Use our main SMTP provider.</p>",
"column": column.id,
"labels": [],
"assignees": [steve.id],
"priority": "H",
}
create_task = lambda post_data: api_client.post(reverse("task-list"), post_data)
# Not authenticated
response = create_task(task_data)
assert response.status_code == 401
# Amy not a member
assert amy not in board.members.all()
api_client.force_authenticate(user=amy)
response = create_task(task_data)
assert response.status_code == 400
assert response.data[0] == "Must be a member of the board!"
# One of the assignees (amy) is not a member
api_client.force_authenticate(user=steve)
response = create_task({**task_data, "assignees": [steve.id, amy.id]})
assert response.status_code == 400
assert response.data[0] == "Can't assign someone who isn't a board member!"
# Steve is a board member, can create
api_client.force_authenticate(user=steve)
response = create_task(task_data)
assert response.status_code == 201
assert Task.objects.filter(title=task_data["title"]).exists()
def test_only_board_members_see_labels(
api_client, board_factory, label_factory, steve, amy
):
board = board_factory(name="Internals")
board.members.set([steve])
label = label_factory(name="Documentation", board=board)
get_label = lambda: api_client.get(reverse("label-detail", kwargs={"pk": label.id}))
# Steve is a board member, can get label
api_client.force_authenticate(user=steve)
response = get_label()
assert response.status_code == 200
# Amy is not a board member, doesn't know about the label
api_client.force_authenticate(user=amy)
response = get_label()
assert response.status_code == 404
def test_add_labels_to_task(
api_client, board_factory, column_factory, task_factory, label_factory, steve, amy
):
board1 = board_factory()
board1.members.set([steve])
board2 = board_factory()
column1 = column_factory(board=board1)
label1 = label_factory(board=board1)
label2 = label_factory(board=board2)
task1 = task_factory(column=column1)
add_labels = lambda labels: api_client.patch(
reverse("task-detail", kwargs={"pk": task1.id}), {"labels": labels}
)
# Can't add a label when not a member
api_client.force_authenticate(user=amy)
response = add_labels([label1.id])
task1.refresh_from_db()
assert response.status_code == 404
assert len(task1.labels.all()) == 0
# Can't add a label from a different board
api_client.force_authenticate(user=steve)
response = add_labels([label1.id, label2.id])
task1.refresh_from_db()
assert response.status_code == 400
assert response.data[0] == "Can't set a label that doesn't belong to the board!"
assert len(task1.labels.all()) == 0
# Can add a label of this board as member
api_client.force_authenticate(user=steve)
response = add_labels([label1.id])
task1.refresh_from_db()
assert response.status_code == 200
assert [label.id for label in task1.labels.all()] == [label1.id]
def test_label_names_unique_per_board(
api_client, board_factory, label_factory, steve, amy
):
board = board_factory()
board.members.set([steve])
label1 = label_factory(board=board, name="Hotfix")
label_factory(board=board, name="Bug")
api_client.force_authenticate(user=steve)
response = api_client.patch(
reverse("label-detail", kwargs={"pk": label1.id}), {"name": "Bug"}
)
assert response.status_code == 400
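
# Fixture sketch (hypothetical conftest.py): the api_client and
# api_client_with_credentials fixtures used throughout these tests are assumed
# to wrap DRF's test client roughly like this; the create_user factory name is
# taken from the fixtures referenced above, not from the real conftest.
#
# import pytest
# from rest_framework.test import APIClient
#
# @pytest.fixture
# def api_client():
#     return APIClient()
#
# @pytest.fixture
# def api_client_with_credentials(api_client, create_user):
#     api_client.force_authenticate(user=create_user())
#     return api_client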
69933 |
import io
from twisted.internet import reactor
from ygo.card import Card
from ygo.duel_reader import DuelReader
from ygo.parsers.duel_parser import DuelParser
from ygo.utils import process_duel
def msg_select_unselect_card(self, data):
data = io.BytesIO(data[1:])
player = self.read_u8(data)
finishable = self.read_u8(data)
cancelable = self.read_u8(data)
min = self.read_u8(data)
max = self.read_u8(data)
select_size = self.read_u8(data)
select_cards = []
for i in range(select_size):
code = self.read_u32(data)
loc = self.read_u32(data)
card = Card(code)
card.set_location(loc)
select_cards.append(card)
unselect_size = self.read_u8(data)
unselect_cards = []
for i in range(unselect_size):
code = self.read_u32(data)
loc = self.read_u32(data)
card = Card(code)
card.set_location(loc)
unselect_cards.append(card)
self.cm.call_callbacks('select_unselect_card', player, finishable, cancelable, min, max, select_cards, unselect_cards)
return data.read()
def select_unselect_card(self, player, finishable, cancelable, min, max, select_cards, unselect_cards):
pl = self.players[player]
pl.card_list = select_cards+unselect_cards
def prompt():
text = pl._("Check or uncheck %d to %d cards by entering their number")%(min, max)
if cancelable and not finishable:
text += "\n"+pl._("Enter c to cancel")
if finishable:
text += "\n"+pl._("Enter f to finish")
pl.notify(text)
for i, c in enumerate(pl.card_list):
name = self.cardlist_info_for_player(c, pl)
if c in select_cards:
state = pl._("unchecked")
else:
state = pl._("checked")
pl.notify("%d: %s (%s)" % (i+1, name, state))
pl.notify(DuelReader, f, no_abort="Invalid command", restore_parser=DuelParser)
def error(text):
pl.notify(text)
return prompt()
def f(caller):
if (caller.text == 'c' and cancelable and not finishable) or (caller.text == 'f' and finishable):
self.set_responsei(-1)
reactor.callLater(0, process_duel, self)
return
try:
c = int(caller.text, 10)
except ValueError:
return error(pl._("Invalid command"))
if c < 1 or c > len(pl.card_list):
return error(pl._("Number not in range"))
buf = bytes([1, c - 1])
self.set_responseb(buf)
reactor.callLater(0, process_duel, self)
return prompt()
MESSAGES = {26: msg_select_unselect_card}
CALLBACKS = {'select_unselect_card': select_unselect_card}
69969 |
import numpy as np
import pytest
def test_camera_display_create():
from ctapipe.visualization.bokeh import CameraDisplay
CameraDisplay()
def test_camera_geom(example_event, example_subarray):
from ctapipe.visualization.bokeh import CameraDisplay
t = list(example_event.r0.tel.keys())[0]
geom = example_subarray.tel[t].camera.geometry
c_display = CameraDisplay(geom)
assert (c_display.cdsource.data["x"] == geom.pix_x.value).all()
assert (c_display.cdsource.data["y"] == geom.pix_y.value).all()
t = list(example_event.r0.tel.keys())[1]
geom = example_subarray.tel[t].camera.geometry
c_display.geom = geom
assert (c_display.cdsource.data["x"] == geom.pix_x.value).all()
assert (c_display.cdsource.data["y"] == geom.pix_y.value).all()
def test_camera_image(example_event, example_subarray):
from ctapipe.visualization.bokeh import CameraDisplay, intensity_to_hex
t = list(example_event.r0.tel.keys())[0]
geom = example_subarray.tel[t].camera.geometry
n_pixels = geom.pix_x.value.size
image = np.ones(n_pixels)
colors = intensity_to_hex(image)
with pytest.raises(ValueError):
CameraDisplay(None, image)
c_display = CameraDisplay(geom, image)
assert (c_display.cdsource.data["image"] == colors).all()
assert c_display.image_min == 0
assert c_display.image_max == 2
image[5] = 5
colors = intensity_to_hex(image)
c_display.image = image
assert (c_display.cdsource.data["image"] == colors).all()
assert c_display.image_min == image.min()
assert c_display.image_max == image.max()
def test_camera_enable_pixel_picker(example_event, example_subarray):
from ctapipe.visualization.bokeh import CameraDisplay
t = list(example_event.r0.tel.keys())[0]
geom = example_subarray.tel[t].camera.geometry
n_pixels = geom.pix_x.value.size
image = np.ones(n_pixels)
c_display = CameraDisplay(geom, image)
c_display.enable_pixel_picker(2)
assert len(c_display.active_pixels) == 2
c_display.enable_pixel_picker(3)
assert len(c_display.active_pixels) == 3
def test_fast_camera_display_create(example_event, example_subarray):
from ctapipe.visualization.bokeh import FastCameraDisplay
t = list(example_event.r0.tel.keys())[0]
geom = example_subarray.tel[t].camera.geometry
x = geom.pix_x.value
y = geom.pix_y.value
area = geom.pix_area.value
size = np.sqrt(area)
FastCameraDisplay(x, y, size)
def test_fast_camera_image(example_event, example_subarray):
from ctapipe.visualization.bokeh import FastCameraDisplay, intensity_to_hex
t = list(example_event.r0.tel.keys())[0]
geom = example_subarray.tel[t].camera.geometry
x = geom.pix_x.value
y = geom.pix_y.value
area = geom.pix_area.value
size = np.sqrt(area)
c_display = FastCameraDisplay(x, y, size)
image = np.ones(x.size)
colors = intensity_to_hex(image)
c_display.image = colors
assert (c_display.cdsource.data["image"] == colors).all()
def test_waveform_display_create():
from ctapipe.visualization.bokeh import WaveformDisplay
WaveformDisplay()
def test_waveform_values():
from ctapipe.visualization.bokeh import WaveformDisplay
wf = np.ones(30)
w_display = WaveformDisplay(wf)
assert (w_display.cdsource.data["samples"] == wf).all()
assert (w_display.cdsource.data["t"] == np.arange(wf.size)).all()
wf[5] = 5
w_display.waveform = wf
assert (w_display.cdsource.data["samples"] == wf).all()
assert (w_display.cdsource.data["t"] == np.arange(wf.size)).all()
def test_span():
from ctapipe.visualization.bokeh import WaveformDisplay
wf = np.ones(30)
w_display = WaveformDisplay(wf)
w_display.enable_time_picker()
w_display.active_time = 4
assert w_display.span.location == 4
w_display.active_time = -3
assert w_display.active_time == 0
assert w_display.span.location == 0
w_display.active_time = wf.size + 10
assert w_display.active_time == wf.size - 1
assert w_display.span.location == wf.size - 1
70018 |
import os
from pathlib import Path
from typing import Iterable
from .ext import Date
from .ext import make_weekday_calendar, parse_calendar, make_const_calendar
from .weekday import parse_weekdays
#-------------------------------------------------------------------------------
def load_calendar_file(path, *, name=None):
"""
Loads a calendar from the file at `path`.
The file has the following format::
START date
END date
date
date
...
Each 'date' is in YYYY-MM-DD format. Blank lines are ignored. Text on
each line following the date is ignored.
:param name:
The calendar name. If `None`, the file's stem name is used.
"""
path = Path(path)
with open(path, "r") as file:
cal = parse_calendar(file)
cal.name = path.stem if name is None else name
return cal
def load_business_calendar(holiday_path, weekdays=(0, 1, 2, 3, 4), *, name=None):
holiday_cal = load_calendar_file(holiday_path)
weekday_cal = make_weekday_calendar(holiday_cal.range, weekdays)
cal = weekday_cal & ~holiday_cal
cal.name = (
",".join( str(w) for w in weekdays ) + " except " + holiday_cal.name
if name is None
else name
)
return cal
def format_calendar(cal) -> Iterable[str]:
"""
Formats `cal` in the calendar file format.
"""
start, stop = cal.range
yield f"START {start}"
yield f"STOP {stop}"
yield ""
for date in cal.dates_array:
yield str(date)
def dump_calendar_file(cal, path):
"""
Writes `cal` as a calendar file at `path`.
"""
with open(path, "wt") as file:
for line in format_calendar(cal):
print(line, file=file)
#-------------------------------------------------------------------------------
class CalendarDir:
"""
A directory containing calendar files.
Each calendar file has the suffix '.cal'.
"""
SUFFIX = ".cal"
def __init__(self, path):
self.__path = Path(path)
@property
def path(self):
"""
The path to the calendar directory.
"""
return self.__path
# FIXME: Caching?
def __getitem__(self, name):
"""
Gets a calendar from a calendar file.
"""
path = (self.__path / name).with_suffix(self.SUFFIX)
try:
return load_calendar_file(path)
except FileNotFoundError:
raise KeyError(name)
# The initial calendar dir is the one shipped with Ora, or pointed to by
# ORA_CALENDARS if this is set.
try:
_CALENDAR_DIR = os.environ["ORA_CALENDARS"]
except KeyError:
_CALENDAR_DIR = Path(__file__).parent / "calendars"
_CALENDAR_DIR = CalendarDir(_CALENDAR_DIR)
def get_calendar_dir():
"""
Returns the global calendar directory.
"""
return _CALENDAR_DIR.path
def set_calendar_dir(path):
"""
Sets the global calendar directory.
"""
global _CALENDAR_DIR
_CALENDAR_DIR = CalendarDir(path)
def _get_special_calendar(name):
if name == "all":
return make_const_calendar((Date.MIN, Date.MAX), True)
if name == "none":
return make_const_calendar((Date.MIN, Date.MAX), False)
try:
weekdays = parse_weekdays(name)
except ValueError:
pass
else:
cal = make_weekday_calendar((Date.MIN, Date.MAX), weekdays)
# FIXME: Do this in make_weekday_calendar.
cal.name = name
return cal
raise LookupError(f"unknown calendar: {name}")
def get_calendar(name):
"""
Gets a calendar.
The name may be:
- "all" or "none"
- A weekday expression; see `parse_weekdays`.
- The name of a calendar in the global calendar directory.
"""
name = str(name)
try:
return _get_special_calendar(name)
except LookupError:
pass
return _CALENDAR_DIR[name]
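
# Usage sketch: the two special names below are resolved entirely inside this
# module; any other name would be looked up as a "<name>.cal" file in the
# global calendar directory, which is environment-dependent (ORA_CALENDARS).
if __name__ == "__main__":
    everyday = get_calendar("all")   # const calendar containing every date
    never = get_calendar("none")     # const calendar containing no dates
    for line in format_calendar(never):
        print(line)                  # prints the START/STOP header and no dates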
70021 |
import pybullet as p
def render(height, width, view_matrix, projection_matrix,
shadow=1, light_direction=[1, 1, 1],
renderer=p.ER_BULLET_HARDWARE_OPENGL):
# ER_BULLET_HARDWARE_OPENGL
img_tuple = p.getCameraImage(width,
height,
view_matrix,
projection_matrix,
shadow=shadow,
lightDirection=light_direction,
renderer=renderer)
_, _, img, depth, segmentation = img_tuple
# With a numpy-enabled pybullet build, the RGBA pixels come back as an
# (height, width, 4) array; drop the alpha channel to keep RGB only.
# (Without numpy the buffer is a flat list and would need reshaping first.)
img = img[:, :, :-1]
return img, depth, segmentation
def get_view_matrix(target_pos=(.75, -.2, 0), distance=0.9,
yaw=180, pitch=-20, roll=0, up_axis_index=2):
view_matrix = p.computeViewMatrixFromYawPitchRoll(
target_pos, distance, yaw, pitch, roll, up_axis_index)
return view_matrix
def get_projection_matrix(height, width, fov=60, near_plane=0.1, far_plane=2):
aspect = width / height
projection_matrix = p.computeProjectionMatrixFOV(fov, aspect, near_plane,
far_plane)
return projection_matrix
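
# Usage sketch: render one small frame headlessly. Assumes pybullet_data is
# installed (it ships the plane URDF) and a numpy-enabled pybullet build, which
# the channel slicing in render() already relies on.
if __name__ == "__main__":
    import pybullet_data

    p.connect(p.DIRECT)
    p.setAdditionalSearchPath(pybullet_data.getDataPath())
    p.loadURDF("plane.urdf")
    view = get_view_matrix()
    proj = get_projection_matrix(height=48, width=48)
    rgb, depth, seg = render(48, 48, view, proj, renderer=p.ER_TINY_RENDERER)
    print(rgb.shape, depth.shape, seg.shape)
    p.disconnect()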
70022 |
import datetime
import threading
from django.utils.html import escape as html_escape
from mongoengine import EmbeddedDocument
try:
from mongoengine.base import ValidationError
except ImportError:
from mongoengine.errors import ValidationError
from mongoengine.base.datastructures import BaseList
from mongoengine.queryset import Q
from crits.core.class_mapper import class_from_id
from crits.core.form_consts import NotificationType
from crits.core.user import CRITsUser
from crits.core.user_tools import user_sources, get_subscribed_users
from crits.notifications.notification import Notification
from crits.notifications.processor import ChangeParser, MappedMongoFields
from crits.notifications.processor import NotificationHeaderManager
def create_notification(obj, username, message, source_filter=None,
notification_type=NotificationType.ALERT):
"""
Generate a notification -- based on mongo obj.
:param obj: The object.
:type obj: class which inherits from
:class:`crits.core.crits_mongoengine.CritsBaseAttributes`
:param username: The user creating the notification.
:type username: str
:param message: The notification message.
:type message: str
:param source_filter: Filter on who can see this notification.
:type source_filter: list(str)
:param notification_type: The notification type (e.g. alert, error).
:type notification_type: str
"""
n = Notification()
n.analyst = username
obj_type = obj._meta['crits_type']
users = set()
if notification_type not in NotificationType.ALL:
notification_type = NotificationType.ALERT
n.notification_type = notification_type
if obj_type == 'Comment':
n.obj_id = obj.obj_id
n.obj_type = obj.obj_type
n.notification = "%s added a comment: %s" % (username, obj.comment)
users.update(obj.users) # notify mentioned users
# for comments, use the sources from the object that it is linked to
# instead of the comments's sources
obj = class_from_id(n.obj_type, n.obj_id)
else:
n.notification = message
n.obj_id = obj.id
n.obj_type = obj_type
if hasattr(obj, 'source'):
sources = [s.name for s in obj.source]
subscribed_users = get_subscribed_users(n.obj_type, n.obj_id, sources)
# Filter on users that have access to the source of the object
for subscribed_user in subscribed_users:
allowed_sources = user_sources(subscribed_user)
for allowed_source in allowed_sources:
if allowed_source in sources:
if source_filter is None or allowed_source in source_filter:
users.add(subscribed_user)
break
else:
users.update(get_subscribed_users(n.obj_type, n.obj_id, []))
users.discard(username) # don't notify the user creating this notification
n.users = list(users)
if not len(n.users):
return
try:
n.save()
except ValidationError:
pass
# Signal potentially waiting threads that notification information is available
for user in n.users:
notification_lock = NotificationLockManager.get_notification_lock(user)
notification_lock.acquire()
try:
notification_lock.notifyAll()
finally:
notification_lock.release()
def create_general_notification(username, target_users, header, link_url, message,
notification_type=NotificationType.ALERT):
"""
Generate a general notification -- not based on mongo obj.
:param username: The user creating the notification.
:type username: str
:param target_users: The list of users who will get the notification.
:type target_users: list(str)
:param header: The notification header message.
:type header: str
:param link_url: A link URL for the header, specify None if there is no link.
:type link_url: str
:param message: The notification message.
:type message: str
:param notification_type: The notification type (e.g. alert, error).
:type notification_type: str
"""
if notification_type not in NotificationType.ALL:
notification_type = NotificationType.ALERT
n = Notification()
n.analyst = username
n.notification_type = notification_type
n.notification = message
n.header = header
n.link_url = link_url
for target_user in target_users:
# Check to make sure the user actually exists
user = CRITsUser.objects(username=target_user).first()
if user is not None:
n.users.append(target_user)
# don't notify the user creating this notification
n.users = [u for u in n.users if u != username]
if not len(n.users):
return
try:
n.save()
except ValidationError:
pass
# Signal potentially waiting threads that notification information is available
for user in n.users:
notification_lock = NotificationLockManager.get_notification_lock(user)
notification_lock.acquire()
try:
notification_lock.notifyAll()
finally:
notification_lock.release()
def generate_audit_notification(username, operation_type, obj, changed_fields,
what_changed, is_new_doc=False):
"""
Generate an audit notification on the specific change, if applicable.
This is called during an audit of the object, before the actual save
to the database occurs.
:param username: The user creating the notification.
:type username: str
:param operation_type: The type of operation (i.e. save or delete).
:type operation_type: str
:param obj: The object.
:type obj: class which inherits from
:class:`crits.core.crits_mongoengine.CritsBaseAttributes`
:param changed_fields: A list of field names that were changed.
:type changed_fields: list of str
:param what_changed: A message summarizing what changed.
:type what_changed: str
:param is_new_doc: Indicates if the input obj is newly created.
:type is_new_doc: bool
"""
obj_type = obj._meta['crits_type']
supported_notification = __supported_notification_types__.get(obj_type)
# Check if the obj is supported for notifications
if supported_notification is None:
return
if operation_type == "save":
message = "%s updated the following attributes: %s" % (username,
what_changed)
elif operation_type == "delete":
header_description = generate_notification_header(obj)
message = "%s deleted the following: %s" % (username,
header_description)
if is_new_doc:
sources = []
if hasattr(obj, 'source'):
sources = [s.name for s in obj.source]
message = None
target_users = get_subscribed_users(obj_type, obj.id, sources)
header = generate_notification_header(obj)
link_url = None
if hasattr(obj, 'get_details_url'):
link_url = obj.get_details_url()
if header is not None:
header = "New " + header
create_general_notification(username,
target_users,
header,
link_url,
message)
process_result = process_changed_fields(message, changed_fields, obj)
message = process_result.get('message')
source_filter = process_result.get('source_filter')
if message is not None:
message = html_escape(message)
create_notification(obj, username, message, source_filter, NotificationType.ALERT)
def combine_source_filters(current_source_filters, new_source_filters):
"""
Combines sources together in a restrictive way, e.g. combines sources
like a boolean AND operation, e.g. the source must exist in both lists.
The only exception is if current_source_filters == None, in which case the
new_source_filters will act as the new baseline.
:param current_source_filters: The current list of source names.
:type current_source_filters: list(str)
:param new_source_filters: The new list of source names.
:type new_source_filters: list(str)
:returns: list(str): Returns the combined list of source names.
"""
combined_source_filters = []
if current_source_filters is None:
return new_source_filters
else:
for new_source_filter in new_source_filters:
if new_source_filter in current_source_filters:
combined_source_filters.append(new_source_filter)
return combined_source_filters
def process_changed_fields(initial_message, changed_fields, obj):
"""
Processes the changed fields to determine what actually changed.
:param initial_message: An initial message to include.
:type initial_message: str
:param changed_fields: A list of field names that were changed.
:type changed_fields: list of str
:param obj: The object.
:type obj: class which inherits from
:class:`crits.core.crits_mongoengine.CritsBaseAttributes`
:returns: str: Returns a message indicating what was changed.
"""
obj_type = obj._meta['crits_type']
message = initial_message
if message is None:
message = ''
source_filter = None
for changed_field in changed_fields:
# Fields may be fully qualified, e.g. source.1.instances.0.reference
# So, split on the '.' character and get the root of the changed field
base_changed_field = MappedMongoFields.get_mapped_mongo_field(obj_type, changed_field.split('.')[0])
new_value = getattr(obj, base_changed_field, '')
old_obj = class_from_id(obj_type, obj.id)
old_value = getattr(old_obj, base_changed_field, '')
change_handler = ChangeParser.get_changed_field_handler(obj_type, base_changed_field)
if change_handler is not None:
change_message = change_handler(old_value, new_value, base_changed_field)
if isinstance(change_message, dict):
if change_message.get('source_filter') is not None:
new_source_filter = change_message.get('source_filter')
source_filter = combine_source_filters(source_filter, new_source_filter)
change_message = change_message.get('message')
if change_message is not None:
message += "\n" + change_message[:1].capitalize() + change_message[1:]
else:
change_field_handler = ChangeParser.generic_single_field_change_handler
if isinstance(old_value, BaseList):
list_value = None
if len(old_value) > 0:
list_value = old_value[0]
elif len(new_value) > 0:
list_value = new_value[0]
if isinstance(list_value, basestring):
change_field_handler = ChangeParser.generic_list_change_handler
elif isinstance(list_value, EmbeddedDocument):
change_field_handler = ChangeParser.generic_list_json_change_handler
change_message = change_field_handler(old_value, new_value, base_changed_field)
if isinstance(change_message, dict):
if change_message.get('source_filter') is not None:
new_source_filter = change_message.get('source_filter')
source_filter = combine_source_filters(source_filter, new_source_filter)
change_message = change_message.get('message')
if change_message is not None:
message += "\n" + change_message[:1].capitalize() + change_message[1:]
return {'message': message, 'source_filter': source_filter}
def get_notification_details(request, newer_than):
"""
Generate the data to render the notification dialogs.
:param request: The Django request.
:type request: :class:`django.http.HttpRequest`
:param newer_than: A filter that specifies that only notifications
newer than this time should be returned.
:type newer_than: str in ISODate format.
:returns: arguments (dict)
"""
username = request.user.username
notifications_list = []
notifications = None
latest_notification_time = None
lock = NotificationLockManager.get_notification_lock(username)
timeout = 0
# Critical section, check if there are notifications to be consumed.
lock.acquire()
try:
notifications = get_user_notifications(username, newer_than=newer_than)
if len(notifications) > 0:
latest_notification_time = str(notifications[0].created)
else:
# no new notifications -- block until time expiration or lock release
lock.wait(60)
# lock was released, check if there is any new information yet
notifications = get_user_notifications(username, newer_than=newer_than)
if len(notifications) > 0:
latest_notification_time = str(notifications[0].created)
finally:
lock.release()
if latest_notification_time is not None:
acknowledgement_type = request.user.get_preference('toast_notifications', 'acknowledgement_type', 'sticky')
if acknowledgement_type == 'timeout':
timeout = request.user.get_preference('toast_notifications', 'timeout', 30) * 1000
for notification in notifications:
obj = class_from_id(notification.obj_type, notification.obj_id)
if obj is not None:
link_url = obj.get_details_url()
header = generate_notification_header(obj)
else:
if notification.header is not None:
header = notification.header
else:
header = "%s %s" % (notification.obj_type, notification.obj_id)
if notification.link_url is not None:
link_url = notification.link_url
else:
link_url = None
notification_type = notification.notification_type
if notification_type is None or notification_type not in NotificationType.ALL:
notification_type = NotificationType.ALERT
notification_data = {
"header": header,
"message": notification.notification,
"date_modified": str(notification.created.strftime("%Y/%m/%d %H:%M:%S")),
"link": link_url,
"modified_by": notification.analyst,
"id": str(notification.id),
"type": notification_type,
}
notifications_list.append(notification_data)
return {
'notifications': notifications_list,
'newest_notification': latest_notification_time,
'server_time': str(datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")),
'timeout': timeout,
}
def get_notifications_for_id(username, obj_id, obj_type):
"""
Get notifications for a specific top-level object and user.
:param username: The user to search for.
:param obj_id: The ObjectId to search for.
:type obj_id: str
:param obj_type: The top-level object type.
:type obj_type: str
:returns: :class:`crits.core.crits_mongoengine.CritsQuerySet`
"""
return Notification.objects(users=username,
obj_id=obj_id,
obj_type=obj_type)
def remove_notification(obj_id):
"""
Remove an existing notification.
:param obj_id: The top-level ObjectId to find the notification to remove.
:type obj_id: str
:returns: dict with keys "success" (boolean) and "message" (str).
"""
notification = Notification.objects(id=obj_id).first()
if not notification:
message = "Could not find notification to remove!"
result = {'success': False, 'message': message}
else:
notification.delete()
message = "Notification removed successfully!"
result = {'success': True, 'message': message}
return result
def get_new_notifications():
"""
Get any new notifications.
"""
return Notification.objects(status="new")
def remove_user_from_notification(username, obj_id, obj_type):
"""
Remove a user from the list of users for a notification.
:param username: The user to remove.
:type username: str
:param obj_id: The ObjectId of the top-level object for this notification.
:type obj_id: str
:param obj_type: The top-level object type.
:type obj_type: str
:returns: dict with keys "success" (boolean) and "message" (str) if failed.
"""
Notification.objects(obj_id=obj_id,
obj_type=obj_type).update(pull__users=username)
return {'success': True}
def remove_user_from_notification_id(username, id):
"""
Remove a user from the list of users for a notification.
:param username: The user to remove.
:type username: str
:param id: The ObjectId of the notification.
:type id: str
:returns: dict with keys "success" (boolean) and "message" (str) if failed.
"""
Notification.objects(id=id).update(pull__users=username)
return {'success': True}
def remove_user_notifications(username):
"""
Remove a user from all notifications.
:param username: The user to remove.
:type username: str
:returns: dict with keys "success" (boolean) and "message" (str) if failed.
"""
Notification.objects(users=username).update(pull__users=username)
def get_user_notifications(username, count=False, newer_than=None):
"""
Get the notifications for a user.
:param username: The user to get notifications for.
:type username: str
:param count: Only return the count.
:type count: bool
:param newer_than: Only return notifications created after this time.
:type newer_than: str in ISODate format
:returns: int, :class:`crits.core.crits_mongoengine.CritsQuerySet`
"""
n = None
if newer_than is None:
n = Notification.objects(users=username).order_by('-created')
else:
n = Notification.objects(Q(users=username) & Q(created__gt=newer_than)).order_by('-created')
if count:
return len(n)
else:
return n
__supported_notification_types__ = {
'Actor': 'name',
'Campaign': 'name',
'Certificate': 'md5',
'Comment': 'object_id',
'Domain': 'domain',
'Email': 'id',
'Event': 'id',
'Indicator': 'id',
'IP': 'ip',
'PCAP': 'md5',
'RawData': 'title',
'Sample': 'md5',
'Target': 'email_address',
}
class NotificationLockManager(object):
"""
Manager class to handle locks for notifications.
"""
__notification_mutex__ = threading.Lock()
__notification_locks__ = {}
@classmethod
def get_notification_lock(cls, username):
"""
@threadsafe
Gets a notification lock for the specified user, if it doesn't exist
then one is created.
"""
if username not in cls.__notification_locks__:
# notification lock doesn't exist for user, create new lock
cls.__notification_mutex__.acquire()
try:
# safe double checked locking
if username not in cls.__notification_locks__:
cls.__notification_locks__[username] = threading.Condition()
finally:
cls.__notification_mutex__.release()
return cls.__notification_locks__.get(username)
def generate_notification_header(obj):
"""
Generates notification header information based upon the object -- this is
used to preface the notification's context.
Could possibly be used for "Favorites" descriptions as well.
:param obj: The top-level object instantiated class.
:type obj: class which inherits from
:class:`crits.core.crits_mongoengine.CritsBaseAttributes`.
:returns: str with a human readable identification of the object
"""
generate_notification_header_handler = NotificationHeaderManager.get_header_handler(obj._meta['crits_type'])
if generate_notification_header_handler is not None:
return generate_notification_header_handler(obj)
else:
return "%s: %s" % (obj._meta['crits_type'], str(obj.id))
70027 |
from typing import List, Union, Dict
from unittest import TestCase
from unittest.mock import ANY, patch, Mock
from parameterized import parameterized
from samcli.lib.cookiecutter.question import Question, QuestionKind, Choice, Confirm, Info, QuestionFactory
class TestQuestion(TestCase):
_ANY_TEXT = "any text"
_ANY_KEY = "any key"
_ANY_OPTIONS = ["option1", "option2", "option3"]
_ANY_ANSWER = "any answer"
_ANY_NEXT_QUESTION_MAP = {
"option1": "key1",
"option2": "key2",
"option3": "key3",
}
_ANY_DEFAULT_NEXT_QUESTION_KEY = "default"
_ANY_KIND = QuestionKind.question
def setUp(self):
self.question = Question(
text=self._ANY_TEXT,
key=self._ANY_KEY,
default=self._ANY_ANSWER,
is_required=True,
allow_autofill=False,
next_question_map=self._ANY_NEXT_QUESTION_MAP,
default_next_question_key=self._ANY_DEFAULT_NEXT_QUESTION_KEY,
)
def get_question_with_default_from_cookiecutter_context_using_keypath(
self, key_path: List[Union[str, Dict]]
) -> Question:
return Question(
text=self._ANY_TEXT,
key=self._ANY_KEY,
default={"keyPath": key_path},
is_required=True,
next_question_map=self._ANY_NEXT_QUESTION_MAP,
default_next_question_key=self._ANY_DEFAULT_NEXT_QUESTION_KEY,
)
def test_creating_questions(self):
q = Question(text=self._ANY_TEXT, key=self._ANY_KEY)
self.assertEqual(q.text, self._ANY_TEXT)
self.assertEqual(q.key, self._ANY_KEY)
self.assertEqual(q.default_answer, "")
self.assertFalse(q.required)
self.assertEqual(q.next_question_map, {})
self.assertIsNone(q.default_next_question_key)
q = self.question
self.assertEqual(q.text, self._ANY_TEXT)
self.assertEqual(q.key, self._ANY_KEY)
self.assertEqual(q.default_answer, self._ANY_ANSWER)
self.assertTrue(q.required)
self.assertEqual(q.next_question_map, self._ANY_NEXT_QUESTION_MAP)
self.assertEqual(q.default_next_question_key, self._ANY_DEFAULT_NEXT_QUESTION_KEY)
def test_question_key_and_text_are_required(self):
with self.assertRaises(TypeError):
Question(text=self._ANY_TEXT)
with self.assertRaises(TypeError):
Question(key=self._ANY_KEY)
def test_get_next_question_key(self):
self.assertEqual(self.question.get_next_question_key("option1"), "key1")
self.assertEqual(self.question.get_next_question_key("option2"), "key2")
self.assertEqual(self.question.get_next_question_key("option3"), "key3")
self.assertEqual(self.question.get_next_question_key("any-option"), self._ANY_DEFAULT_NEXT_QUESTION_KEY)
self.question.set_default_next_question_key("new_default")
self.assertEqual(self.question.get_next_question_key(None), "new_default")
@patch("samcli.lib.cookiecutter.question.click")
def test_ask(self, mock_click):
mock_click.prompt.return_value = self._ANY_ANSWER
answer = self.question.ask({})
self.assertEqual(answer, self._ANY_ANSWER)
mock_click.prompt.assert_called_once_with(text=self.question.text, default=self.question.default_answer)
@patch("samcli.lib.cookiecutter.question.click")
def test_ask_resolves_from_cookiecutter_context(self, mock_click):
# Setup
expected_default_value = Mock()
previous_question_key = "this is a question"
previous_question_answer = "this is an answer"
context = {
"['x', 'this is an answer']": expected_default_value,
previous_question_key: previous_question_answer,
}
question = self.get_question_with_default_from_cookiecutter_context_using_keypath(
["x", {"valueOf": previous_question_key}]
)
# Trigger
question.ask(context=context)
# Verify
mock_click.prompt.assert_called_once_with(text=self.question.text, default=expected_default_value)
@patch("samcli.lib.cookiecutter.question.click")
def test_ask_resolves_from_cookiecutter_context_non_exist_key_path(self, mock_click):
# Setup
context = {}
question = self.get_question_with_default_from_cookiecutter_context_using_keypath(["y"])
# Trigger
question.ask(context=context)
# Verify
mock_click.prompt.assert_called_once_with(text=self.question.text, default=None)
def test_ask_resolves_from_cookiecutter_context_non_exist_question_key(self):
# Setup
expected_default_value = Mock()
previous_question_key = "this is a question"
previous_question_answer = "this is an answer"
context = {
"['x', 'this is an answer']": expected_default_value,
previous_question_key: previous_question_answer,
}
question = self.get_question_with_default_from_cookiecutter_context_using_keypath(
["x", {"valueOf": "non_exist_question_key"}]
)
# Trigger
with self.assertRaises(KeyError):
question.ask(context=context)
@parameterized.expand([("this should have been a list"), ([1],), ({},)])
def test_ask_resolves_from_cookiecutter_context_with_key_path_not_a_list(self, key_path):
# Setup
context = {}
question = self.get_question_with_default_from_cookiecutter_context_using_keypath(key_path)
# Trigger
with self.assertRaises(ValueError):
question.ask(context=context)
@parameterized.expand([({"keyPath123": Mock()},), ({"keyPath": [{"valueOf123": Mock()}]},)])
def test_ask_resolves_from_cookiecutter_context_with_default_object_missing_keys(self, default_object):
# Setup
context = {}
question = self.get_question_with_default_from_cookiecutter_context_using_keypath([])
question._default_answer = default_object
# Trigger
with self.assertRaises(KeyError):
question.ask(context=context)
def test_question_allow_autofill_with_default_value(self):
q = Question(text=self._ANY_TEXT, key=self._ANY_KEY, is_required=True, allow_autofill=True, default="123")
self.assertEquals("123", q.ask())
@patch("samcli.lib.cookiecutter.question.click")
def test_question_allow_autofill_without_default_value(self, click_mock):
answer_mock = click_mock.prompt.return_value = Mock()
q = Question(text=self._ANY_TEXT, key=self._ANY_KEY, is_required=True, allow_autofill=True)
self.assertEqual(answer_mock, q.ask())
class TestChoice(TestCase):
def setUp(self):
self.question = Choice(
text=TestQuestion._ANY_TEXT,
key=TestQuestion._ANY_KEY,
options=TestQuestion._ANY_OPTIONS,
default=TestQuestion._ANY_ANSWER,
is_required=True,
next_question_map=TestQuestion._ANY_NEXT_QUESTION_MAP,
default_next_question_key=TestQuestion._ANY_DEFAULT_NEXT_QUESTION_KEY,
)
def test_create_choice_question(self):
self.assertEqual(self.question.text, TestQuestion._ANY_TEXT)
self.assertEqual(self.question.key, TestQuestion._ANY_KEY)
self.assertEqual(self.question._options, TestQuestion._ANY_OPTIONS)
with self.assertRaises(TypeError):
Choice(key=TestQuestion._ANY_KEY, text=TestQuestion._ANY_TEXT)
with self.assertRaises(ValueError):
Choice(key=TestQuestion._ANY_KEY, text=TestQuestion._ANY_TEXT, options=None)
with self.assertRaises(ValueError):
Choice(key=TestQuestion._ANY_KEY, text=TestQuestion._ANY_TEXT, options=[])
def test_get_options_indexes_with_different_bases(self):
indexes = self.question._get_options_indexes()
self.assertEqual(indexes, [0, 1, 2])
indexes = self.question._get_options_indexes(base=1)
self.assertEqual(indexes, [1, 2, 3])
@patch("samcli.lib.cookiecutter.question.click.Choice")
@patch("samcli.lib.cookiecutter.question.click")
def test_ask(self, mock_click, mock_choice):
mock_click.prompt.return_value = 2
answer = self.question.ask({})
self.assertEqual(answer, TestQuestion._ANY_OPTIONS[1]) # we deduct one from user's choice (base 1 vs base 0)
mock_click.prompt.assert_called_once_with(
text="Choice",
default=self.question.default_answer,
show_choices=False,
type=ANY,
show_default=self.question.default_answer is not None,
)
mock_choice.assert_called_once_with(["1", "2", "3"])
class TestInfo(TestCase):
@patch("samcli.lib.cookiecutter.question.click")
def test_ask(self, mock_click):
q = Info(text=TestQuestion._ANY_TEXT, key=TestQuestion._ANY_KEY)
mock_click.echo.return_value = None
answer = q.ask({})
self.assertIsNone(answer)
mock_click.echo.assert_called_once_with(message=q.text)
class TestConfirm(TestCase):
@patch("samcli.lib.cookiecutter.question.click")
def test_ask(self, mock_click):
q = Confirm(text=TestQuestion._ANY_TEXT, key=TestQuestion._ANY_KEY)
mock_click.confirm.return_value = True
answer = q.ask({})
self.assertTrue(answer)
mock_click.confirm.assert_called_once_with(text=q.text)
class TestQuestionFactory(TestCase):
def test_there_is_a_handler_for_each_question_kind(self):
question_json = {"key": TestQuestion._ANY_KEY, "question": TestQuestion._ANY_TEXT, "options": ["a", "b"]}
for kind in QuestionKind:
question_json["kind"] = kind.name
q = QuestionFactory.create_question_from_json(question_json)
expected_type = QuestionFactory.question_classes[kind]
self.assertTrue(isinstance(q, expected_type))
|
70043
|
import copy
import logging
import dask
import numpy as np
import xarray as xr
from numcodecs.compat import ensure_ndarray
from xarray.backends.zarr import (
DIMENSION_KEY,
encode_zarr_attr_value,
encode_zarr_variable,
extract_zarr_variable_encoding,
)
from zarr.meta import encode_fill_value
from zarr.storage import array_meta_key, attrs_key, default_compressor, group_meta_key
from zarr.util import normalize_shape
from .api import DATASET_ID_ATTR_KEY
dask_array_type = (dask.array.Array,)
zarr_format = 2
zarr_consolidated_format = 1
zarr_metadata_key = '.zmetadata'
logger = logging.getLogger('api')
def _extract_dataset_zattrs(dataset: xr.Dataset):
"""helper function to create zattrs dictionary from Dataset global attrs"""
zattrs = {}
for k, v in dataset.attrs.items():
zattrs[k] = encode_zarr_attr_value(v)
# remove xpublish internal attribute
zattrs.pop(DATASET_ID_ATTR_KEY, None)
return zattrs
def _extract_dataarray_zattrs(da):
"""helper function to extract zattrs dictionary from DataArray"""
zattrs = {}
for k, v in da.attrs.items():
zattrs[k] = encode_zarr_attr_value(v)
zattrs[DIMENSION_KEY] = list(da.dims)
# We don't want `_FillValue` in `.zattrs`
# It should go in `fill_value` section of `.zarray`
_ = zattrs.pop('_FillValue', None)
return zattrs
def _extract_fill_value(da, dtype):
"""helper function to extract fill value from DataArray."""
fill_value = da.attrs.pop('_FillValue', None)
return encode_fill_value(fill_value, dtype)
def _extract_zarray(da, encoding, dtype):
"""helper function to extract zarr array metadata."""
meta = {
'compressor': encoding.get('compressor', da.encoding.get('compressor', default_compressor)),
'filters': encoding.get('filters', da.encoding.get('filters', None)),
'chunks': encoding.get('chunks', None),
'dtype': dtype.str,
'fill_value': _extract_fill_value(da, dtype),
'order': 'C',
'shape': list(normalize_shape(da.shape)),
'zarr_format': zarr_format,
}
if meta['chunks'] is None:
meta['chunks'] = da.shape
# validate chunks
if isinstance(da.data, dask_array_type):
var_chunks = tuple([c[0] for c in da.data.chunks])
else:
var_chunks = da.shape
if not var_chunks == tuple(meta['chunks']):
raise ValueError('Encoding chunks do not match inferred chunks')
meta['chunks'] = list(meta['chunks']) # return chunks as a list
return meta
def create_zvariables(dataset):
"""Helper function to create a dictionary of zarr encoded variables."""
zvariables = {}
for key, da in dataset.variables.items():
encoded_da = encode_zarr_variable(da, name=key)
zvariables[key] = encoded_da
return zvariables
def create_zmetadata(dataset):
"""Helper function to create a consolidated zmetadata dictionary."""
zmeta = {'zarr_consolidated_format': zarr_consolidated_format, 'metadata': {}}
zmeta['metadata'][group_meta_key] = {'zarr_format': zarr_format}
zmeta['metadata'][attrs_key] = _extract_dataset_zattrs(dataset)
for key, da in dataset.variables.items():
encoded_da = encode_zarr_variable(da, name=key)
encoding = extract_zarr_variable_encoding(da)
zmeta['metadata'][f'{key}/{attrs_key}'] = _extract_dataarray_zattrs(encoded_da)
zmeta['metadata'][f'{key}/{array_meta_key}'] = _extract_zarray(
encoded_da, encoding, encoded_da.dtype
)
return zmeta
def jsonify_zmetadata(dataset: xr.Dataset, zmetadata: dict) -> dict:
"""Helper function to convert zmetadata dictionary to a json
compatible dictionary.
"""
zjson = copy.deepcopy(zmetadata)
for key in list(dataset.variables):
# convert compressor to dict
compressor = zjson['metadata'][f'{key}/{array_meta_key}']['compressor']
if compressor is not None:
compressor_config = zjson['metadata'][f'{key}/{array_meta_key}'][
'compressor'
].get_config()
zjson['metadata'][f'{key}/{array_meta_key}']['compressor'] = compressor_config
return zjson
def encode_chunk(chunk, filters=None, compressor=None):
"""helper function largely copied from zarr.Array"""
# apply filters
if filters:
for f in filters:
chunk = f.encode(chunk)
# check object encoding
if ensure_ndarray(chunk).dtype == object:
raise RuntimeError('cannot write object array without object codec')
# compress
if compressor:
cdata = compressor.encode(chunk)
else:
cdata = chunk
return cdata
def get_data_chunk(da, chunk_id, out_shape):
"""Get one chunk of data from this DataArray (da).
If this is an incomplete edge chunk, pad the returned array to match out_shape.
"""
ikeys = tuple(map(int, chunk_id.split('.')))
if isinstance(da, dask_array_type):
chunk_data = da.blocks[ikeys]
else:
if ikeys != ((0,) * da.ndim):
raise ValueError(
'Invalid chunk_id for numpy array: %s. Should have been: %s'
% (chunk_id, ((0,) * da.ndim))
)
chunk_data = np.asarray(da)
logger.debug('checking chunk output size, %s == %s' % (chunk_data.shape, out_shape))
if isinstance(chunk_data, dask_array_type):
chunk_data = chunk_data.compute()
# zarr expects full edge chunks, contents out of bounds for the array are undefined
if chunk_data.shape != tuple(out_shape):
new_chunk = np.empty_like(chunk_data, shape=out_shape)
write_slice = tuple([slice(0, s) for s in chunk_data.shape])
new_chunk[write_slice] = chunk_data
return new_chunk
else:
return chunk_data
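# Minimal sketch (not part of the original module) of the edge-chunk padding
# performed by get_data_chunk: zarr clients always request full-size chunks,
# so a 2x3 array served with 4x4 chunks comes back padded. The array here is
# hypothetical.
#
#     arr = np.arange(6, dtype='f4').reshape(2, 3)
#     chunk = get_data_chunk(arr, '0.0', out_shape=(4, 4))
#     assert chunk.shape == (4, 4)
#     np.testing.assert_array_equal(chunk[:2, :3], arr)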
|
70052
|
def hvplot_with_buffer(gdf, buffer_size, *args, **kwargs):
"""
Convenience function for plotting a GeoPandas point GeoDataFrame using point markers plus buffer polygons
Parameters
----------
gdf : geopandas.GeoDataFrame
point GeoDataFrame to plot
buffer_size : numeric
size of the buffer in meters (measured in EPSG:31287)
"""
buffered = gdf.to_crs('epsg:31287').buffer(buffer_size)
buffered = gdf.copy().set_geometry(buffered).to_crs('epsg:4326')
plot = ( buffered.hvplot(geo=True, tiles='OSM', alpha=0.5, line_width=0, *args, **kwargs) *
gdf.hvplot(geo=True, hover_cols=['DESIGNATION'])
).opts(active_tools=['wheel_zoom'])
return plot
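# Example usage (illustrative; the file name is hypothetical and the layer is
# assumed to be a point GeoDataFrame in EPSG:4326 with a 'DESIGNATION' column,
# which the hover tool above expects):
#
#     import geopandas as gpd
#     gdf = gpd.read_file('protected_sites.gpkg')
#     hvplot_with_buffer(gdf, 500, title='500 m buffers')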
|
70078
|
import argparse
import cv2
import numpy as np
import torch.nn.functional as F
from torchvision.transforms.functional import normalize
from facexlib.matting import init_matting_model
from facexlib.utils import img2tensor
def main(args):
modnet = init_matting_model()
# read image
img = cv2.imread(args.img_path) / 255.
# unify image channels to 3
if len(img.shape) == 2:
img = img[:, :, None]
if img.shape[2] == 1:
img = np.repeat(img, 3, axis=2)
elif img.shape[2] == 4:
img = img[:, :, 0:3]
img_t = img2tensor(img, bgr2rgb=True, float32=True)
normalize(img_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
img_t = img_t.unsqueeze(0).cuda()
# resize image for input
_, _, im_h, im_w = img_t.shape
ref_size = 512
if max(im_h, im_w) < ref_size or min(im_h, im_w) > ref_size:
if im_w >= im_h:
im_rh = ref_size
im_rw = int(im_w / im_h * ref_size)
elif im_w < im_h:
im_rw = ref_size
im_rh = int(im_h / im_w * ref_size)
else:
im_rh = im_h
im_rw = im_w
im_rw = im_rw - im_rw % 32
im_rh = im_rh - im_rh % 32
img_t = F.interpolate(img_t, size=(im_rh, im_rw), mode='area')
# inference
_, _, matte = modnet(img_t, True)
# resize and save matte
matte = F.interpolate(matte, size=(im_h, im_w), mode='area')
matte = matte[0][0].data.cpu().numpy()
cv2.imwrite(args.save_path, (matte * 255).astype('uint8'))
# get foreground
matte = matte[:, :, None]
foreground = img * matte + np.full(img.shape, 1) * (1 - matte)
cv2.imwrite(args.save_path.replace('.png', '_fg.png'), foreground * 255)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--img_path', type=str, default='assets/test.jpg')
parser.add_argument('--save_path', type=str, default='test_matting.png')
args = parser.parse_args()
main(args)
|
70104
|
from pydub import AudioSegment
class Frame():
def __init__(self, start=0, end=0, audio=AudioSegment.empty()):
self.start = start
self.end = end
self.audio = audio
def __eq__(self, frame):
return self.start == frame.start and self.end == frame.end
def __len__(self):
return self.end - self.start
def __iadd__(self, frame):
if len(self) == 0: # empty case
self.start = frame.start
self.end = frame.end
self.audio = frame.audio
elif self.start < frame.start:
self.end = frame.end
self.audio += frame.audio
elif self.start > frame.start:
self.start = frame.start
self.audio = frame.audio + self.audio
return self
def __add__(self,frame):
new_frame = Frame()
if self.start < frame.start:
new_frame.start = self.start
new_frame.end = frame.end
new_frame.audio = self.audio + frame.audio
elif self.start > frame.start:
new_frame.start = frame.start
new_frame.end = self.end
new_frame.audio = frame.audio + self.audio
return new_frame
def timestamps(self):
return (self.start, self.end)
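# Illustrative example (not part of the original module): merging two adjacent
# frames. The audio defaults to AudioSegment.empty() to keep the sketch
# self-contained; start/end units are whatever the caller uses (pydub itself
# works in milliseconds).
#
#     a = Frame(0, 1000)
#     b = Frame(1000, 2000)
#     merged = a + b              # new Frame spanning (0, 2000)
#     a += b                      # in-place merge; a.timestamps() == (0, 2000)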
|
70116
|
from __future__ import absolute_import
import numpy as np
import chainer
import tqdm
import glob
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from ..data import load_image # NOQA
class BaseDataset(chainer.dataset.DatasetMixin, metaclass=ABCMeta):
""" Base class of dataset
Args:
root (str): Directory to the dataset
patients (list, optional): List of patient names. Defaults to [].
classes (None or list, optional): List of class names. Defaults to None.
dtypes (dict, optional): A dictionary of data types. Defaults to {}.
filenames (dict, optional): A dictionary mapping keys to filename wildcards.
Each filename can be a format string using '{root}' and '{patient}'. Defaults to {}.
normalizer (callable, optional): A callable used for normalization. Defaults to None.
augmentor (callable, optional): A callable used for data augmentation. Defaults to None.
"""
def __init__(self,
root,
patients=[],
classes=None,
dtypes={},
filenames={},
normalizer=None,
augmentor=None):
super(BaseDataset, self).__init__()
assert isinstance(patients, (list, np.ndarray)), \
'please specify the patient names..'
if classes is not None:
if isinstance(classes, list):
classes = np.asarray(classes)
assert isinstance(classes, np.ndarray), \
'class names should be list or np.ndarray..'
assert isinstance(dtypes, dict), \
'please specify the dtype per each file..'
assert isinstance(filenames, dict), \
'please specify the filename per each file..'
if normalizer is not None:
assert callable(normalizer), 'normalizer should be callable..'
if augmentor is not None:
assert callable(augmentor), 'augmentor should be callable..'
# initialize
files = OrderedDict()
file_sizes = []
for key in filenames.keys():
files[key] = []
for p in tqdm.tqdm(patients, desc='Collecting %s files' % key, ncols=80):
files[key].extend(
glob.glob(filenames[key].format(root=root, patient=p)))
if len(files[key]) == 0:
warnings.warn('%s files are not found.. ' % key)
file_sizes.append(len(files[key]))
assert all(file_sizes[0] == s for s in file_sizes), \
'the number of files must be the same..'
self._root = root
self._patients = patients
self._classes = classes
self._dtypes = dtypes
self._filenames = filenames
self._files = files
self._normalizer = normalizer
self._augmentor = augmentor
def __len__(self):
key = list(self._files.keys())[0]
return len(self._files[key])
@property
def classes(self):
return self._classes
@property
def n_classes(self):
if self.classes is None:
return None
return len(self.classes)
@property
def files(self):
return self._files
@property
def dtypes(self):
return self._dtypes
@property
def normalizer(self):
return self._normalizer
@property
def augmentor(self):
return self._augmentor
@augmentor.deleter
def augmentor(self):
self._augmentor = None
@classmethod
@abstractmethod
def normalize(self, **kwargs):
raise NotImplementedError()
@classmethod
@abstractmethod
def denormalize(self, **kwargs):
raise NotImplementedError()
@classmethod
@abstractmethod
def get_example(self, i):
raise NotImplementedError()
@classmethod
@abstractmethod
def __copy__(self):
"""Copy the class instance"""
raise NotImplementedError()
from .volume import VolumeDataset # NOQA
from .image import ImageDataset # NOQA
def train_valid_split(train, valid_ratio):
if isinstance(train, BaseDataset):
valid = train.__copy__()
n_samples = len(train)
valid_indices = np.random.choice(np.arange(n_samples),
int(valid_ratio * n_samples),
replace=False)
files = train.files
for key in files.keys():
valid._files[key] = np.asarray(files[key])[valid_indices]
train._files[key] = np.delete(
np.asarray(files[key]), valid_indices)
elif isinstance(train, (list, np.ndarray)):
valid = np.asarray(train)
n_samples = len(train)
valid_indices = np.random.choice(np.arange(n_samples),
int(valid_ratio * n_samples),
replace=False)
valid = valid[valid_indices]
train = np.delete(train, valid_indices)
assert len(train) + len(valid) == n_samples
return train, valid
def load_crossval_list(xls_file, index):
import pandas as pd
from distutils.version import LooseVersion
if LooseVersion(pd.__version__) >= LooseVersion('0.21.0'):
df = pd.read_excel(xls_file, sheet_name=index)
else:
df = pd.read_excel(xls_file, sheetname=index)
train = df['train'].dropna().tolist()
valid = df['valid'].dropna().tolist()
test = df['test'].dropna().tolist()
return {'train': train, 'valid': valid, 'test': test}
|
70142
|
import os
from appdirs import AppDirs
from cihai.config import Configurator, expand_config
#: XDG App directory locations
dirs = AppDirs("cihai", "cihai team") # appname # app author
def test_configurator(tmpdir):
c = Configurator()
isinstance(c.dirs, AppDirs)
assert c
def test_expand_config_xdg_vars():
initial_dict = {
'dirs': {'cache': '{user_cache_dir}', 'data': '{user_cache_dir}/data'}
}
expected_dict = {
'dirs': {
'cache': dirs.user_cache_dir,
'data': os.path.join(dirs.user_cache_dir, 'data'),
}
}
expand_config(initial_dict, dirs)
assert initial_dict == expected_dict
def test_expand_config_user_vars():
initial_dict = {'dirs': {'cache': '~'}}
expected_dict = {'dirs': {'cache': os.path.expanduser('~')}}
expand_config(initial_dict, dirs)
assert initial_dict == expected_dict
def test_expand_config_env_vars(tmpdir, monkeypatch):
monkeypatch.setenv('MYDIR', str(tmpdir))
initial_dict = {'dirs': {'cache': '${MYDIR}'}}
expected_dict = {'dirs': {'cache': os.environ.get('MYDIR')}}
expand_config(initial_dict, dirs)
assert initial_dict == expected_dict
|
70179
|
import tinyflow as tf
from tinyflow.datasets import get_cifar10
import numpy as np
num_epoch = 10
num_batch = 600
batch_size = 100
def conv_factory(x, filter_size, in_filters, out_filters):
x = tf.nn.conv2d(x, num_filter=out_filters,
ksize=[1, filter_size, filter_size, 1], padding='SAME')
x = tf.nn.batch_normalization(x)
x = tf.nn.relu(x)
return x
def residual_factory(x, in_filters, out_filters):
if in_filters == out_filters:
orig_x = x
conv1 = conv_factory(x, 3, in_filters, out_filters)
conv2 = conv_factory(conv1, 3, out_filters, out_filters)
new = orig_x + conv2
return tf.nn.relu(new)
else:
conv1 = conv_factory(x, 3, in_filters, out_filters)
conv2 = conv_factory(conv1, 3, out_filters, out_filters)
project_x = conv_factory(x, 1, in_filters, out_filters)
new = project_x + conv2
return tf.nn.relu(new)
def resnet(x, n, in_filters, out_filters):
for i in range(n):
if i == 0:
x = residual_factory(x, in_filters, 16)
else:
x = residual_factory(x, 16, 16)
for i in range(n):
if i == 0:
x = residual_factory(x, 16, 32)
else:
x = residual_factory(x, 32, 32)
for i in range(n):
if i == 0:
x = residual_factory(x, 32, 64)
else:
x = residual_factory(x, 64, 64)
return x
x = tf.placeholder(tf.float32)
conv1 = tf.nn.conv2d(x, num_filter=16, ksize=[1, 5, 5, 1], padding='SAME')
tanh1 = tf.tanh(conv1)
res = resnet(tanh1, 1, 16, 64)
pool1 = tf.nn.avg_pool(res, ksize=[1, 4, 4, 1], strides=[1, 2, 2, 1], padding='SAME', data_format='NCHW')
conv2 = tf.nn.conv2d(pool1, num_filter=16, ksize=[1, 5, 5, 1])
flatten = tf.nn.flatten_layer(conv2)
fc1 = tf.nn.linear(flatten, num_hidden=10, name="fc1")
# define loss
label = tf.placeholder(tf.float32)
cross_entropy = tf.nn.mean_sparse_softmax_cross_entropy_with_logits(fc1, label)
train_step = tf.train.AdamOptimizer(0.0005).minimize(cross_entropy)
sess = tf.Session(config='gpu')
# Automatic variable shape inference API: infers the shapes and initializes the weights.
known_shape = {x: [batch_size, 3, 32, 32], label: [batch_size]}
stdev = 0.01
init_step = []
for v, name, shape in tf.infer_variable_shapes(
cross_entropy, feed_dict=known_shape):
init_step.append(tf.assign(v, tf.normal(shape, stdev)))
print("shape[%s]=%s" % (name, str(shape)))
sess.run(init_step)
sess.run(tf.initialize_all_variables())
# get the cifar dataset
cifar = get_cifar10()
for epoch in range(num_epoch):
sum_loss = 0.0
for i in range(num_batch):
batch_xs, batch_ys = cifar.train.next_batch(batch_size)
loss, _ = sess.run([cross_entropy, train_step], feed_dict={x: batch_xs, label:batch_ys})
sum_loss += loss
print("epoch[%d] cross_entropy=%g" % (epoch, sum_loss /num_batch))
correct_prediction = tf.equal(tf.argmax(fc1, 1), label)
accuracy = tf.reduce_mean(correct_prediction)
print(sess.run(accuracy, feed_dict={x: cifar.test.images, label: cifar.test.labels}))
|
70200
|
from typing import Dict, List
from aiohttp import web
from .path import full_url
def default_server(request: web.Request) -> Dict[str, str]:
app = request.app
url = full_url(request)
url = url.with_path(app["cli"].base_path)
return dict(url=str(url), description="Api server")
def server_urls(request: web.Request, paths: List[str]) -> List[str]:
base_path = request.app["cli"].base_path
n = len(base_path)
spec = request.app.get("spec")
server = spec.servers[0] if spec and spec.servers else default_server(request)
base_url = server["url"]
return [f"{base_url}{p[n:]}" for p in paths]
|
70355
|
import turbodbc.data_types
from turbodbc import STRING, BINARY, NUMBER, DATETIME, ROWID
ALL_TYPE_CODES = [turbodbc.data_types._BOOLEAN_CODE,
turbodbc.data_types._INTEGER_CODE,
turbodbc.data_types._FLOATING_POINT_CODE,
turbodbc.data_types._STRING_CODE,
turbodbc.data_types._UNICODE_CODE,
turbodbc.data_types._TIMESTAMP_CODE,
turbodbc.data_types._DATE_CODE]
ALL_DATA_TYPES = [STRING, BINARY, NUMBER, DATETIME, ROWID]
def test_each_type_code_matches_one_data_type():
for type_code in ALL_TYPE_CODES:
matches = [type for type in ALL_DATA_TYPES if type_code == type]
assert 1 == len(matches)
def test_each_type_code_mismatches_all_but_one_data_type():
for type_code in ALL_TYPE_CODES:
mismatches = [type for type in ALL_DATA_TYPES if type_code != type]
expected = len(ALL_DATA_TYPES) - 1
assert expected == len(mismatches)
|
70357
|
import torch
import torch.optim as optim
import sys
import os
import argparse
import tokenization
from torch.optim import lr_scheduler
from loss import registry as loss_f
from loader import registry as loader
from model import registry as Producer
from evaluate import overall
#hyper-parameters
parser = argparse.ArgumentParser(description='contrastive learning framework for word vector')
parser.add_argument('-dataset', help='the file of target vectors', type=str, default='data/wiki_100.vec')
parser.add_argument('-batch_size', help='the number of samples in one batch', type=int, default=32)
parser.add_argument('-epochs', help='the number of epochs to train the model', type=int, default=20)
parser.add_argument('-shuffle', help='whether shuffle the samples', type=bool, default=True)
parser.add_argument('-lowercase', help='if only use lower case', type=bool, default=True)
parser.add_argument('-model_type', help='sum, rnn, cnn, attention, pam', type=str, default='pam')
parser.add_argument('-encoder_layer', help='the number of layer of the encoder', type=int, default=1)
parser.add_argument('-merge', help='merge pam and attention layer', type=bool, default=True)
parser.add_argument('-att_head_num', help='the number of attentional head for the pam encoder', type=int, default=1)
parser.add_argument('-loader_type', help='simple, aug, hard', type=str, default='hard')
parser.add_argument('-loss_type', help='mse, ntx, align_uniform', type=str, default='ntx')
parser.add_argument('-input_type', help='mixed, char, sub', type=str, default='mixed')
parser.add_argument('-learning_rate', help='learning rate for training', type=float, default=2e-3)
parser.add_argument('-drop_rate', help='the rate for dropout', type=float, default=0.1)
parser.add_argument('-gamma', help='decay rate', type=float, default=0.97)
parser.add_argument('-emb_dim', help='the dimension of target embeddings (FastText:300; BERT:768)', type=int, default=300)
parser.add_argument('-vocab_path', help='the vocabulary used for training and inference', type=str, default='data/vocab.txt')
parser.add_argument('-hard_neg_numbers', help='the number of hard negatives in each mini-batch', type=int, default=3)
parser.add_argument('-hard_neg_path', help='the file path of hard negative samples', type=str, default='data/hard_neg_samples.txt')
parser.add_argument('-vocab_size', help='the size of the vocabulary', type=int, default=0)
try:
args = parser.parse_args()
except:
parser.print_help()
sys.exit(0)
def main():
TOKENIZER = tokenization.FullTokenizer(vocab_file=args.vocab_path, do_lower_case=args.lowercase)
vocab_size = len(TOKENIZER.vocab)
args.vocab_size = vocab_size
data_loader = loader[args.loader_type](args, TOKENIZER)
train_iterator = data_loader(data_path=args.dataset)
model = Producer[args.model_type](args)
print(model)
trainable_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(trainable_num)
model.cuda()
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=args.gamma)
criterion = loss_f[args.loss_type]()
max_acc = 0
for e in range(args.epochs):
epoch_loss = 0
batch_num = 0
for words, origin_repre, aug_repre_ids, mask in train_iterator:
model.train()
optimizer.zero_grad()
batch_num += 1
if batch_num % 1000 == 0:
print('sample = {b}, loss = {a}'.format(a=epoch_loss / batch_num, b=batch_num * args.batch_size))
# get produced vectors
origin_repre = origin_repre.cuda()
aug_repre_ids = aug_repre_ids.cuda()
mask = mask.cuda()
aug_embeddings = model(aug_repre_ids, mask)
# calculate loss
loss = criterion(origin_repre, aug_embeddings)
# backward
loss.backward()
optimizer.step()
epoch_loss += loss.item()
scheduler.step()
print('[ lr rate] = {a}'.format(a=optimizer.state_dict()['param_groups'][0]['lr']))
print('----------------------')
print('this is the {a} epoch, loss = {b}'.format(a=e + 1, b=epoch_loss / len(train_iterator)))
if (e) % 1 == 0:
model_path = './output/model_{a}.pt'.format(a=e+1)
torch.save(model.state_dict(), model_path)
overall(args, model_path=model_path, tokenizer=TOKENIZER)
return max_acc
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
main()
|
70358
|
import os, sys
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import tensorflow as tf
import cv2
import numpy as np
sys.path.insert(1, os.path.join(sys.path[0], '/mywork/tensorflow-tuts/sd19reader'))
from batches2patches_tensorflow import GetFuncToPatches, GetFuncOverlapAdd
from myutils import describe
from vizutils import bw_grid_vis, color_grid_vis
from mypca import my_PCA_scikitlike as PCA
# load image
path2file = os.path.dirname(os.path.realpath(__file__))
inimg = os.path.join(path2file,'Lenna_noise1.png')
testim = cv2.imread(inimg).astype(np.float32) / 255.0
# will use "valid" conv, so pad 1 wide for 3x3 patches
padtest = np.pad(testim, [(1,1), (1,1), (0,0)], 'edge')
# get patching function for local windows
imshape = [int(ii) for ii in padtest.shape]
batchsize = 1
batchimshapefull = [batchsize,]+imshape
patchsize = 3
bordermode = 'valid'
pimshape = (imshape[0]-patchsize+1,imshape[1]-patchsize+1)
reconstrmode = 'full'
N_PCA_COMPS = 6
batchunpadtest = np.expand_dims(testim, 0)
batchtestims = padtest.reshape(batchimshapefull) # only one in batch, so resize the one
featswrtshape = [int(ii) for ii in batchunpadtest.shape]
featswrtshape[-1] = N_PCA_COMPS
patchtheanofunc = GetFuncToPatches(batchimshapefull, patchsize, border_mode=bordermode, filter_flip=False)
overlapaddfunc = GetFuncOverlapAdd(batchimshapefull, patchsize, pimshape, border_mode=reconstrmode, filter_flip=False)
#########################################
# bilateral filter
#tf_stdv_space = tf.get_variable('tf_stdv_space', initializer=tf.constant(1.0))
#tf_stdv_bilat = tf.get_variable('tf_stdv_bilat', initializer=tf.constant(1.0))
tf_placehold_img = tf.placeholder(tf.float32, batchunpadtest.shape, name="tf_placehold_img")
tf_placehold_wrt = tf.placeholder(tf.float32, featswrtshape, name="tf_placehold_wrt")
from test_utils import *
bilateral_filters = load_func_from_lib()
#########################################
# tensorflow sess init
sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())
outfold = 'test_patch_pca'
#########################################
# compute patch PCA
patches = patchtheanofunc(batchtestims)
print(" ")
describe("patches",patches)
flatpatches = patches.reshape((patches.shape[0]*patches.shape[1]*patches.shape[2], np.prod(patches.shape[3:])))
describe("flatpatches",flatpatches)
pca = PCA(n_components=N_PCA_COMPS, doplot=False).fit(flatpatches)
transfpatches = pca.transform(flatpatches)
reshtransfpatch = transfpatches.reshape((patches.shape[0], patches.shape[1], patches.shape[2], N_PCA_COMPS))
print(" ")
describe("transfpatches", transfpatches)
describe("reshtransfpatch", reshtransfpatch)
print(" ")
procpatches = pca.inverse_transform(transfpatches).reshape(patches.shape)
tehpidx = -1
for tehpatchs in [patches, procpatches]:
tehpidx += 1
FLPTCHS = tehpatchs.reshape((tehpatchs.shape[0], tehpatchs.shape[1]*tehpatchs.shape[2], np.prod(tehpatchs.shape[3:])))
#describe("FLPTCHS", FLPTCHS)
for jj in range(batchsize):
#describe("FLPTCHS[jj,...]", FLPTCHS[jj,...])
color_grid_vis(FLPTCHS[jj,...], savename=os.path.join(outfold,'pcacnn_FLPTCHS_'+str(tehpidx)+'_'+str(jj)+'.png'), flipbgr=True)
#quit()
#########################################
#define the function that's called every time one of the trackbars is moved
def updateWindow(xxx):
stdspace = float(cv2.getTrackbarPos('std_space*10','ImageWindow')) / 10.
stdcolor = float(cv2.getTrackbarPos('std_color*50','ImageWindow')) / 50.
stdspace = max(1e-3, stdspace)
stdcolor = max(1e-3, stdcolor)
#tf_stdv_space = tf.get_variable('tf_stdv_space', initializer=tf.constant(1.0))
#tf_stdv_bilat = tf.get_variable('tf_stdv_bilat', initializer=tf.constant(1.0))
#tf_placehold_img = tf.placeholder(tf.float32, batchimshapefull, name="tf_placehold_img")
#tf_placehold_wrt = tf.placeholder(tf.float32, featswrtshape, name="tf_placehold_wrt")
ret = bilateral_filters(NHWC_to_NCHW(tf_placehold_img),
NHWC_to_NCHW(tf_placehold_wrt),
stdspace, stdcolor)
outbilNCHW = ret
outbilat = NCHW_to_NHWC(outbilNCHW)
tfret = outbilat.eval({tf_placehold_img: batchunpadtest, tf_placehold_wrt: reshtransfpatch})
describe("tfret00", tfret)
tfret[tfret<0.0] = 0.0
tfret[tfret>1.0] = 1.0
describe("tfret11", tfret)
cv2.imshow("ImageWindow", tfret[0,...])
cv2.namedWindow('ImageWindow')
cv2.createTrackbar('std_space*10','ImageWindow',1,200,updateWindow)
cv2.createTrackbar('std_color*50','ImageWindow',1,200,updateWindow)
updateWindow(0) #Creates the window for the first time
cv2.waitKey(0)
|
70373
|
from django.views.decorators.http import require_http_methods
from graphene_django.views import GraphQLView
@require_http_methods(['POST'])
def graphql_view(request):
from graph_wrap.tastypie import schema
schema = schema()
view = GraphQLView.as_view(schema=schema)
return view(request)
|
70416
|
from .model_action import ModelAction
"""
If the seed model has a displacement ventilation object,
this measure will delete or keep the related objects.
If the seed model has no displacement ventilation object but the decision value is 1 (On),
this measure will insert room air model objects for zones under the control of central air systems.
EnergyPlus related object:
RoomAirSettings:ThreeNodeDisplacementVentilation
Parameters:
Number of Plumes per Occupant: 1
Thermostat Height: 1.1 m
Comfort Height: 1.1 m
Temperature Difference Threshold for Reporting: 0.4
"""
class DisplacementVentilation(ModelAction):
def __init__(self):
ModelAction.__init__(self, 'displace_vent')
self._default_list = [1, 0]
self._data = 1
self._measure_name = 'DisplacementVent'
self._lower_limit = 0
self._upper_limit = 1
self._measure_help = '''
measure name: DisplacementVent
Unit: Not required
Minimum: 0 (Off)
Maximum: 1 (On)
Type: Categorical (On/Off)
If the seed model has a displacement ventilation object, this measure will delete or keep the related objects.
If the seed model has no displacement ventilation object but the decision value is 1 (On),
this measure will insert room air model objects for zones under the control of central air systems.
EnergyPlus related object:
RoomAirSettings:ThreeNodeDisplacementVentilation:
Default Setting
Number of Plumes per Occupant: 1
Thermostat Height: 1.1 m
Comfort Height: 1.1 m
Temperature Difference Threshold for Reporting: 0.4
Use design template to configure your DV specifications.
Warning: This measure only works on HVAC systems with central air handling unit.
'''
def get_num_value(self):
return ModelAction.num_of_value(self)
def set_datalist(self, datalist):
# this is just a on off option
ModelAction.set_datalist(self, self._default_list)
def set_min(self, min_val):
ModelAction.set_min(self, 0)
def set_max(self, max_val):
ModelAction.set_max(self, 1)
|
70450
|
from rest_framework.mixins import (
CreateModelMixin,
DestroyModelMixin,
ListModelMixin
)
from rest_framework.viewsets import GenericViewSet
from pydis_site.apps.api.models.bot.offensive_message import OffensiveMessage
from pydis_site.apps.api.serializers import OffensiveMessageSerializer
class OffensiveMessageViewSet(
CreateModelMixin, ListModelMixin, DestroyModelMixin, GenericViewSet
):
"""
View providing CRUD access to offensive messages.
## Routes
### GET /bot/offensive-messages
Returns all offensive messages in the database.
#### Response format
>>> [
... {
... 'id': '631953598091100200',
... 'channel_id': '291284109232308226',
... 'delete_date': '2019-11-01T21:51:15.545000Z'
... },
... ...
... ]
#### Status codes
- 200: returned on success
### POST /bot/offensive-messages
Create a new offensive message object.
#### Request body
>>> {
... 'id': int,
... 'channel_id': int,
... 'delete_date': datetime.datetime # ISO-8601-formatted date
... }
#### Status codes
- 201: returned on success
- 400: if the body format is invalid
### DELETE /bot/offensive-messages/<id:int>
Delete the offensive message object with the given `id`.
#### Status codes
- 204: returned on success
- 404: if an offensive message object with the given `id` does not exist
## Authentication
Requires an API token.
"""
serializer_class = OffensiveMessageSerializer
queryset = OffensiveMessage.objects.all()
|
70462
|
from hamcrest import *
from utils import *
@pytest.mark.serial
@pytest.mark.manual_batch_review
def test_approve_pending_batch_change_success(shared_zone_test_context):
"""
Test approving a batch change succeeds for a support user
"""
client = shared_zone_test_context.ok_vinyldns_client
approver = shared_zone_test_context.support_user_client
batch_change_input = {
"changes": [
get_change_A_AAAA_json("test-approve-success.not.loaded.", address="4.3.2.1"),
get_change_A_AAAA_json("needs-review.not.loaded.", address="4.3.2.1"),
get_change_A_AAAA_json("zone-name-flagged-for-manual-review.zone.requires.review.")
],
"ownerGroupId": shared_zone_test_context.ok_group['id']
}
to_delete = []
to_disconnect = None
try:
result = client.create_batch_change(batch_change_input, status=202)
get_batch = client.get_batch_change(result['id'])
assert_that(get_batch['status'], is_('PendingReview'))
assert_that(get_batch['approvalStatus'], is_('PendingReview'))
assert_that(get_batch['changes'][0]['status'], is_('NeedsReview'))
assert_that(get_batch['changes'][0]['validationErrors'][0]['errorType'], is_('ZoneDiscoveryError'))
assert_that(get_batch['changes'][1]['status'], is_('NeedsReview'))
assert_that(get_batch['changes'][1]['validationErrors'][0]['errorType'], is_('RecordRequiresManualReview'))
assert_that(get_batch['changes'][2]['status'], is_('NeedsReview'))
assert_that(get_batch['changes'][2]['validationErrors'][0]['errorType'], is_('RecordRequiresManualReview'))
# need to create the zone so the change can succeed
zone = {
'name': 'not.loaded.',
'email': '<EMAIL>',
'adminGroupId': shared_zone_test_context.ok_group['id'],
'backendId': 'func-test-backend',
'shared': True
}
zone_create = approver.create_zone(zone, status=202)
to_disconnect = zone_create['zone']
approver.wait_until_zone_active(to_disconnect['id'])
approved = approver.approve_batch_change(result['id'], status=202)
completed_batch = client.wait_until_batch_change_completed(approved)
to_delete = [(change['zoneId'], change['recordSetId']) for change in completed_batch['changes']]
assert_that(completed_batch['status'], is_('Complete'))
for change in completed_batch['changes']:
assert_that(change['status'], is_('Complete'))
assert_that(len(change['validationErrors']), is_(0))
assert_that(completed_batch['approvalStatus'], is_('ManuallyApproved'))
assert_that(completed_batch['reviewerId'], is_('support-user-id'))
assert_that(completed_batch['reviewerUserName'], is_('support-user'))
assert_that(completed_batch, has_key('reviewTimestamp'))
assert_that(get_batch, is_not(has_key('cancelledTimestamp')))
finally:
clear_zoneid_rsid_tuple_list(to_delete, client)
if to_disconnect:
approver.abandon_zones(to_disconnect['id'], status=202)
@pytest.mark.manual_batch_review
def test_approve_pending_batch_change_fails_if_there_are_still_errors(shared_zone_test_context):
"""
Test approving a batch change fails if there are still errors
"""
client = shared_zone_test_context.ok_vinyldns_client
approver = shared_zone_test_context.support_user_client
batch_change_input = {
"changes": [
get_change_A_AAAA_json("needs-review.nonexistent.", address="4.3.2.1"),
get_change_A_AAAA_json("zone.does.not.exist.")
],
"ownerGroupId": shared_zone_test_context.ok_group['id']
}
complete_rs = None
try:
result = client.create_batch_change(batch_change_input, status=202)
get_batch = client.get_batch_change(result['id'])
assert_that(get_batch['status'], is_('PendingReview'))
assert_that(get_batch['approvalStatus'], is_('PendingReview'))
assert_that(get_batch['changes'][0]['status'], is_('NeedsReview'))
assert_that(get_batch['changes'][0]['validationErrors'][0]['errorType'], is_('RecordRequiresManualReview'))
assert_that(get_batch['changes'][1]['status'], is_('NeedsReview'))
assert_that(get_batch['changes'][1]['validationErrors'][0]['errorType'], is_('ZoneDiscoveryError'))
approval_response = approver.approve_batch_change(result['id'], status=400)
assert_that((approval_response[0]['errors'][0]), contains_string('Zone Discovery Failed'))
assert_that((approval_response[1]['errors'][0]), contains_string('Zone Discovery Failed'))
updated_batch = client.get_batch_change(result['id'], status=200)
assert_that(updated_batch['status'], is_('PendingReview'))
assert_that(updated_batch['approvalStatus'], is_('PendingReview'))
assert_that(updated_batch, is_not(has_key('reviewerId')))
assert_that(updated_batch, is_not(has_key('reviewerUserName')))
assert_that(updated_batch, is_not(has_key('reviewTimestamp')))
assert_that(updated_batch, is_not(has_key('cancelledTimestamp')))
assert_that(updated_batch['changes'][0]['status'], is_('NeedsReview'))
assert_that(updated_batch['changes'][0]['validationErrors'][0]['errorType'], is_('ZoneDiscoveryError'))
assert_that(updated_batch['changes'][1]['status'], is_('NeedsReview'))
assert_that(updated_batch['changes'][1]['validationErrors'][0]['errorType'], is_('ZoneDiscoveryError'))
finally:
if complete_rs:
delete_result = client.delete_recordset(complete_rs['zoneId'], complete_rs['id'], status=202)
client.wait_until_recordset_change_status(delete_result, 'Complete')
@pytest.mark.manual_batch_review
def test_approve_batch_change_with_invalid_batch_change_id_fails(shared_zone_test_context):
"""
Test approving a batch change with invalid batch change ID
"""
client = shared_zone_test_context.ok_vinyldns_client
error = client.approve_batch_change("some-id", status=404)
assert_that(error, is_("Batch change with id some-id cannot be found"))
@pytest.mark.manual_batch_review
def test_approve_batch_change_with_comments_exceeding_max_length_fails(shared_zone_test_context):
"""
Test approving a batch change with comments exceeding 1024 characters fails
"""
client = shared_zone_test_context.ok_vinyldns_client
approve_batch_change_input = {
"reviewComment": "a"*1025
}
errors = client.approve_batch_change("some-id", approve_batch_change_input, status=400)['errors']
assert_that(errors, contains_inanyorder("Comment length must not exceed 1024 characters."))
@pytest.mark.manual_batch_review
def test_approve_batch_change_fails_with_forbidden_error_for_non_system_admins(shared_zone_test_context):
"""
Test approving a batch change if the reviewer is not a super user or support user
"""
client = shared_zone_test_context.ok_vinyldns_client
batch_change_input = {
"changes": [
get_change_A_AAAA_json("no-owner-group-id.ok.", address="4.3.2.1")
]
}
to_delete = []
try:
result = client.create_batch_change(batch_change_input, status=202)
completed_batch = client.wait_until_batch_change_completed(result)
to_delete = [(change['zoneId'], change['recordSetId']) for change in completed_batch['changes']]
error = client.approve_batch_change(completed_batch['id'], status=403)
assert_that(error, is_("User does not have access to item " + completed_batch['id']))
finally:
clear_zoneid_rsid_tuple_list(to_delete, client)
|
70463
|
import info
class subinfo( info.infoclass ):
def setTargets( self ):
for ver in ["2.4.6"]:
self.targets[ ver ] = f"https://ftp.gnu.org/gnu/libtool/libtool-{ver}.tar.xz"
self.targetInstSrc[ ver ] = f"libtool-{ver}"
self.targetDigests["2.4.6"] = (['7c87a8c2c8c0fc9cd5019e402bed4292462d00a718a7cd5f11218153bf28b26f'], CraftHash.HashAlgorithm.SHA256)
self.description = "GNU libtool is a generic library support script."
self.patchLevel["2.4.6"] = 2
self.defaultTarget = "2.4.6"
def setDependencies( self ):
self.buildDependencies["dev-utils/automake"] = None
from Package.AutoToolsPackageBase import *
class Package( AutoToolsPackageBase ):
def __init__( self ):
AutoToolsPackageBase.__init__( self )
self.subinfo.options.configure.autoreconf = False
self.subinfo.options.configure.args += " --disable-static --enable-shared "
def postInstall(self):
return self.patchInstallPrefix([os.path.join(self.installDir(), x) for x in [f"bin/libtool",
f"bin/libtoolize"]],
self.subinfo.buildPrefix,
CraftCore.standardDirs.craftRoot())
|
70501
|
from germanium.decorators import login
from germanium.test_cases.rest import RESTTestCase
from germanium.tools import assert_in
from germanium.tools.http import assert_http_unauthorized, assert_http_forbidden, assert_http_not_found
from .test_case import HelperTestCase, AsSuperuserTestCase
__all__ =(
'HttpExceptionsTestCase',
)
class HttpExceptionsTestCase(AsSuperuserTestCase, HelperTestCase, RESTTestCase):
ISSUE_API_URL = '/api/issue/'
USER_API_URL = '/api/user/'
ACCEPT_TYPES = ('application/json', 'text/xml', 'text/csv',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
def test_401_exception(self):
for accept_type in self.ACCEPT_TYPES:
resp = self.get(self.ISSUE_API_URL, headers={'HTTP_ACCEPT': accept_type})
assert_in(accept_type, resp['Content-Type'])
assert_http_unauthorized(resp)
@login(is_superuser=False)
def test_403_exception(self):
self.get_user_obj()
for accept_type in self.ACCEPT_TYPES:
resp = self.post(self.USER_API_URL, headers={'HTTP_ACCEPT': accept_type}, data={})
assert_in(accept_type, resp['Content-Type'])
assert_http_forbidden(resp)
@login(is_superuser=False)
def test_404_exception(self):
for accept_type in self.ACCEPT_TYPES:
resp = self.get('%s%s/' % (self.ISSUE_API_URL, 5), headers={'HTTP_ACCEPT': accept_type})
assert_in(accept_type, resp['Content-Type'])
assert_http_not_found(resp)
@login(is_superuser=True)
def test_403_csrf_exception(self):
cookies = self.c.cookies
self.c = self.client_class(enforce_csrf_checks=True)
self.c.cookies = cookies
for accept_type in self.ACCEPT_TYPES:
resp = self.post(self.ISSUE_API_URL, {}, headers={'HTTP_ACCEPT': accept_type,
'CONTENT_TYPE': 'application/json'})
assert_in(accept_type, resp['Content-Type'])
assert_http_forbidden(resp)
|
70528
|
import cPickle
__all__ = ['memoize']
# This would usually be defined elsewhere
class decoratorargs(object):
def __new__(typ, *attr_args, **attr_kwargs):
def decorator(orig_func):
self = object.__new__(typ)
self.__init__(orig_func, *attr_args, **attr_kwargs)
return self
return decorator
class memoize(decoratorargs):
class Node:
__slots__ = ['key', 'value', 'older', 'newer']
def __init__(self, key, value, older=None, newer=None):
self.key = key
self.value = value
self.older = older
self.newer = newer
def __init__(self, func, capacity, keyfunc=lambda *args, **kwargs: cPickle.dumps((args, kwargs))):
self.func = func
self.capacity = capacity
self.keyfunc = keyfunc
self.reset()
def reset(self):
self.mru = self.Node(None, None)
self.mru.older = self.mru.newer = self.mru
self.nodes = {self.mru.key: self.mru}
self.count = 1
self.hits = 0
self.misses = 0
def __call__(self, *args, **kwargs):
key = self.keyfunc(*args, **kwargs)
try:
node = self.nodes[key]
except KeyError:
# We have an entry not in the cache
self.misses += 1
value = self.func(*args, **kwargs)
lru = self.mru.newer # in the circular list, the node after the MRU is the LRU
# If we haven't reached capacity
if self.count < self.capacity:
# Put it between the MRU and LRU - it'll be the new MRU
node = self.Node(key, value, self.mru, lru)
self.mru.newer = node
lru.older = node
self.mru = node
self.count += 1
else:
# It's FULL! We'll make the LRU be the new MRU, but replace its
# value first
del self.nodes[lru.key] # This mapping is now invalid
lru.key = key
lru.value = value
self.mru = lru
# Add the new mapping
self.nodes[key] = self.mru
return value
# We have an entry in the cache
self.hits += 1
# If it's already the MRU, do nothing
if node is self.mru:
return node.value
lru = self.mru.newer # in the circular list, the node after the MRU is the LRU
# If it's the LRU, update the MRU to be it
if node is lru:
self.mru = lru
return node.value
# Remove the node from the list
node.older.newer = node.newer
node.newer.older = node.older
# Put it between MRU and LRU
node.older = self.mru
self.mru.newer = node
node.newer = lru
lru.older = node
self.mru = node
return node.value
# Example usage - fib only needs a cache size of 3 to keep it from
# being an exponential-time algorithm
@memoize(3)
def fib(n): return (n > 1) and (fib(n - 1) + fib(n - 2)) or 1
fib(100) # => 573147844013817084101L
# This is faster because it doesn't use the default key function -
# it doesn't need to call cPickle.dumps((*args, **kwargs))
@memoize(100, lambda n: n)
def fib(n): return (n > 1) and (fib(n - 1) + fib(n - 2)) or 1
fib(100) # => 573147844013817084101L
# See what's in the cache
# => [(98, 218922995834555169026L), (99, 354224848179261915075L), (100, 573147844013817084101L)]
[(node.key, node.value) for node in fib.nodes.values()]
# Get an example of the key function working
fib.keyfunc(40) # => 40
# Simple report on performance
# => Hit %: 0.492462
print 'Hit %%: %f' % (float(fib.hits) / (fib.hits + fib.misses))
# Resize the LRU cache
fib.capacity = 100
fib.reset() # Not necessary unless you shrink it
|
70556
|
import heapq
class Solution:
"""
@param k: an integer
@param W: an integer
@param Profits: an array
@param Capital: an array
@return: final maximized capital
"""
def findMaximizedCapital(self, k, W, Profits, Capital):
# Write your code here
cappq = [(cap, i) for i, cap in enumerate(Capital)]
heapq.heapify(cappq)
profitpq = []
for _ in range(k):
while cappq and cappq[0][0] <= W:
cap, index = heapq.heappop(cappq)
heapq.heappush(profitpq, -Profits[index])
if profitpq:
W -= heapq.heappop(profitpq)
else:
break
return W
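# Illustrative example (not part of the original solution): with k=2 projects
# allowed, starting capital W=0, Profits=[1, 2, 3] and Capital=[0, 1, 1], the
# heap-based greedy first takes project 0 (profit 1), which unlocks projects 1
# and 2, then takes project 2 (profit 3), finishing with capital 4.
#
#     assert Solution().findMaximizedCapital(2, 0, [1, 2, 3], [0, 1, 1]) == 4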
|
70583
|
import os
import logging
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
from supervised.utils.config import LOG_LEVEL
from supervised.utils.common import learner_name_to_fold_repeat
from supervised.utils.metric import Metric
logger.setLevel(LOG_LEVEL)
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
MY_COLORS = list(mcolors.TABLEAU_COLORS.values())
class LearningCurves:
output_file_name = "learning_curves.png"
@staticmethod
def single_iteration(learner_names, model_path):
for ln in learner_names:
df = pd.read_csv(
os.path.join(model_path, f"{ln}_training.log"),
names=["iteration", "train", "test"],
)
if df.shape[0] > 1:
return False
return True
@staticmethod
def plot(learner_names, metric_name, model_path, trees_in_iteration=None):
colors = MY_COLORS
if len(learner_names) > len(colors):
repeat_colors = int(np.ceil(len(learner_names) / len(colors)))
colors = colors * repeat_colors
if LearningCurves.single_iteration(learner_names, model_path):
LearningCurves.plot_single_iter(
learner_names, metric_name, model_path, colors
)
else:
LearningCurves.plot_iterations(
learner_names, metric_name, model_path, colors, trees_in_iteration
)
@staticmethod
def plot_single_iter(learner_names, metric_name, model_path, colors):
plt.figure(figsize=(10, 7))
for ln in learner_names:
df = pd.read_csv(
os.path.join(model_path, f"{ln}_training.log"),
names=["iteration", "train", "test"],
)
fold, repeat = learner_name_to_fold_repeat(ln)
repeat_str = f" Reapeat {repeat+1}," if repeat is not None else ""
plt.bar(
f"Fold {fold+1},{repeat_str} train",
df.train[0],
color="white",
edgecolor=colors[fold],
)
plt.bar(f"Fold {fold+1},{repeat_str} test", df.test[0], color=colors[fold])
plt.ylabel(metric_name)
plt.xticks(rotation=90)
plt.tight_layout(pad=2.0)
plot_path = os.path.join(model_path, LearningCurves.output_file_name)
plt.savefig(plot_path)
plt.close("all")
@staticmethod
def plot_iterations(
learner_names, metric_name, model_path, colors, trees_in_iteration=None
):
plt.figure(figsize=(10, 7))
for ln in learner_names:
df = pd.read_csv(
os.path.join(model_path, f"{ln}_training.log"),
names=["iteration", "train", "test"],
)
fold, repeat = learner_name_to_fold_repeat(ln)
repeat_str = f" Reapeat {repeat+1}," if repeat is not None else ""
# if trees_in_iteration is not None:
# df.iteration = df.iteration * trees_in_iteration
any_none = np.sum(pd.isnull(df.train))
if any_none == 0:
plt.plot(
df.iteration,
df.train,
"--",
color=colors[fold],
label=f"Fold {fold+1},{repeat_str} train",
)
any_none = np.sum(pd.isnull(df.test))
if any_none == 0:
plt.plot(
df.iteration,
df.test,
color=colors[fold],
label=f"Fold {fold+1},{repeat_str} test",
)
best_iter = None
if Metric.optimize_negative(metric_name):
best_iter = df.test.argmax()
else:
best_iter = df.test.argmin()
if best_iter is not None and best_iter != -1:
plt.axvline(best_iter, color=colors[fold], alpha=0.3)
if trees_in_iteration is not None:
plt.xlabel("#Trees")
else:
plt.xlabel("#Iteration")
plt.ylabel(metric_name)
# limit number of learners in the legend
# too many will raise warnings
if len(learner_names) <= 15:
plt.legend(loc="best")
plt.tight_layout(pad=2.0)
plot_path = os.path.join(model_path, LearningCurves.output_file_name)
plt.savefig(plot_path)
plt.close("all")
@staticmethod
def plot_for_ensemble(scores, metric_name, model_path):
plt.figure(figsize=(10, 7))
plt.plot(range(1, len(scores) + 1), scores, label=f"Ensemble")
plt.xlabel("#Iteration")
plt.ylabel(metric_name)
plt.legend(loc="best")
plot_path = os.path.join(model_path, LearningCurves.output_file_name)
plt.savefig(plot_path)
plt.close("all")
|
70622
|
import datetime as dt
import json
from typing import List, Optional
from uuid import UUID
from fastapi.encoders import jsonable_encoder
from injector import singleton, inject
from common.cache import fail_silently, hash_cache_key
from common.injection import Cache
from database.utils import map_to
from post.models import Post
@singleton
class PostCache:
POSTS_EX: int = int(dt.timedelta(minutes=1).total_seconds())
@inject
def __init__(self, cache: Cache):
self._cache = cache
@fail_silently()
async def get_posts(
self,
wall_profile_id: UUID,
include_friends: bool,
older_than: dt.datetime) -> Optional[List[Post]]:
cached_posts_ids = await self._cache.get(
f"walls:{wall_profile_id}:posts:"
f"{hash_cache_key(wall_profile_id, include_friends, older_than)}")
cached_posts_ids = cached_posts_ids and json.loads(cached_posts_ids)
if not cached_posts_ids:
return None
cached_posts = await self._cache.mget(
*[f"posts:{post_id}" for post_id in cached_posts_ids])
return (all(cached_posts) or None) and [map_to(json.loads(post), Post)
for post in cached_posts]
@fail_silently()
async def get_post(self, post_id: UUID) -> Optional[Post]:
cached_post = await self._cache.get(f"posts:{post_id}")
return cached_post and map_to(json.loads(cached_post), Post)
@fail_silently()
async def set_post(self, post: Post) -> None:
await self._cache.set(f"posts:{post.id}",
json.dumps(jsonable_encoder(post)),
expire=PostCache.POSTS_EX)
@fail_silently()
async def set_posts(
self,
posts: List[Post],
wall_profile_id: UUID,
include_friends: bool,
older_than: Optional[dt.date]) -> None:
params_cache_key = hash_cache_key(
wall_profile_id, include_friends, older_than)
posts_ids_key = f"walls:{wall_profile_id}:posts:{params_cache_key}"
pipe = self._cache.pipeline()
pipe.mset(posts_ids_key, json.dumps([str(post.id) for post in posts]),
*list(sum([(f"posts:{post.id}",
json.dumps(jsonable_encoder(post)))
for post in posts], ())))
for key in [posts_ids_key, *[f"posts:{post.id}" for post in posts]]:
pipe.expire(key, PostCache.POSTS_EX)
await pipe.execute()
@fail_silently()
async def unset_posts_ids(
self,
wall_profile_id: UUID,
include_friends: bool,
older_than: Optional[dt.date]) -> None:
await self._cache.delete(
f"walls:{wall_profile_id}:posts:"
f"{hash_cache_key(wall_profile_id, include_friends, older_than)}")
@fail_silently()
async def unset_post(self, post_id: UUID) -> None:
await self._cache.delete(f"posts:{post_id}")
|
70662
|
from typing import Type
import warnings
from base64 import b64encode
from html import escape
import json
import pandas as pd
import numpy as np
from rdkit import Chem
from rdkit.Chem import Draw
from .utils import (env,
requires,
tooltip_formatter,
mol_to_record,
mol_to_smiles,
sdf_to_dataframe,
remove_coordinates)
from .select import register
try:
from IPython.display import HTML, Javascript
except ModuleNotFoundError:
pass
else:
warnings.filterwarnings("ignore",
"Consider using IPython.display.IFrame instead")
class MolGrid:
"""Class that handles drawing molecules, rendering the HTML document and
saving or displaying it in a notebook
"""
def __init__(self, df, smiles_col="SMILES", mol_col=None, removeHs=False,
use_coords=True, coordGen=True, useSVG=True, size=(160, 120),
MolDrawOptions=None, rename=None, name="default", **kwargs):
"""
Parameters
----------
df : pandas.DataFrame or dict or list
Dataframe containing a SMILES or mol column, or dictionary
            containing a list of SMILES, or a list of dictionaries containing a
SMILES field
smiles_col : str or None
Name of the SMILES column in the dataframe, if available
mol_col : str or None
Name of an RDKit molecule column. If available, coordinates and
atom/bonds annotations from this will be used for depiction
removeHs : bool
Remove hydrogen atoms from the drawings
use_coords : bool
Use the existing coordinates of the molecule
coordGen : bool
Sets whether or not the CoordGen library should be preferred to the
RDKit depiction library
useSVG : bool
Use SVG instead of PNG
size : tuple
The size of the drawing canvas
MolDrawOptions : rdkit.Chem.Draw.MolDrawOptions or None
Drawing options. Useful for making highly customized drawings
rename : dict or None
Rename the properties/fields stored in the molecule
name : str
Name of the grid. Used when retrieving selections from multiple
grids at the same time
kwargs : object
MolDrawOptions attributes
Notes
-----
The list of supported MolDrawOptions attributes are available in
https://www.rdkit.org/docs/source/rdkit.Chem.Draw.rdMolDraw2D.html#rdkit.Chem.Draw.rdMolDraw2D.MolDrawOptions
        .. versionchanged:: 0.1.0
Added `rename` argument to replace `mapping`
"""
if not (smiles_col or mol_col):
raise ValueError("One of `smiles_col` or `mol_col` must be set")
if not isinstance(name, str):
raise TypeError(
f"`name` must be a string. Currently of type {type(name).__name__}")
Draw.rdDepictor.SetPreferCoordGen(coordGen)
if isinstance(df, pd.DataFrame):
dataframe = df.copy()
else:
# list of dicts or other input formats for dataframes
dataframe = pd.DataFrame(df)
mapping = kwargs.pop("mapping", None)
if mapping:
warnings.warn(
"`mapping` is deprecated and will be removed soon. Consider "
"using `rename` in the future."
)
rename = rename or mapping
if rename:
dataframe.rename(columns=rename, inplace=True)
self._extra_columns = ["img", "mols2grid-id"]
# generate temporary RDKit molecules
if smiles_col and not mol_col:
mol_col = "mol"
keep_mols = False
dataframe[mol_col] = dataframe[smiles_col].apply(Chem.MolFromSmiles)
else:
keep_mols = True
# remove hydrogens
if removeHs:
dataframe[mol_col] = dataframe[mol_col].apply(Chem.RemoveHs)
if not use_coords:
dataframe[mol_col] = dataframe[mol_col].apply(remove_coordinates)
# generate smiles col
if mol_col and (smiles_col not in dataframe.columns):
dataframe[smiles_col] = dataframe[mol_col].apply(mol_to_smiles)
# add index
dataframe["mols2grid-id"] = list(range(len(dataframe)))
# drop None
dataframe.dropna(axis=0, subset=[mol_col], inplace=True)
# generate drawings
self.useSVG = useSVG
opts = MolDrawOptions or Draw.MolDrawOptions()
for key, value in kwargs.items():
setattr(opts, key, value)
self.MolDrawOptions = opts
self._MolDraw2D = Draw.MolDraw2DSVG if useSVG else Draw.MolDraw2DCairo
self.img_size = size
dataframe["img"] = dataframe[mol_col].apply(self.mol_to_img)
if keep_mols:
self.dataframe = dataframe
else:
self.dataframe = dataframe.drop(columns=mol_col)
mol_col = None
self.smiles_col = smiles_col
self.mol_col = mol_col
# register instance
self._grid_id = name
register._init_grid(name)
@classmethod
def from_mols(cls, mols, **kwargs):
"""Set up the dataframe used by mols2grid directly from a list of RDKit
molecules
Parameters
----------
mols : list
List of RDKit molecules
kwargs : object
Other arguments passed on initialization
"""
mol_col = kwargs.pop("mol_col", "mol")
df = pd.DataFrame([mol_to_record(mol, mol_col=mol_col)
for mol in mols])
return cls(df, mol_col=mol_col, **kwargs)
@classmethod
def from_sdf(cls, sdf_file, **kwargs):
"""Set up the dataframe used by mols2grid directly from an SDFile
Parameters
----------
sdf_file : str
Path to the SDF file
kwargs : object
Other arguments passed on initialization
"""
mol_col = kwargs.pop("mol_col", "mol")
df = sdf_to_dataframe(sdf_file, mol_col=mol_col)
return cls(df, mol_col=mol_col, **kwargs)
@property
def template(self):
"""Kind of grid displayed, one of:
- pages
- table
"""
return self._template
@template.setter
def template(self, value):
if value not in ["pages", "table"]:
raise ValueError(f"template={value!r} not supported. "
"Use one of 'pages' or 'table'")
self._template = value
def draw_mol(self, mol):
"""Draw a molecule"""
d2d = self._MolDraw2D(*self.img_size)
d2d.SetDrawOptions(self.MolDrawOptions)
hl_atoms = getattr(mol, "__sssAtoms", [])
d2d.DrawMolecule(mol, highlightAtoms=hl_atoms)
d2d.FinishDrawing()
return d2d.GetDrawingText()
def mol_to_img(self, mol):
"""Convert an RDKit mol to an HTML image containing a drawing of the
molecule"""
img = self.draw_mol(mol)
if self.useSVG:
return img
data = b64encode(img).decode()
return f'<img src="data:image/png;base64,{data}">'
def render(self, template="pages", **kwargs):
"""Returns the HTML document corresponding to the "pages" or "table"
template. See `to_pages` and `to_table` for the list of arguments
Parameters
----------
template : str
Kind of grid to draw:
* "table" is a very simple table where all molecules are
                  displayed in the document; the main use case is printing to
PDF or on paper.
* "pages" is a more interactive version that splits the
original data into several pages.
"""
self.template = template
return getattr(self, f"to_{self.template}")(**kwargs)
def to_pages(self, subset=None, tooltip=None,
cell_width=160, n_cols=5, n_rows=3,
border="1px solid #cccccc", gap=0,
fontsize="12pt", fontfamily="'DejaVu', sans-serif",
textalign="center", tooltip_fmt="<strong>{key}</strong>: {value}",
tooltip_trigger="click hover", tooltip_placement="bottom",
hover_color="#e7e7e7", style=None, selection=True, transform=None,
custom_css=None, custom_header=None, callback=None, sort_by=None):
"""Returns the HTML document for the "pages" template
Parameters
----------
subset : list or None
Columns to be displayed in each cell of the grid. Each
column's value will be displayed from top to bottom in the same
order given here. Use `"img"` for the image of the molecule.
Default: all columns (with "img" in first position)
tooltip : list or None
Columns to be displayed as a tooltip when hovering/clicking on the
image of a cell. Use `None` for no tooltip.
tooltip_fmt : str
Format string of each key/value pair in the tooltip
tooltip_trigger : str
Sequence of triggers for the tooltip: (click, hover, focus)
tooltip_placement : str
Position of the tooltip: auto, top, bottom, left, right
n_cols : int
Number of columns per page
n_rows : int
Number of rows per page
border : str
Styling of the border around each cell (CSS)
gap : int
Size of the margin around each cell (CSS)
fontsize : str
Font size of the text displayed in each cell (CSS)
fontfamily : str
Font used for the text in each cell (CSS)
textalign : str
Alignment of the text in each cell (CSS)
hover_color : str
Background color when hovering a cell (CSS)
style : dict or None
CSS styling applied to specific items in all cells. The dict must follow a
`key: function` structure where the key must correspond to one of
the columns in `subset` or `tooltip`. The function takes the item's value as
input, and outputs a valid CSS styling, for example
`style={"Solubility": lambda x: "color: red" if x < -5 else ""}`
if you want to color the text corresponding to the "Solubility"
column in your dataframe. You can also style a whole cell using the `__all__`
key, the corresponding function then has access to all values for each cell:
`style={"__all__": lambda x: "color: red" if x["Solubility"] < -5 else ""}`
selection : bool
Enables the selection of molecules and displays a checkbox at the top of each
            cell. This is only useful in the context of a Jupyter notebook, which gives
you access to your selection (index and SMILES) through
`mols2grid.get_selection()`
transform : dict or None
Functions applied to specific items in all cells. The dict must follow a
`key: function` structure where the key must correspond to one of the columns
in `subset` or `tooltip`. The function takes the item's value as input and
transforms it, for example:
`transform={"Solubility": lambda x: f"{x:.2f}",
"Melting point": lambda x: f"MP: {5/9*(x-32):.1f}Β°C"}`
will round the solubility to 2 decimals, and display the melting point in
Celsius instead of Fahrenheit with a single digit precision and some text
before (MP) and after (Β°C) the value. These transformations only affect
columns in `subset` and `tooltip`, and do not interfere with `style`.
custom_css : str or None
Custom CSS properties applied to the content of the HTML document
custom_header : str or None
Custom libraries to be loaded in the header of the document
callback : str or callable
JavaScript or Python callback to be executed when clicking on an image. A
            dictionary containing the data for the full cell is directly available as
`data` in JS. For Python, the callback function must have `data` as the first
argument to the function. All the values in the `data` dict are parsed as
strings, except "mols2grid-id" which is always an integer.
sort_by : str or None
Sort the grid according to the following field (which must be present in
`subset` or `tooltip`).
"""
if self.mol_col:
df = self.dataframe.drop(columns=self.mol_col).copy()
else:
df = self.dataframe.copy()
cell_width = self.img_size[0]
smiles = self.smiles_col
content = []
column_map = {}
width = n_cols * (cell_width + 2 * (gap + 2))
if subset is None:
subset = df.columns.tolist()
subset = [subset.pop(subset.index("img"))] + subset
# define fields that are searchable and sortable
search_cols = [f"data-{col}" for col in subset if col != "img"]
if tooltip:
search_cols.append("mols2grid-tooltip")
sort_cols = search_cols[:-1]
sort_cols.extend([f"data-{col}" for col in tooltip])
for col in tooltip:
if col not in subset:
s = f'<div class="data data-{col}" style="display: none;"></div>'
content.append(s)
column_map[col] = f"data-{col}"
else:
sort_cols = search_cols[:]
sort_cols = ["mols2grid-id"] + sort_cols
# get unique list but keep order
sort_cols = list(dict.fromkeys(sort_cols))
if style is None:
style = {}
if transform is None:
transform = {}
if tooltip is None:
tooltip = []
value_names = list(set(subset + [smiles] + tooltip))
value_names = [f"data-{col}" for col in value_names]
# force id, SMILES, and tooltip values to be present in the data
final_columns = subset[:]
final_columns.extend(["mols2grid-id", smiles])
if tooltip:
final_columns.extend(tooltip)
final_columns = list(set(final_columns))
# make a copy if id shown explicitely
if "mols2grid-id" in subset:
id_name = "mols2grid-id-copy"
df[id_name] = df["mols2grid-id"]
value_names.append(f"data-{id_name}")
final_columns.append(id_name)
subset = [id_name if x == "mols2grid-id" else x for x in subset]
# organize data
for col in subset:
if col == "img" and tooltip:
s = (f'<a tabindex="0" class="data data-{col} mols2grid-tooltip" '
'data-toggle="popover" data-content="foo"></a>')
else:
if style.get(col):
s = f'<div class="data data-{col} style-{col}" style=""></div>'
else:
s = f'<div class="data data-{col}"></div>'
content.append(s)
column_map[col] = f"data-{col}"
# add but hide SMILES div if not present
if smiles not in (subset + tooltip):
s = f'<div class="data data-{smiles}" style="display: none;"></div>'
content.append(s)
column_map[smiles] = f"data-{smiles}"
# set mapping for list.js
if "__all__" in style.keys():
whole_cell_style = True
x = "[{data: ['mols2grid-id', 'cellstyle']}, "
else:
whole_cell_style = False
x = "[{data: ['mols2grid-id']}, "
value_names = x + str(value_names)[1:]
# apply CSS styles
for col, func in style.items():
if col == "__all__":
name = "cellstyle"
df[name] = df.apply(func, axis=1)
else:
name = f"style-{col}"
df[name] = df[col].apply(func)
final_columns.append(name)
value_names = value_names[:-1] + f", {{ attr: 'style', name: {name!r} }}]"
if tooltip:
df["mols2grid-tooltip"] = df.apply(tooltip_formatter, axis=1,
args=(tooltip, tooltip_fmt, style,
transform))
final_columns = final_columns + ["mols2grid-tooltip"]
value_names = (value_names[:-1] +
", {attr: 'data-content', name: 'mols2grid-tooltip'}]")
# apply custom user function
for col, func in transform.items():
df[col] = df[col].apply(func)
if selection:
checkbox = '<input type="checkbox" class="position-relative float-left">'
else:
checkbox = ""
if whole_cell_style:
item = ('<div class="cell" data-mols2grid-id="0" '
'data-cellstyle="0">{checkbox}{content}</div>')
else:
item = ('<div class="cell" data-mols2grid-id="0">'
'{checkbox}{content}</div>')
item = item.format(checkbox=checkbox, content="".join(content))
# callback
if callable(callback):
if callback.__name__ == "<lambda>":
raise TypeError(
"Lambda functions are not supported as callbacks. Please "
"use a regular function instead.")
callback_type = "python"
callback = callback.__name__
else:
callback_type = "js"
if sort_by and sort_by != "mols2grid-id":
if sort_by in (subset + tooltip):
sort_by = f"data-{sort_by}"
else:
raise ValueError(f"{sort_by} is not an available field in "
"`subset` or `tooltip`")
else:
sort_by = "mols2grid-id"
df = df[final_columns].rename(columns=column_map).sort_values(sort_by)
template = env.get_template('pages.html')
template_kwargs = dict(
width = width,
border = border,
textalign = textalign,
cell_width = cell_width,
fontfamily = fontfamily,
fontsize = fontsize,
gap = gap,
hover_color = hover_color,
item = item,
item_repr = repr(item),
value_names = value_names,
tooltip = tooltip,
tooltip_trigger = repr(tooltip_trigger),
tooltip_placement = repr(tooltip_placement),
n_items_per_page = n_rows * n_cols,
search_cols = search_cols,
data = json.dumps(df.to_dict("records")),
selection = selection,
smiles_col = smiles,
sort_cols = sort_cols,
grid_id = self._grid_id,
whole_cell_style = whole_cell_style,
custom_css = custom_css or "",
custom_header = custom_header or "",
callback = callback,
callback_type = callback_type,
sort_by = sort_by,
)
return template.render(**template_kwargs)
def get_selection(self):
"""Retrieve the dataframe subset corresponding to your selection
Returns
-------
pandas.DataFrame
"""
sel = list(register.get_selection().keys())
return (self.dataframe.loc[self.dataframe["mols2grid-id"].isin(sel)]
.drop(columns=self._extra_columns))
def filter(self, mask):
"""Filters the grid using a mask (boolean array)
Parameters
----------
mask : list, pd.Series, np.ndarray
Boolean array: `True` when the item should be displayed, `False` if it should
be filtered out.
"""
# convert mask to mols2grid-id
ids = self.dataframe.loc[mask]["mols2grid-id"]
return self._filter_by_id(ids)
def filter_by_index(self, indices):
"""Filters the grid using the dataframe's index"""
# convert index to mols2grid-id
ids = self.dataframe.loc[self.dataframe.index.isin(indices)]["mols2grid-id"]
return self._filter_by_id(ids)
def _filter_by_id(self, ids):
"""Filters the grid using the values in the `mols2grid-id` column"""
if isinstance(ids, (pd.Series, np.ndarray)):
ids = ids.to_list()
code = env.get_template('js/filter.js').render(
grid_id = self._grid_id,
ids = ids)
return Javascript(code)
def to_table(self, subset=None, tooltip=None, n_cols=6,
cell_width=160, border="1px solid #cccccc", gap=0,
fontsize="12pt", fontfamily="'DejaVu', sans-serif",
textalign="center", tooltip_fmt="<strong>{key}</strong>: {value}",
tooltip_trigger="click hover", tooltip_placement="bottom",
hover_color="#e7e7e7", style=None, transform=None):
"""Returns the HTML document for the "table" template
Parameters
----------
subset : list or None
Columns to be displayed in each cell of the grid. Each
column's value will be displayed from top to bottom in the same
order given here. Use `"img"` for the image of the molecule.
Default: all columns (with "img" in first position)
tooltip : list or None
Columns to be displayed as a tooltip when hovering/clicking on the
image of a cell. Use `None` for no tooltip.
tooltip_fmt : str
Format string of each key/value pair in the tooltip
tooltip_trigger : str
Sequence of triggers for the tooltip: (click, hover, focus)
tooltip_placement : str
Position of the tooltip: auto, top, bottom, left, right
n_cols : int
Number of columns in the table
border : str
Styling of the border around each cell (CSS)
gap : int or str
Size of the margin around each cell (CSS)
fontsize : str
Font size of the text displayed in each cell (CSS)
fontfamily : str
Font used for the text in each cell (CSS)
textalign : str
Alignment of the text in each cell (CSS)
hover_color : str
Background color when hovering a cell (CSS)
style : dict or None
CSS styling applied to specific items in all cells. The dict must follow a
`key: function` structure where the key must correspond to one of
the columns in `subset` or `tooltip`. The function takes the item's value as
input, and outputs a valid CSS styling, for example
`style={"Solubility": lambda x: "color: red" if x < -5 else "color: black"}`
if you want to color the text corresponding to the "Solubility"
column in your dataframe
transform : dict or None
Functions applied to specific items in all cells. The dict must follow a
`key: function` structure where the key must correspond to one of the columns
in `subset`. The function takes the item's value as input and transforms it,
for example:
`transform={"Solubility": lambda x: f"{x:.2f}",
"Melting point": lambda x: f"MP: {5/9*(x-32):.1f}Β°C"}`
will round the solubility to 2 decimals, and display the melting point in
Celsius instead of Fahrenheit with a single digit precision and some text
before (MP) and after (Β°C) the value. These transformations only affect
            columns in `subset` and `tooltip`, and are applied independently of `style`
"""
tr = []
data = []
df = self.dataframe
cell_width = self.img_size[0]
if subset is None:
subset = df.columns.tolist()
subset = [subset.pop(subset.index("img"))] + subset
if style is None:
style = {}
if transform is None:
transform = {}
for i, row in df.iterrows():
ncell = i + 1
nrow, ncol = divmod(i, n_cols)
            td = [f'<td class="col-{ncol}">']
if "__all__" in style.keys():
s = style["__all__"](row)
div = [f'<div class="cell-{i}" style="{s}">']
else:
div = [f'<div class="cell-{i}">']
for col in subset:
v = row[col]
if col == "img" and tooltip:
popover = tooltip_formatter(row, tooltip, tooltip_fmt, style,
transform)
func = transform.get(col)
v = func(v) if func else v
item = (f'<div class="data data-{col} mols2grid-tooltip" data-toggle="popover" '
f'data-content="{escape(popover)}">{v}</div>')
else:
func = style.get(col)
if func:
item = f'<div class="data data-{col}" style="{func(v)}">'
else:
item = f'<div class="data data-{col}">'
func = transform.get(col)
v = func(v) if func else v
item += f'{v}</div>'
div.append(item)
div.append("</div>")
td.append("\n".join(div))
td.append("</td>")
tr.append("\n".join(td))
if (ncell % n_cols == 0) or (ncell == len(df)):
cell = [f'<tr class="row-{nrow}">']
cell.append("\n".join(tr))
cell.append("</tr>")
data.append("\n".join(cell))
tr = []
template = env.get_template('table.html')
template_kwargs = dict(
border = border,
textalign = textalign,
cell_width = cell_width,
fontfamily = fontfamily,
fontsize = fontsize,
gap = gap,
hover_color = hover_color,
tooltip = tooltip,
tooltip_trigger = repr(tooltip_trigger),
tooltip_placement = repr(tooltip_placement),
data = "\n".join(data),
)
return template.render(**template_kwargs)
@requires("IPython.display")
def display(self, width="100%", height=None, iframe_allow="clipboard-write",
**kwargs):
"""Render and display the grid in a Jupyter notebook"""
doc = self.render(**kwargs)
iframe = (env.get_template("html/iframe.html")
.render(width=width, height=height, padding=18,
allow=iframe_allow, doc=escape(doc)))
return HTML(iframe)
def save(self, output, **kwargs):
"""Render and save the grid in an HTML document"""
with open(output, "w") as f:
f.write(self.render(**kwargs))
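# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module):
# build a grid from a few RDKit molecules and save it as an HTML document.
# The SMILES strings and the output file name are arbitrary examples, and the
# packaged "pages.html" template is assumed to be available.
if __name__ == "__main__":
    demo_mols = [Chem.MolFromSmiles(smi) for smi in ("CCO", "c1ccccc1", "CC(=O)O")]
    grid = MolGrid.from_mols(demo_mols, name="demo")
    grid.save("demo_grid.html", template="pages", n_cols=3, n_rows=1)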
|
70717
|
from unittest.mock import patch
from urllib.parse import urlencode
import pytest
from pyinaturalist.constants import API_V1_BASE_URL
from pyinaturalist.v1 import get_taxa, get_taxa_autocomplete, get_taxa_by_id, get_taxa_map_layers
from test.conftest import load_sample_data
CLASS_AND_HIGHER = ['class', 'superclass', 'subphylum', 'phylum', 'kingdom']
SPECIES_AND_LOWER = ['form', 'variety', 'subspecies', 'hybrid', 'species']
CLASS_THROUGH_PHYLUM = ['class', 'superclass', 'subphylum', 'phylum']
def test_get_taxa(requests_mock):
params = {'q': 'vespi', 'rank': 'genus,subgenus,species'}
requests_mock.get(
f'{API_V1_BASE_URL}/taxa?{urlencode(params)}',
json=load_sample_data('get_taxa.json'),
status_code=200,
)
response = get_taxa(q='vespi', rank=['genus', 'subgenus', 'species'])
first_result = response['results'][0]
assert len(response['results']) == response['total_results'] == 30
assert first_result['id'] == 70118
assert first_result['name'] == 'Nicrophorus vespilloides'
assert first_result['rank'] == 'species'
assert first_result['is_active'] is True
assert len(first_result['ancestor_ids']) == 14
@pytest.mark.parametrize(
'params, expected_ranks',
[
({'rank': 'genus'}, 'genus'),
({'min_rank': 'class'}, CLASS_AND_HIGHER),
({'max_rank': 'species'}, SPECIES_AND_LOWER),
        ({'min_rank': 'class', 'max_rank': 'phylum'}, CLASS_THROUGH_PHYLUM),
({'max_rank': 'species', 'rank': 'override_me'}, SPECIES_AND_LOWER),
],
)
@patch('pyinaturalist.v1.taxa.get_v1')
def test_get_taxa_by_rank_range(
mock_get,
params,
expected_ranks,
):
# Make sure custom rank params result in the correct 'rank' param value
get_taxa(**params)
params = mock_get.call_args[1]
requested_rank = params['rank']
assert requested_rank == expected_ranks
def test_get_taxa_by_id(requests_mock):
taxon_id = 70118
requests_mock.get(
f'{API_V1_BASE_URL}/taxa/{taxon_id}',
json=load_sample_data('get_taxa_by_id.json'),
status_code=200,
)
response = get_taxa_by_id(taxon_id)
result = response['results'][0]
assert response['total_results'] == len(response['results']) == 1
assert result['id'] == taxon_id
assert result['name'] == 'Nicrophorus vespilloides'
assert result['rank'] == 'species'
assert result['is_active'] is True
assert len(result['ancestors']) == 12
@pytest.mark.parametrize('taxon_id', ['asdf', [None], [1, 'not a number']])
def test_get_taxa_by_id__invalid_inputs(taxon_id):
with pytest.raises(ValueError):
get_taxa_by_id(taxon_id)
def test_get_taxa_autocomplete(requests_mock):
requests_mock.get(
f'{API_V1_BASE_URL}/taxa/autocomplete',
json=load_sample_data('get_taxa_autocomplete.json'),
status_code=200,
)
response = get_taxa_autocomplete(q='vespi')
first_result = response['results'][0]
assert len(response['results']) == response['total_results'] == 10
assert first_result['matched_term'] == 'Vespidae'
assert first_result['id'] == 52747
assert first_result['name'] == 'Vespidae'
assert first_result['rank'] == 'family'
assert first_result['is_active'] is True
assert len(first_result['ancestor_ids']) == 11
def test_get_taxa_map_layers(requests_mock):
requests_mock.get(
f'{API_V1_BASE_URL}/taxa/47588/map_layers',
json=load_sample_data('get_taxa_map_layers.json'),
status_code=200,
)
response = get_taxa_map_layers(47588)
assert response['gbif_id'] == 2820380
assert response['gbif_url'] == 'https://www.gbif.org/species/2820380'
assert response['ranges'] is False
assert response['listed_places'] is True
|
70735
|
class LDAP_record:
"""
    Consume an LDAP record and provide methods for accessing interesting data.
"""
def __init__(self, unid):
self.error = False
ldap_dict = {}
#
# request complete user record from LDAP
cmd = "/Users/" + unid
try:
raw_data = subprocess.check_output(["/usr/bin/dscl", "/LDAPv3/your.ldap.server", "-read", cmd])
except:
self.error = True
return
#
# begin parsing data into dictionary
        raw_data = raw_data.decode("utf-8", errors="replace") if isinstance(raw_data, bytes) else raw_data
        raw_data = raw_data.replace('\n ', ' ')
raw_data = raw_data.split('\n')
for line in raw_data:
y = line.split(":")
y = [x for x in y if 'dsAttrTypeNative' not in x]
if len(y) == 2:
key = y[0]
value = y[1]
value = value.lstrip()
else:
key = y[0]
value = y[1:]
value = [x for x in value if x]
if key:
ldap_dict[key] = value
self.record = ldap_dict
def is_student(self):
try:
if 'CurrentStudent' in self.record['Student']:
return True
else:
return False
except:
return False
def is_staff(self):
try:
if self.record['Employee']: return True
except:
return False
def my_name(self):
try:
if self.record['gecos']:
if len(self.record['gecos']) > 1:
return self.record['gecos']
else:
# print "Beep!"
try:
if self.record['displayName']: return self.record['displayName']
except:
return None
except:
try:
if self.record['displayName']: return self.record['displayName']
except:
return None
def my_title(self):
try:
if self.record['title']: return self.record['title']
except:
return None
def my_email(self):
try:
if self.record['mail']: return self.record['mail']
except:
try:
if self.record['ExtensionAttribute4']: return self.record['ExtensionAttribute4']
except:
return None
def my_phone(self):
try:
if self.record['telephoneNumber']: return self.record['telephoneNumber']
except:
return None
def my_department(self):
try:
if self.record['department']: return self.record['department']
except:
return None
def my_address(self):
try:
if self.record['streetAddress']: return self.record['streetAddress']
except:
return None
#
# diagnostic methods
def print_full(self):
for k, v in self.record.items():
print ("%s > %r" % (k, v))
def print_keys(self):
return self.record.keys()
def ldap(self):
"""
translate LDAP data from object into fields used in tugboat
"""
try:
self.status_label.configure(style='Normal.TLabel')
self.status_string.set("LDAP selected.")
if self.valid_unid():
print("ldap %r" % self.endusername_string.get())
this_person = LDAP_record(self.endusername_string.get())
if not this_person.error:
self.fullname_string.set(this_person.my_name())
self.email_string.set(this_person.my_email())
self.phone_string.set(this_person.my_phone())
self.room_string.set(this_person.my_address())
if this_person.my_title() is None:
if this_person.my_department() is None:
self.position_string.set("")
else:
self.position_string.set(this_person.my_department())
else:
if this_person.my_department() is None:
self.position_string.set(this_person.my_title())
else:
self.position_string.set(this_person.my_title() + "/" + this_person.my_department())
if self.division_string.get():
self.division_string.set('None')
if self.building_string.get():
self.building_string.set('None')
else:
self.status_label.configure(style='Warning.TLabel')
self.status_string.set("LDAP error, no record found for uNID.")
self.reset_data()
else:
self.status_label.configure(style='Warning.TLabel')
self.status_string.set("Error setting LDAP Mode, no valid uNID.")
self.reset_user()
return
except ValueError:
self.status_label.configure(style='Warning.TLabel')
self.status_string.set("Error setting LDAP Mode.")
return
|
70745
|
import datetime
import logging
import json
import hashlib
import hmac
import base64
import aiohttp
import asyncio
from collections import deque
class AzureSentinelConnectorAsync:
def __init__(self, session: aiohttp.ClientSession, log_analytics_uri, workspace_id, shared_key, log_type, queue_size=1000, queue_size_bytes=25 * (2**20)):
self.log_analytics_uri = log_analytics_uri
self.workspace_id = workspace_id
self.shared_key = shared_key
self.log_type = log_type
self.queue_size = queue_size
self.queue_size_bytes = queue_size_bytes
self._queue = deque()
self.successfull_sent_events_number = 0
self.failed_sent_events_number = 0
self.lock = asyncio.Lock()
self.session = session
async def send(self, event):
events = None
async with self.lock:
self._queue.append(event)
if len(self._queue) >= self.queue_size:
events = list(self._queue)
self._queue.clear()
if events:
await self._flush(events)
async def flush(self):
await self._flush(list(self._queue))
async def _flush(self, data: list):
if data:
data = self._split_big_request(data)
await asyncio.gather(*[self._post_data(self.session, self.workspace_id, self.shared_key, d, self.log_type) for d in data])
def _build_signature(self, workspace_id, shared_key, date, content_length, method, content_type, resource):
x_headers = 'x-ms-date:' + date
string_to_hash = method + "\n" + str(content_length) + "\n" + content_type + "\n" + x_headers + "\n" + resource
bytes_to_hash = bytes(string_to_hash, encoding="utf-8")
decoded_key = base64.b64decode(shared_key)
encoded_hash = base64.b64encode(hmac.new(decoded_key, bytes_to_hash, digestmod=hashlib.sha256).digest()).decode()
authorization = "SharedKey {}:{}".format(workspace_id, encoded_hash)
return authorization
async def _post_data(self, session: aiohttp.ClientSession, workspace_id, shared_key, body, log_type):
logging.debug('Start sending data to sentinel')
events_number = len(body)
body = json.dumps(body)
method = 'POST'
content_type = 'application/json'
resource = '/api/logs'
rfc1123date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
content_length = len(body)
signature = self._build_signature(workspace_id, shared_key, rfc1123date, content_length, method, content_type, resource)
uri = self.log_analytics_uri + resource + '?api-version=2016-04-01'
headers = {
'content-type': content_type,
'Authorization': signature,
'Log-Type': log_type,
'x-ms-date': rfc1123date
}
try_number = 1
while True:
try:
if len(body) < 10:
logging.info(body)
await self._make_request(session, uri, body, headers)
except Exception as err:
if try_number < 3:
logging.warning('Error while sending data to Azure Sentinel. Try number: {}. Trying one more time. {}'.format(try_number, err))
await asyncio.sleep(try_number)
try_number += 1
else:
logging.error(str(err))
self.failed_sent_events_number += events_number
raise err
else:
logging.debug('{} events have been successfully sent to Azure Sentinel'.format(events_number))
self.successfull_sent_events_number += events_number
break
async def _make_request(self, session, uri, body, headers):
async with session.post(uri, data=body, headers=headers, ssl=False) as response:
if not (200 <= response.status <= 299):
raise Exception("Error during sending events to Azure Sentinel. Response code: {}".format(response.status))
def _check_size(self, queue):
data_bytes_len = len(json.dumps(queue).encode())
return data_bytes_len < self.queue_size_bytes
def _split_big_request(self, queue):
if self._check_size(queue):
return [queue]
else:
middle = int(len(queue) / 2)
queues_list = [queue[:middle], queue[middle:]]
return self._split_big_request(queues_list[0]) + self._split_big_request(queues_list[1])
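# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module):
# buffer a few events and flush the remainder. The workspace id, key and log
# type below are placeholders; real Log Analytics credentials are needed for
# the POST to succeed.
async def _demo():
    fake_key = base64.b64encode(b"not-a-real-shared-key").decode()
    async with aiohttp.ClientSession() as session:
        connector = AzureSentinelConnectorAsync(
            session,
            log_analytics_uri="https://<workspace-id>.ods.opinsights.azure.com",
            workspace_id="<workspace-id>",
            shared_key=fake_key,
            log_type="DemoLog",
            queue_size=100,
        )
        for i in range(5):
            await connector.send({"event_id": i, "message": "demo"})
        await connector.flush()  # pushes the 5 buffered events in one request
if __name__ == "__main__":
    asyncio.run(_demo())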
|
70763
|
import sys
import yaml
class Config:
def __init__(self, cfg=None):
self.cfg = {}
if cfg is not None:
self.update(cfg)
def __getattribute__(self, name):
cfg = object.__getattribute__(self, 'cfg')
if name not in cfg:
return object.__getattribute__(self, name)
return cfg[name]
def items(self):
return object.__getattribute__(self, 'cfg').items()
def update(self, new_cfg):
cfg = self.cfg
for key, val in new_cfg.items():
if type(val) == dict:
val = Config(val)
if key in cfg:
cfg[key].update(val)
continue
cfg[key] = val
def add(self, arg, val=None):
# Manual item
if val is not None:
subkeys = arg.split('.')
subconfig = self
for subkey in subkeys[:-1]:
subconfig = subconfig.cfg[subkey]
if subkeys[-1] in subconfig.cfg:
if type(subconfig.cfg[subkeys[-1]]) == int:
val = int(val)
elif type(subconfig.cfg[subkeys[-1]]) == float:
val = float(val)
subconfig.cfg[subkeys[-1]] = val
print('{} is set to {}'.format(arg, val))
return
# Config file shortcut
if not arg.endswith('.yaml'):
arg = 'configs/{}.yaml'.format(arg)
# Config file
print('importing config from "{}"'.format(arg))
with open(arg) as f:
self.update(yaml.load(f, Loader=yaml.Loader))
def as_dict(self):
return {key: (val.as_dict() if isinstance(val, Config) else val) for key, val in self.cfg.items()}
def show(self, depth=0):
yaml.dump(self.as_dict(), sys.stdout)
def get_path(self, name):
return self.data.cfg[name].format(self.data.name, self.model.shortname)
def init_config():
config = Config()
config.add('configs/default.yaml')
for arg in sys.argv[1:]:
config.add(*arg.split('='))
return config
def reset_config():
global config
config = init_config()
config = init_config()
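# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module):
# build a standalone Config from a plain dict and override a nested value the
# same way command-line "key.sub=value" arguments are handled by init_config().
# The keys below are made up; note that running this module already requires
# configs/default.yaml to exist because of the init_config() call above.
if __name__ == "__main__":
    demo = Config({"model": {"lr": 0.001, "name": "baseline"}})
    demo.add("model.lr", "0.01")  # cast to float because the existing value is a float
    demo.show()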
|
70820
|
import os
import copy
import json
import logging
import torch
from torch.utils.data import TensorDataset
logger = logging.getLogger(__name__)
class InputExample(object):
"""
A single training/test example for simple sequence classification.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
def __init__(self, guid, text_a, label):
self.guid = guid
self.text_a = text_a
self.label = label
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, attention_mask, token_type_ids, label_id):
self.input_ids = input_ids
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.label_id = label_id
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class NsmcProcessor(object):
"""Processor for the NSMC data set """
def __init__(self, args):
self.args = args
@classmethod
def _read_file(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8") as f:
lines = []
for line in f:
lines.append(line.strip())
return lines
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines[1:]):
line = line.split('\t')
guid = "%s-%s" % (set_type, i)
text_a = line[1]
label = int(line[2])
if i % 1000 == 0:
logger.info(line)
examples.append(InputExample(guid=guid, text_a=text_a, label=label))
return examples
def get_examples(self, mode):
"""
Args:
mode: train, dev, test
"""
file_to_read = None
if mode == 'train':
file_to_read = self.args.train_file
elif mode == 'dev':
file_to_read = self.args.dev_file
elif mode == 'test':
file_to_read = self.args.test_file
logger.info("LOOKING AT {}".format(os.path.join(self.args.data_dir, file_to_read)))
return self._create_examples(self._read_file(os.path.join(self.args.data_dir, file_to_read)), mode)
processors = {
"nsmc": NsmcProcessor,
}
def convert_examples_to_features(examples, max_seq_len, tokenizer,
cls_token_segment_id=0,
pad_token_segment_id=0,
sequence_a_segment_id=0,
mask_padding_with_zero=True):
# Setting based on the current model type
cls_token = tokenizer.cls_token
sep_token = tokenizer.sep_token
pad_token_id = tokenizer.pad_token_id
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 5000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
tokens = tokenizer.tokenize(example.text_a)
# Account for [CLS] and [SEP]
special_tokens_count = 2
if len(tokens) > max_seq_len - special_tokens_count:
tokens = tokens[:(max_seq_len - special_tokens_count)]
# Add [SEP] token
tokens += [sep_token]
token_type_ids = [sequence_a_segment_id] * len(tokens)
# Add [CLS] token
tokens = [cls_token] + tokens
token_type_ids = [cls_token_segment_id] + token_type_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_len - len(input_ids)
input_ids = input_ids + ([pad_token_id] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_seq_len, "Error with input length {} vs {}".format(len(input_ids), max_seq_len)
assert len(attention_mask) == max_seq_len, "Error with attention mask length {} vs {}".format(len(attention_mask), max_seq_len)
assert len(token_type_ids) == max_seq_len, "Error with token type length {} vs {}".format(len(token_type_ids), max_seq_len)
label_id = example.label
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % example.guid)
logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
logger.info("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
label_id=label_id
))
return features
def load_and_cache_examples(args, tokenizer, mode):
processor = processors[args.task](args)
# Load data features from cache or dataset file
cached_file_name = 'cached_{}_{}_{}_{}'.format(
args.task, list(filter(None, args.model_name_or_path.split("/"))).pop(), args.max_seq_len, mode)
cached_features_file = os.path.join(args.data_dir, cached_file_name)
if os.path.exists(cached_features_file):
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
if mode == "train":
examples = processor.get_examples("train")
elif mode == "dev":
examples = processor.get_examples("dev")
elif mode == "test":
examples = processor.get_examples("test")
else:
            raise Exception("Only 'train', 'dev', and 'test' modes are available")
features = convert_examples_to_features(examples, args.max_seq_len, tokenizer)
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_attention_mask,
all_token_type_ids, all_label_ids)
return dataset
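# ---------------------------------------------------------------------------
# Minimal sketch (illustrative only, not part of the original module) of
# convert_examples_to_features using a stand-in whitespace tokenizer. The
# _WhitespaceTokenizer stub is made up; the real pipeline uses a pretrained
# subword tokenizer exposing the same attributes (cls_token, sep_token,
# pad_token_id) and methods (tokenize, convert_tokens_to_ids).
if __name__ == "__main__":
    class _WhitespaceTokenizer:
        cls_token, sep_token, pad_token_id = "[CLS]", "[SEP]", 0
        def tokenize(self, text):
            return text.split()
        def convert_tokens_to_ids(self, tokens):
            # toy vocabulary: hash each token into a small id space (1..999)
            return [1 + (hash(tok) % 999) for tok in tokens]
    demo_examples = [InputExample(guid="demo-0", text_a="this movie was great", label=1)]
    feats = convert_examples_to_features(demo_examples, max_seq_len=8, tokenizer=_WhitespaceTokenizer())
    print(feats[0].input_ids)       # 6 token ids followed by 2 pad ids
    print(feats[0].attention_mask)  # [1, 1, 1, 1, 1, 1, 0, 0]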
|
70844
|
import numpy as np
import scipy.sparse as ssp
import torch
from beta_rec.models.torch_engine import ModelEngine
from beta_rec.utils.common_util import timeit
def top_k(values, k, exclude=[]):
"""Return the indices of the k items with the highest value in the list of values.
Exclude the ids from the list "exclude".
"""
# Put low similarity to viewed items to exclude them from recommendations
values[exclude] = -np.inf
return list(np.argpartition(-values, range(k))[:k])
def get_sparse_vector(ids, length, values=None):
"""Sparse vector generation.
If "values" is None, the elements are set to 1.
"""
n = len(ids)
if values is None:
return ssp.coo_matrix((np.ones(n), (ids, np.zeros(n))), (length, 1)).tocsc()
else:
return ssp.coo_matrix((values, (ids, np.zeros(n))), (length, 1)).tocsc()
class UserKNN(torch.nn.Module):
"""A PyTorch Module for UserKNN model."""
def __init__(self, config):
"""Initialize UserKNN Class."""
super(UserKNN, self).__init__()
self.config = config
self.device = self.config["device_str"]
self.n_users = self.config["n_users"]
self.n_items = self.config["n_items"]
self.neighbourhood_size = self.config["neighbourhood_size"]
def prepare_model(self, data):
"""Load data into matrices.
:param data:
:return:
"""
row = data.train["col_user"].to_numpy()
col = data.train["col_item"].to_numpy()
self.binary_user_item = ssp.coo_matrix(
(np.ones(len(data.train)), (row, col)), shape=(self.n_users, self.n_items)
).tocsr()
def _items_count_per_user(self):
"""Calculate the number of interacted items for an user.
:return:
"""
if not hasattr(self, "__items_count_per_user"):
self.__items_count_per_user = np.asarray(
self.binary_user_item.sum(axis=1)
).ravel()
return self.__items_count_per_user
def similarity_with_users(self, sequence):
"""Calculate the similarity between the a given user and all users according to the overlap ratio.
:param sequence: the user's interacted items
:return:
"""
sparse_sequence = get_sparse_vector(sequence, self.n_items)
overlap = self.binary_user_item.dot(sparse_sequence).toarray().ravel()
overlap[overlap != 0] /= np.sqrt(self._items_count_per_user()[overlap != 0])
return overlap
def forward(self, batch_data):
"""Redundant method for UserKNN.
Args:
batch_data: tuple consists of (users, pos_items, neg_items), which must be LongTensor.
"""
return 0.0
def predict(self, users, items):
"""Predict result with the model.
Args:
users (int, or list of int): user id(s).
items (int, or list of int): item id(s).
Return:
scores (int, or list of int): predicted scores of these user-item pairs.
"""
scores = []
for i in range(len(users)):
sequence = self.binary_user_item.getrow(users[i]).nonzero()[0]
sim_with_users = self.similarity_with_users(sequence)
nearest_neighbour = top_k(sim_with_users, self.neighbourhood_size)
neighbour_items = get_sparse_vector(
nearest_neighbour,
self.n_users,
values=sim_with_users[nearest_neighbour],
)
sim_with_items = (
self.binary_user_item.T.dot(neighbour_items).toarray().ravel()
)
sim_with_items[sequence] = -np.inf
scores.append(sim_with_items[items[i]])
return torch.tensor(scores)
class UserKNNEngine(ModelEngine):
"""UserKNNEngine Class."""
def __init__(self, config):
"""Initialize UserKNNEngine Class."""
print("userKNNEngine init")
self.config = config
self.model = UserKNN(config["model"])
# super(UserKNNEngine, self).__init__(config)
def train_single_batch(self, batch_data):
"""Train a single batch.
        However, UserKNN is a neighbourhood model that bases its predictions on the similarity relationships among users.
It requires no training procedure.
Args:
batch_data (list): batch users, positive items and negative items.
Return:
0
"""
assert hasattr(self, "model"), "Please specify the exact model !"
return 0
@timeit
def train_an_epoch(self, train_loader, epoch_id):
"""Train a epoch, generate batch_data from data_loader, and call train_single_batch.
Like the train_single_batch method, UserKNN requires no training procedure.
Args:
train_loader (DataLoader):
epoch_id (int): set to 1.
"""
assert hasattr(self, "model"), "Please specify the exact model !"
# self.model.train()
print(f"[Training Epoch {epoch_id}] skipped")
self.writer.add_scalar("model/loss", 0.0, epoch_id)
self.writer.add_scalar("model/regularizer", 0.0, epoch_id)
|
70849
|
class RetrievalError(Exception):
pass
class SetterError(Exception):
pass
class ControlError(SetterError):
pass
class AuthentificationError(Exception):
pass
class TemporaryAuthentificationError(AuthentificationError):
pass
class APICompatibilityError(Exception):
pass
class APIError(Exception):
pass
|
70857
|
from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides access to the chemical component database, which is stored in
``moldesign/_static_data/chemical_components``
and can be re-generated by running
``cd moldesign/_static_data/ && scripts/generate_residue_data.py --download``
"""
import os
from . import PACKAGEPATH
from moldesign import utils
class _DatabaseEntry(object):
""" Maps into a field stored in the database
"""
def __init__(self, hostdb, keyname):
self.hostdb = hostdb
self.keyname = keyname
self.index = self.hostdb['__FIELDS__']['RESFIELDS'].index(keyname)
def __repr__(self):
return '<Chemical component dictionary: "%s" entries>' % self.keyname
def __getitem__(self, item):
return self.hostdb[item][self.index]
__contains__ = utils.Alias('hostdb.__contains__')
def keys(self):
for key in self.hostdb.keys():
if key == '__FIELDS__':
continue
yield key
def items(self):
for key in self:
yield key, self[key]
__iter__ = keys
# This is a very big dict, so we load it as a compressed database
_bondfilename = os.path.join(PACKAGEPATH, '_static_data', 'chemical_components')
CCD_DATABASE = utils.CompressedJsonDbm(_bondfilename, 'r', dbm=utils.ReadOnlyDumb)
RESIDUE_BONDS = _DatabaseEntry(CCD_DATABASE, 'bonds')
RESIDUE_ATOMS = _DatabaseEntry(CCD_DATABASE, 'atoms')
RESIDUE_CCD_NAMES = _DatabaseEntry(CCD_DATABASE, 'name')
RESIDUE_CCD_TYPES = _DatabaseEntry(CCD_DATABASE, 'type')
|
70877
|
from rknn.api import RKNN
class RKNN_model_container():
def __init__(self, model_path, target=None, device_id=None) -> None:
rknn = RKNN()
# Direct Load RKNN Model
rknn.load_rknn(model_path)
print('--> Init runtime environment')
        if target is None:
ret = rknn.init_runtime()
else:
ret = rknn.init_runtime(target=target, device_id=device_id)
if ret != 0:
print('Init runtime environment failed')
exit(ret)
print('done')
self.rknn = rknn
def run(self, inputs):
        if not isinstance(inputs, (list, tuple)):
            inputs = [inputs]
result = self.rknn.inference(inputs=inputs)
return result
|
70891
|
import geopandas as gpd
import requests
from vt2geojson.tools import vt_bytes_to_geojson
MAPBOX_ACCESS_TOKEN = "<KEY>"
x = 150
y = 194
z = 9
url = f"https://api.mapbox.com/v4/mapbox.mapbox-streets-v6/{z}/{x}/{y}.vector.pbf?access_token={MAPBOX_ACCESS_TOKEN}"
r = requests.get(url)
assert r.status_code == 200, r.content
vt_content = r.content
features = vt_bytes_to_geojson(vt_content, x, y, z)
gdf = gpd.GeoDataFrame.from_features(features)
print(gdf.head())
|
70892
|
from tensorflow.python.ops import init_ops
from tensorflow.python.util import nest
import tensorflow as tf
def stack_bidirectional_dynamic_rnn(cells_fw, cells_bw, inputs, initial_states_fw=None, initial_states_bw=None,
dtype=None, sequence_length=None, parallel_iterations=None, scope=None,
time_pooling=None, pooling_avg=None, initializer=None, inter_layers=None,
inter_layer_activation=None, batch_norm=None, inter_layer_keep_prob=None,
pervasive_dropout=None):
states_fw = []
states_bw = []
prev_layer = inputs
with tf.variable_scope(scope or "stack_bidirectional_rnn", initializer=initializer):
for i, (cell_fw, cell_bw) in enumerate(zip(cells_fw, cells_bw)):
initial_state_fw = None
initial_state_bw = None
if initial_states_fw:
initial_state_fw = initial_states_fw[i]
if initial_states_bw:
initial_state_bw = initial_states_bw[i]
with tf.variable_scope('cell_{}'.format(i)):
outputs, (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw,
cell_bw,
prev_layer,
initial_state_fw=initial_state_fw,
initial_state_bw=initial_state_bw,
sequence_length=sequence_length,
parallel_iterations=parallel_iterations,
dtype=dtype)
# Concat the outputs to create the new input.
prev_layer = tf.concat(outputs, axis=2)
if time_pooling and i < len(cells_fw) - 1:
prev_layer, sequence_length = apply_time_pooling(prev_layer, sequence_length, time_pooling[i],
pooling_avg)
if inter_layers and len(inter_layers) > i and inter_layers[i]:
layer_size = inter_layers[i]
prev_layer = tf.layers.dense(prev_layer, layer_size, use_bias=not batch_norm)
if inter_layer_activation.lower() == 'relu':
prev_layer = tf.nn.relu(prev_layer)
if batch_norm:
prev_layer = tf.layers.batch_normalization(prev_layer)
if inter_layer_keep_prob is not None:
noise_shape = [1, 1, tf.shape(prev_layer)[2]] if pervasive_dropout else None
prev_layer = tf.nn.dropout(prev_layer, keep_prob=inter_layer_keep_prob,
noise_shape=noise_shape)
states_fw.append(state_fw)
states_bw.append(state_bw)
return prev_layer, tuple(states_fw), tuple(states_bw)
def apply_time_pooling(inputs, sequence_length, stride, pooling_avg=False):
shape = [tf.shape(inputs)[0], tf.shape(inputs)[1], inputs.get_shape()[2].value]
if pooling_avg:
inputs_ = [inputs[:, i::stride, :] for i in range(stride)]
max_len = tf.shape(inputs_[0])[1]
for k in range(1, stride):
len_ = tf.shape(inputs_[k])[1]
paddings = tf.stack([[0, 0], [0, max_len - len_], [0, 0]])
inputs_[k] = tf.pad(inputs_[k], paddings=paddings)
inputs = tf.reduce_sum(inputs_, axis=0) / len(inputs_)
else:
inputs = inputs[:, ::stride, :]
inputs = tf.reshape(inputs, tf.stack([shape[0], tf.shape(inputs)[1], shape[2]]))
sequence_length = (sequence_length + stride - 1) // stride # rounding up
return inputs, sequence_length
class CellInitializer(init_ops.Initializer):
"""
Orthogonal initialization of recurrent connections, like in Bahdanau et al. 2015
"""
def __init__(self, cell_size):
self.cell_size = cell_size
self.default_initializer = tf.get_variable_scope().initializer or init_ops.glorot_uniform_initializer()
self.initializer = tf.orthogonal_initializer()
def __call__(self, shape, dtype=None, partition_info=None, verify_shape=None):
if len(shape) == 1 or shape[1] % self.cell_size != 0:
return self.default_initializer(shape, dtype=dtype, partition_info=partition_info)
input_size = shape[0] - self.cell_size
W, U = [], []
for _ in range(shape[1] // self.cell_size):
W.append(self.default_initializer(shape=[input_size, self.cell_size]))
U.append(self.initializer(shape=[self.cell_size, self.cell_size]))
return tf.concat([tf.concat(W, axis=1), tf.concat(U, axis=1)], axis=0)
class DropoutGRUCell(tf.nn.rnn_cell.RNNCell):
def __init__(self, num_units, activation=None, reuse=None, kernel_initializer=None, bias_initializer=None,
layer_norm=False, state_keep_prob=None, input_keep_prob=None, input_size=None, final=False):
super(DropoutGRUCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._activation = activation or tf.nn.tanh
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._layer_norm = layer_norm
self._state_keep_prob = state_keep_prob
self._input_keep_prob = input_keep_prob
self._final = final
def batch_noise(s):
s = tf.concat(([1], tf.TensorShape(s).as_list()), 0)
return tf.random_uniform(s)
if input_keep_prob is not None:
self._input_noise = DropoutGRUCell._enumerated_map_structure(lambda i, s: batch_noise(s), input_size)
if state_keep_prob is not None:
self._state_noise = DropoutGRUCell._enumerated_map_structure(lambda i, s: batch_noise(s), num_units)
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
@staticmethod
def _enumerated_map_structure(map_fn, *args, **kwargs):
ix = [0]
def enumerated_fn(*inner_args, **inner_kwargs):
r = map_fn(ix[0], *inner_args, **inner_kwargs)
ix[0] += 1
return r
return nest.map_structure(enumerated_fn, *args, **kwargs)
@staticmethod
def _dropout(values, recurrent_noise, keep_prob):
def dropout(index, value, noise):
random_tensor = keep_prob + noise
binary_tensor = tf.floor(random_tensor)
ret = tf.div(value, keep_prob) * binary_tensor
ret.set_shape(value.get_shape())
return ret
return DropoutGRUCell._enumerated_map_structure(dropout, values, recurrent_noise)
def call(self, inputs, state):
inputs = tf.concat(inputs, axis=1)
input_size = inputs.shape[1]
state_size = state.shape[1]
dtype = inputs.dtype
if self._state_keep_prob:
dropped_state = DropoutGRUCell._dropout(state, self._state_noise, self._state_keep_prob)
else:
dropped_state = state
if self._input_keep_prob:
dropped_inputs = DropoutGRUCell._dropout(inputs, self._input_noise, self._input_keep_prob)
else:
dropped_inputs = inputs
with tf.variable_scope('state'):
state_weights = tf.get_variable('kernel', [state_size, 3 * self._num_units], dtype=dtype, initializer=self._kernel_initializer)
with tf.variable_scope('input'):
input_weights = tf.get_variable('kernel', [input_size, 3 * self._num_units], dtype=dtype, initializer=self._kernel_initializer)
bias = tf.get_variable('bias', [3 * self._num_units], dtype=dtype, initializer=self._bias_initializer)
inputs_ = tf.matmul(dropped_inputs, input_weights)
state_ = tf.matmul(dropped_state, state_weights)
if self._layer_norm:
state_ = tf.contrib.layers.layer_norm(state_)
inputs_ = tf.contrib.layers.layer_norm(inputs_)
size = 2 * self._num_units
value = tf.nn.sigmoid(state_[:,:size] + inputs_[:,:size] + bias[:size])
r, u = tf.split(value=value, num_or_size_splits=2, axis=1)
c = self._activation(inputs_[:,size:] + state_[:,size:] * r + bias[size:])
new_h = u * state + (1 - u) * c
return new_h, new_h
class GRUCell(tf.nn.rnn_cell.RNNCell):
def __init__(self, num_units, activation=None, reuse=None, kernel_initializer=None, bias_initializer=None,
layer_norm=False):
super(GRUCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._activation = activation or tf.nn.tanh
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._layer_norm = layer_norm
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
inputs = tf.concat(inputs, axis=1)
input_size = inputs.shape[1]
state_size = state.shape[1]
dtype = inputs.dtype
with tf.variable_scope("gates"):
bias_initializer = self._bias_initializer
if self._bias_initializer is None and not self._layer_norm: # bias of 1 for layer norm?
bias_initializer = init_ops.constant_initializer(1.0, dtype=dtype)
bias = tf.get_variable('bias', [2 * self._num_units], dtype=dtype, initializer=bias_initializer)
weights = tf.get_variable('kernel', [input_size + state_size, 2 * self._num_units], dtype=dtype,
initializer=self._kernel_initializer)
inputs_ = tf.matmul(inputs, weights[:input_size])
state_ = tf.matmul(state, weights[input_size:])
if self._layer_norm:
inputs_ = tf.contrib.layers.layer_norm(inputs_, scope='inputs')
state_ = tf.contrib.layers.layer_norm(state_, scope='state')
value = tf.nn.sigmoid(inputs_ + state_ + bias)
r, u = tf.split(value=value, num_or_size_splits=2, axis=1)
with tf.variable_scope("candidate"):
bias = tf.get_variable('bias', [self._num_units], dtype=dtype, initializer=self._bias_initializer)
weights = tf.get_variable('kernel', [input_size + state_size, self._num_units], dtype=dtype,
initializer=self._kernel_initializer)
c = tf.matmul(tf.concat([inputs, r * state], axis=1), weights)
if self._layer_norm:
c = tf.contrib.layers.layer_norm(c)
c = self._activation(c + bias)
new_h = u * state + (1 - u) * c
return new_h, new_h
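# ---------------------------------------------------------------------------
# Minimal NumPy analogue (illustrative only, not part of the original module)
# of what apply_time_pooling does to the time axis: keep every `stride`-th
# step (the pooling_avg=False branch) and round the sequence lengths up. The
# shapes and lengths below are made up.
if __name__ == "__main__":
    import numpy as np
    batch, time, depth, stride = 2, 7, 4, 2
    x = np.arange(batch * time * depth, dtype=np.float32).reshape(batch, time, depth)
    pooled = x[:, ::stride, :]
    seq_len = np.array([7, 5])
    print(pooled.shape)                      # (2, 4, 4)
    print((seq_len + stride - 1) // stride)  # [4 3], i.e. lengths rounded up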
|
70909
|
import urllib.parse
from datetime import datetime
from unittest.mock import patch
from django.contrib.auth.models import User
from django.contrib.messages import get_messages
from django.test import TestCase
from django.utils import timezone
from dfirtrack_main.models import (
System,
Systemstatus,
Task,
Taskname,
Taskpriority,
Taskstatus,
)
class TaskCreatorViewTestCase(TestCase):
"""task creator view tests"""
@classmethod
def setUpTestData(cls):
# create user
test_user = User.objects.create_user(
username='testuser_task_creator', password='<PASSWORD>'
)
# create objects
Taskname.objects.create(taskname_name='task_creator_taskname_1')
Taskname.objects.create(taskname_name='task_creator_taskname_2')
Taskname.objects.create(taskname_name='task_creator_taskname_3')
Taskpriority.objects.create(taskpriority_name='taskpriority_1')
# create object
systemstatus_1 = Systemstatus.objects.create(
systemstatus_name='task_creator_systemstatus_1'
)
# create objects
System.objects.create(
system_name='task_creator_system_1',
systemstatus=systemstatus_1,
system_created_by_user_id=test_user,
system_modified_by_user_id=test_user,
)
System.objects.create(
system_name='task_creator_system_2',
systemstatus=systemstatus_1,
system_created_by_user_id=test_user,
system_modified_by_user_id=test_user,
)
System.objects.create(
system_name='task_creator_system_3',
systemstatus=systemstatus_1,
system_created_by_user_id=test_user,
system_modified_by_user_id=test_user,
)
def test_task_creator_not_logged_in(self):
"""test creator view"""
# create url
destination = '/login/?next=' + urllib.parse.quote('/task/creator/', safe='')
# get response
response = self.client.get('/task/creator/', follow=True)
# compare
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def test_task_creator_logged_in(self):
"""test creator view"""
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# get response
response = self.client.get('/task/creator/')
# compare
self.assertEqual(response.status_code, 200)
def test_task_creator_template(self):
"""test creator view"""
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# get response
response = self.client.get('/task/creator/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/task/task_creator.html')
def test_task_creator_get_user_context(self):
"""test creator view"""
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# get response
response = self.client.get('/task/creator/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_task_creator')
def test_task_creator_redirect(self):
"""test creator view"""
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# create url
destination = urllib.parse.quote('/task/creator/', safe='/')
# get response
response = self.client.get('/task/creator', follow=True)
# compare
self.assertRedirects(
response, destination, status_code=301, target_status_code=200
)
def test_task_creator_post_redirect(self):
"""test creator view"""
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# get objects
taskname_1 = Taskname.objects.get(taskname_name='task_creator_taskname_1')
taskpriority_1 = Taskpriority.objects.get(taskpriority_name='taskpriority_1')
taskstatus_pending = Taskstatus.objects.get(taskstatus_name='10_pending')
system_1 = System.objects.get(system_name='task_creator_system_1')
# create post data
data_dict = {
'taskname': [
taskname_1.taskname_id,
],
'taskpriority': taskpriority_1.taskpriority_id,
'taskstatus': taskstatus_pending.taskstatus_id,
'system': [
system_1.system_id,
],
}
# create url
destination = '/task/'
# get response
response = self.client.post('/task/creator/', data_dict)
# compare
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def test_task_creator_post_system_and_tasks(self):
"""test creator view"""
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# get objects
taskname_1 = Taskname.objects.get(taskname_name='task_creator_taskname_1')
taskname_2 = Taskname.objects.get(taskname_name='task_creator_taskname_2')
taskname_3 = Taskname.objects.get(taskname_name='task_creator_taskname_3')
taskpriority_1 = Taskpriority.objects.get(taskpriority_name='taskpriority_1')
taskstatus_pending = Taskstatus.objects.get(taskstatus_name='10_pending')
system_1 = System.objects.get(system_name='task_creator_system_1')
system_2 = System.objects.get(system_name='task_creator_system_2')
system_3 = System.objects.get(system_name='task_creator_system_3')
# create post data
data_dict = {
'taskname': [taskname_1.taskname_id, taskname_2.taskname_id],
'taskpriority': taskpriority_1.taskpriority_id,
'taskstatus': taskstatus_pending.taskstatus_id,
'system': [system_1.system_id, system_2.system_id],
}
# get response
self.client.post('/task/creator/', data_dict)
# get object
task_1 = Task.objects.get(
system=system_1,
taskname=taskname_1,
)
# compare
self.assertTrue(system_1.task_set.filter(taskname=taskname_1).exists())
self.assertTrue(system_1.task_set.filter(taskname=taskname_2).exists())
self.assertFalse(system_1.task_set.filter(taskname=taskname_3).exists())
self.assertTrue(system_2.task_set.filter(taskname=taskname_1).exists())
self.assertTrue(system_2.task_set.filter(taskname=taskname_2).exists())
self.assertFalse(system_2.task_set.filter(taskname=taskname_3).exists())
self.assertFalse(system_3.task_set.filter(taskname=taskname_1).exists())
self.assertFalse(system_3.task_set.filter(taskname=taskname_2).exists())
self.assertFalse(system_3.task_set.filter(taskname=taskname_3).exists())
self.assertEqual(task_1.task_started_time, None)
self.assertEqual(task_1.task_finished_time, None)
def test_task_creator_post_times_working(self):
"""test creator view"""
# mock timezone.now()
dt = datetime(2020, 1, 2, tzinfo=timezone.utc)
with patch.object(timezone, 'now', return_value=dt):
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# get objects
taskname_started = Taskname.objects.create(
taskname_name='task_creator_started_time_working'
)
taskpriority_1 = Taskpriority.objects.get(
taskpriority_name='taskpriority_1'
)
taskstatus_working = Taskstatus.objects.get(taskstatus_name='20_working')
system_1 = System.objects.get(system_name='task_creator_system_1')
# create post data
data_dict = {
'taskname': [
taskname_started.taskname_id,
],
'taskpriority': taskpriority_1.taskpriority_id,
'taskstatus': taskstatus_working.taskstatus_id,
'system': [
system_1.system_id,
],
}
# get response
self.client.post('/task/creator/', data_dict)
# get object
task_started = Task.objects.get(
system=system_1,
taskname=taskname_started,
)
# compare
self.assertEqual(task_started.task_started_time, timezone.now())
self.assertEqual(task_started.task_finished_time, None)
def test_task_creator_post_times_done(self):
"""test creator view"""
# mock timezone.now()
dt = datetime(2020, 3, 4, tzinfo=timezone.utc)
with patch.object(timezone, 'now', return_value=dt):
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# get objects
taskname_finished = Taskname.objects.create(
taskname_name='task_creator_finished_time_working'
)
taskpriority_1 = Taskpriority.objects.get(
taskpriority_name='taskpriority_1'
)
taskstatus_done = Taskstatus.objects.get(taskstatus_name='30_done')
system_1 = System.objects.get(system_name='task_creator_system_1')
# create post data
data_dict = {
'taskname': [
taskname_finished.taskname_id,
],
'taskpriority': taskpriority_1.taskpriority_id,
'taskstatus': taskstatus_done.taskstatus_id,
'system': [
system_1.system_id,
],
}
# get response
self.client.post('/task/creator/', data_dict)
# get object
task_finished = Task.objects.get(
system=system_1,
taskname=taskname_finished,
)
# compare
self.assertEqual(task_finished.task_started_time, timezone.now())
self.assertEqual(task_finished.task_finished_time, timezone.now())
def test_task_creator_post_invalid_reload(self):
"""test creator view"""
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# create post data
data_dict = {}
# get response
response = self.client.post('/task/creator/', data_dict)
# compare
self.assertEqual(response.status_code, 200)
def test_task_creator_post_invalid_template(self):
"""test creator view"""
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# create post data
data_dict = {}
# get response
response = self.client.post('/task/creator/', data_dict)
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/task/task_creator.html')
def test_task_creator_post_messages(self):
"""test creator view"""
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# get objects
taskname_1 = Taskname.objects.get(taskname_name='task_creator_taskname_1')
taskname_2 = Taskname.objects.get(taskname_name='task_creator_taskname_2')
taskname_3 = Taskname.objects.get(taskname_name='task_creator_taskname_3')
taskpriority_1 = Taskpriority.objects.get(taskpriority_name='taskpriority_1')
taskstatus_pending = Taskstatus.objects.get(taskstatus_name='10_pending')
system_1 = System.objects.get(system_name='task_creator_system_1')
system_2 = System.objects.get(system_name='task_creator_system_2')
system_3 = System.objects.get(system_name='task_creator_system_3')
# create post data
data_dict = {
'taskname': [
taskname_1.taskname_id,
taskname_2.taskname_id,
taskname_3.taskname_id,
],
'taskpriority': taskpriority_1.taskpriority_id,
'taskstatus': taskstatus_pending.taskstatus_id,
'system': [system_1.system_id, system_2.system_id, system_3.system_id],
}
# get response
response = self.client.post('/task/creator/', data_dict)
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertEqual(str(messages[0]), 'Task creator started')
self.assertEqual(str(messages[1]), '9 tasks created for 3 systems.')
|
70995
|
from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
import pytest
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
@pytest.fixture
def PictureOptions():
from ..picture import PictureOptions
return PictureOptions
class TestPictureOptions:
def test_ctor(self, PictureOptions):
picture = PictureOptions()
xml = tostring(picture.to_tree())
expected = """
<pictureOptions />
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, PictureOptions):
src = """
<pictureOptions />
"""
node = fromstring(src)
picture = PictureOptions.from_tree(node)
assert picture == PictureOptions()
|
71029
|
from nalp.corpus import TextCorpus
from nalp.encoders import IntegerEncoder
from nalp.models import SeqGAN
# When generating artificial text, make sure
# to use the same data, classes and parameters
# as the pre-trained network
# Creating a word-level TextCorpus from file
corpus = TextCorpus(from_file='data/text/chapter1_harry.txt', corpus_type='word')
# Creating an IntegerEncoder and learning encoding
encoder = IntegerEncoder()
encoder.learn(corpus.vocab_index, corpus.index_vocab)
# Creating the SeqGAN
seqgan = SeqGAN(encoder=encoder, vocab_size=corpus.vocab_size, max_length=10, embedding_size=256,
hidden_size=512, n_filters=(64, 128, 256), filters_size=(3, 5, 5), dropout_rate=0.25, temperature=1)
# Loading pre-trained SeqGAN weights
seqgan.load_weights('trained/seqgan').expect_partial()
# Now, for the inference step, we build with a batch size equal to 1
seqgan.G.build((1, None))
# Defining a start string to generate the text
start_string = 'Mr. and Mrs. Dursley'
# Generating artificial text
text = seqgan.G.generate_temperature_sampling(start=start_string.split(' '), max_length=1000, temperature=1)
# Outputting the text
print(start_string + ' ' + ' '.join(text))
|
71046
|
import fnmatch
import os
import datetime
import sys
from subprocess import Popen, PIPE
import zipfile
import skimage.io
import scipy.io.wavfile
import jsonlines
import torch
from setka.pipes.Pipe import Pipe
def get_process_output(command):
if not isinstance(command, (list, tuple)):
command = command.split(' ')
    process = Popen(command, stdout=PIPE)  # command is a list, so no shell is needed
output, err = process.communicate()
exit_code = process.wait()
return exit_code, output.decode()
def check_list(path, masks):
for mask in masks:
if fnmatch.fnmatch(path, mask):
return False
return True
def collect_snapshot_list(command_root_dir, ignore_list, full_path=True):
results = []
for file in os.listdir(command_root_dir):
if check_list(file, ignore_list) and file[0] != '.':
if os.path.isdir(os.path.join(command_root_dir, file)):
for root, _, files in os.walk(os.path.join(command_root_dir, file)):
for sub_file in files:
if check_list(os.path.relpath(os.path.join(root, sub_file), command_root_dir), ignore_list):
results.append(os.path.join(root, sub_file))
else:
results.append(os.path.join(command_root_dir, file))
if full_path:
return results
else:
return [os.path.relpath(f, command_root_dir) for f in results]
class Logger(Pipe):
"""
    This pipe saves all the information about the training process of the
    model. It is important for understanding the training process and for
    reproducing the experiment.
    The information is stored in the directory ```<log_dir>/<name>/<timestamp>```,
    where the timestamp is a string representation of the time when the
    experiment was created.
    During the initialization, the Logger creates all the necessary directories
    for storing information. It saves the bash command that has triggered
    the Trainer creation in the text file called "bash_command.txt", and it saves
    all the contents of the directory from where the command was called in the
    archive "snapshot.zip" (except for files and folders that match the masks
    in ```ignore_list```). It also creates the ```predictions``` directory inside
    the log folder and, optionally, dumps the conda or pip environment to
    "environment.txt".
    The following information is logged during the training process:
    * the trainer status is appended after each batch to batch_log.json and
    after each epoch to epoch_log.json in the log folder
    * images, audios, figures, texts and files are stored in the corresponding
    subdirectories of the log folder if the processing function ```f``` is
    specified.
    Args:
        f (callable): function for test samples visualization. If set to None, test will not be visualized.
        name (str): name of the experiment (will be used as the name of the log folder)
        log_dir (str): path to the directory where the logs are stored.
        make_snapshot (bool): whether to archive the working directory into "snapshot.zip".
        ignore_list (list of str): file masks not to include in the snapshot.
        full_snapshot_path (bool): whether to keep full paths for the files inside the snapshot archive.
        collect_environment (bool): whether to dump the conda/pip package list to "environment.txt".
"""
def __init__(self, f=None, name='experiment', log_dir='runs', make_snapshot=True,
ignore_list=[
'*.zip*',
'*.pth*',
'*__pycache__*',
'*.ipynb_checkpoints*',
'*.jpg',
'*.jpeg',
'*.png',
'*.wav',
'*.mp4',
'*.bmp',
'*.mov',
'*.mp3',
'*.csv',
'*.txt',
'*.json',
'*.tar.gz',
'*.zip',
'*.gzip',
'*.7z',
'*.ipynb',
'*.coredump',
'*data*',
'logs/*',
'runs/*',
'core.*'],
full_snapshot_path=False, collect_environment=True):
super(Logger, self).__init__()
self.root_path = None
self.f = f
self.name = name
self.log_dir = log_dir
self.make_snapshot = make_snapshot
self.full_snapshot_path = full_snapshot_path
self.collect_environment = collect_environment
self.ignore_list = ignore_list
def on_init(self):
self.root_path = os.path.join(self.log_dir, self.name,
str(self.trainer.creation_time).replace(' ', '_').replace(':', '-'))
if not os.path.exists(self.root_path):
os.makedirs(self.root_path)
with open(os.path.join(self.root_path, 'bash_command.txt'), 'w+') as fout:
fout.write(' '.join(sys.argv))
if self.make_snapshot:
command_root_dir = os.getcwd()
with zipfile.ZipFile(os.path.join(self.root_path, 'snapshot.zip'), 'w') as snapshot:
snapshot_list = collect_snapshot_list(command_root_dir, self.ignore_list, self.full_snapshot_path)
for file in snapshot_list:
snapshot.write(file)
print('Made snapshot of size {:.2f} MB'.format(
os.path.getsize(os.path.join(self.root_path, 'snapshot.zip')) / (1024 * 1024)))
if self.collect_environment:
is_conda = os.path.exists(os.path.join(sys.prefix, 'conda-meta'))
if is_conda:
print('Collecting environment using conda...', end=' ')
code, res = get_process_output('conda env export')
else:
print('Collecting environment using pip...', end=' ')
code, res = get_process_output('pip list')
print('FAILED' if code != 0 else 'OK')
with open(os.path.join(self.root_path, 'environment.txt'), 'w') as f:
f.write(res)
predictions_dir = os.path.join(self.root_path, 'predictions')
if not os.path.exists(predictions_dir):
os.makedirs(predictions_dir)
# def before_epoch(self):
# """
# Dumps metrics to the log file.
# """
# if self.trainer._mode == 'train':
# with open(os.path.join(self.root_path, 'metrics.txt'), 'a+') as fout:
# if hasattr(self.trainer, '_metrics'):
# fout.write(str(self.trainer._epoch - 1) + '\t' + str(self.trainer._metrics) + '\n')
@staticmethod
def make_dirs(fname):
dir_name = os.sep.join(fname.split(os.sep)[:-1])
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def save_image(self, name, content, epoch, ext='png'):
fname = os.path.join(self.root_path, str(epoch) + '_' + name)
if len(fname.split(os.sep)[-1].split('.')) == 1:
fname = fname + '.' + ext
if len(content.shape) == 3:
content = content.swapaxes(0, 2).swapaxes(0, 1)
self.make_dirs(fname)
skimage.io.imsave(fname, content)
def save_text(self, name, content, epoch, ext='txt'):
fname = os.path.join(self.root_path, str(epoch) + '_' + name)
if len(fname.split(os.sep)[-1].split('.')) == 1:
fname = fname + '.' + ext
self.make_dirs(fname)
with open(fname, 'w+') as fout:
fout.write(content)
def save_audio(self, name, content, epoch, ext='wav'):
fname = os.path.join(self.root_path, str(epoch) + '_' + name)
if len(fname.split(os.sep)[-1].split('.')) == 1:
fname = fname + '.' + ext
self.make_dirs(fname)
scipy.io.wavfile.write(fname, 44100, content)
def save_figure(self, name, content, epoch, ext='png'):
fname = os.path.join(self.root_path, str(epoch) + '_' + name)
if len(fname.split(os.sep)[-1].split('.')) == 1:
fname = fname + '.' + ext
self.make_dirs(fname)
content.savefig(fname)
def save_file(self, name, content, epoch, ext='bin'):
fname = os.path.join(self.root_path, str(epoch) + '_' + name)
if len(fname.split(os.sep)[-1].split('.')) == 1:
fname = fname + '.' + ext
self.make_dirs(fname)
with open(fname, 'wb+') as fout:
# print(content)
content.seek(0)
fout.write(content.read())
def show(self, to_show, id):
type_writers = {
'images': self.save_image,
'texts': self.save_text,
'audios': self.save_audio,
'figures': self.save_figure,
'files': self.save_file
}
for type in type_writers:
if type in to_show:
for desc in to_show[type]:
kwargs = {
'name': os.path.join(type, str(id), desc),
'content': to_show[type][desc],
'epoch': str(self.trainer._epoch)
}
type_writers[type](**kwargs)
def after_batch(self):
"""
Writes the loss to the loss log (in case of train mode).
Also performs visualisation in case of test mode.
"""
if self.trainer._mode == 'train':
with jsonlines.open(os.path.join(self.root_path, 'batch_log.json'), 'a') as fout:
fout.write(self.trainer.status)
# fout.write(status)
# fout.write(str(self.trainer._epoch) + '\t' +
# str(self.trainer._loss.detach().cpu().item()) + '\n')
if self.trainer._mode == 'test' and (self.f is not None):
for index in range(len(self.trainer._ids)):
one_input = self.trainer.collection_op.split_index(self.trainer._input, index)[0]
one_output = self.trainer.collection_op.split_index(self.trainer._output, index)[0]
res = self.f(one_input, one_output)
id = self.trainer._ids[index]
self.show(res, id)
def after_epoch(self):
"""
Writes the trainer status to the log file.
"""
# line = ' '.join([str(k) + ': ' + str(v) for k, v in self.trainer.status.items()])
with jsonlines.open(os.path.join(self.root_path, 'epoch_log.json'), 'a') as fout:
# print(self.trainer.status)
fout.write(self.trainer.status)
# fout.write(line + '\n')
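# --- Illustrative sketch (not part of the original module) ---
# check_list() returns True for paths that survive the ignore masks and False
# for paths that match one of them; collect_snapshot_list() applies the same
# filter to a whole directory tree. The masks below mirror a few of the
# defaults from Logger.__init__.
if __name__ == '__main__':
    ignore = ['*.zip*', '*.pth*', '*.png', 'logs/*', 'runs/*']
    print(check_list('train.py', ignore))           # True  -> kept in the snapshot
    print(check_list('weights/best.pth', ignore))   # False -> excluded
    print(collect_snapshot_list(os.getcwd(), ignore, full_path=False))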
|
71067
|
from django import forms
from tally_ho.libs.permissions import groups
from tally_ho.apps.tally.models.tally import Tally
from tally_ho.apps.tally.models.user_profile import UserProfile
from tally_ho.libs.utils.form import lower_case_form_data
disable_copy_input = {
'onCopy': 'return false;',
'onDrag': 'return false;',
'onDrop': 'return false;',
'onPaste': 'return false;',
'autocomplete': 'off',
'class': 'form-control required'
}
class TallyForm(forms.ModelForm):
class Meta:
model = Tally
fields = ['name']
administrators = forms.ModelMultipleChoiceField(
queryset=UserProfile.objects.filter(
groups__name__exact=groups.SUPER_ADMINISTRATOR),
widget=forms.CheckboxSelectMultiple())
def __init__(self, *args, **kwargs):
if 'instance' in kwargs and kwargs['instance']:
initial = kwargs.setdefault('initial', {})
initial['administrators'] = [
admin.pk for admin in kwargs['instance'].administrators.all()]
super(TallyForm, self).__init__(*args, **kwargs)
self.fields['name'].widget.attrs.update({'class': 'form-control'})
def clean(self):
if self.is_valid():
lower_case_form_data(self, TallyForm, ['name'])
def save(self):
instance = forms.ModelForm.save(self)
instance.administrators.clear()
for admin in self.cleaned_data['administrators']:
instance.administrators.add(admin)
return instance
|
71104
|
from __future__ import print_function
import sys
import os
from glob import glob
import shutil
try:
import setuptools
except ImportError:
sys.stderr.write(
"Please install setuptools before running this script. Exiting.")
sys.exit(1)
from setuptools import setup, find_packages
# --------------------------------------------------------------------------- #
# Basic project information
# --------------------------------------------------------------------------- #
name = 'BGWpy'
description = 'Interface BerkeleyGW flows in python.'
license = 'BSD'
url = 'https://github.com/BerkeleyGW/BGWpy'
__version__ = '3.1.4'
# author and author_email should be a single string, not a list, but we can put
# multiple authors / emails by separating them by commas inside the string.
# If you contributed to this package, add your name and email. Don't be shy!
author = '<NAME>'
author_email = '<EMAIL>'
# Requirements
install_requires = [
'numpy >=1.6',
'pymatgen >=2020',
]
# --------------------------------------------------------------------------- #
# Helper functions
# --------------------------------------------------------------------------- #
def find_package_data():
package_data={'BGWpy': ['data/structures/*', 'data/pseudos/*']}
return package_data
def find_data_files():
return [('config', ['config/BGWpyrc'])]
def find_scripts():
scripts = []
scripts.extend(glob(os.path.join('BGWpy', 'scripts', "*.py")))
return scripts
def cleanup():
print('Cleaning up')
shutil.rmtree('BGWpy.egg-info', ignore_errors=True)
shutil.rmtree('build', ignore_errors=True)
shutil.rmtree('__pycache__', ignore_errors=True)
# --------------------------------------------------------------------------- #
# Setup
# --------------------------------------------------------------------------- #
setup_args = dict(
name = name,
version = __version__,
description = description,
author = author,
author_email = author_email,
license = license,
url = url,
install_requires = install_requires,
packages = find_packages(),
package_data = find_package_data(),
data_files = find_data_files(),
scripts = find_scripts(),
)
if __name__ == "__main__":
setup(**setup_args)
|
71113
|
import json, re, select, random, traceback, urllib, datetime, base64
import asyncio, aiohttp
# The core codes for YouTube support are basically from taizan-hokuto/pytchat
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36",
}
class Youtube:
q = None
url = ""
vid = ""
ctn = ""
client = None
stop = False
@classmethod
async def run(cls, url, q, client, **kargs):
from .paramgen import liveparam
cls.q = q
cls.url = url
cls.client = client
cls.stop = False
        cls.key = "<KEY>"  # anonymized placeholder; decoded into the API path below
await cls.get_url()
while cls.stop == False:
try:
await cls.get_room_info()
cls.ctn = liveparam.getparam(cls.vid, cls.cid, 1)
await cls.get_chat()
except:
traceback.print_exc()
await asyncio.sleep(1)
@classmethod
async def stop(cls):
        cls.stop = True
@classmethod
async def get_url(cls):
a = re.search(r"youtube.com/channel/([^/?]+)", cls.url)
try:
cid = a.group(1)
cls.cid = cid
cls.url = f"https://www.youtube.com/channel/{cid}/videos"
except:
a = re.search(r"youtube.com/watch\?v=([^/?]+)", cls.url)
async with cls.client.request(
"get", f"https://www.youtube.com/embed/{a.group(1)}"
) as resp:
b = re.search(r'\\"channelId\\":\\"(.{24})\\"', await resp.text())
cls.cid = b.group(1)
cls.url = f"https://www.youtube.com/channel/{cls.cid}/videos"
@classmethod
async def get_room_info(cls):
async with cls.client.request("get", cls.url) as resp:
t = re.search(
r'"gridVideoRenderer"((.(?!"gridVideoRenderer"))(?!"style":"UPCOMING"))+"label":"(LIVE|LIVE NOW|PREMIERING NOW)"([\s\S](?!"style":"UPCOMING"))+?("gridVideoRenderer"|</script>)',
await resp.text(),
).group(0)
cls.vid = re.search(r'"gridVideoRenderer".+?"videoId":"(.+?)"', t).group(1)
# print(cls.vid)
@classmethod
async def get_chat_single(cls):
msgs = []
data = {
"context": {
"client": {
"visitorData": "",
"userAgent": headers["user-agent"],
"clientName": "WEB",
"clientVersion": "".join(
(
"2.",
(datetime.datetime.today() - datetime.timedelta(days=1)).strftime(
"%Y%m%d"
),
".01.00",
)
),
},
},
"continuation": cls.ctn,
}
u = f'https://www.youtube.com/{base64.b64decode(cls.key).decode("utf-8")}'
async with cls.client.request("post", u, headers=headers, json=data) as resp:
# print(await resp.text())
j = await resp.json()
j = j["continuationContents"]
cont = j["liveChatContinuation"]["continuations"][0]
if cont is None:
raise Exception("No Continuation")
metadata = (
cont.get("invalidationContinuationData")
or cont.get("timedContinuationData")
or cont.get("reloadContinuationData")
or cont.get("liveChatReplayContinuationData")
)
cls.ctn = metadata["continuation"]
# print(j['liveChatContinuation'].get('actions'))
for action in j["liveChatContinuation"].get("actions", []):
try:
renderer = action["addChatItemAction"]["item"]["liveChatTextMessageRenderer"]
msg = {}
msg["name"] = renderer["authorName"]["simpleText"]
message = ""
runs = renderer["message"].get("runs")
for r in runs:
if r.get("emoji"):
message += r["emoji"].get("shortcuts", [""])[0]
else:
message += r.get("text", "")
msg["content"] = message
msg["msg_type"] = "danmaku"
msgs.append(msg)
except:
pass
return msgs
@classmethod
async def get_chat(cls):
while cls.stop == False:
ms = await cls.get_chat_single()
if len(ms) != 0:
interval = 1 / len(ms)
else:
await asyncio.sleep(1)
for m in ms:
await cls.q.put(m)
await asyncio.sleep(interval)
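# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of driving the scraper: Youtube.run() expects a channel or
# watch URL, a queue that chat messages (dicts with "name", "content" and
# "msg_type") are pushed into, and an aiohttp session. The URL is a placeholder.
async def _example():
    q = asyncio.Queue()
    async with aiohttp.ClientSession() as client:
        producer = asyncio.ensure_future(Youtube.run("https://www.youtube.com/channel/CHANNEL_ID", q, client))  # runs in the background
        while True:
            msg = await q.get()
            print(msg["name"], msg["content"])
# To try it: asyncio.get_event_loop().run_until_complete(_example())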
|
71120
|
import math;
import decimal;
import sys
def trunc(f):
if f == 0:
return '0.0';
d = decimal.Decimal(f)
if type(d)(int(d)) == d:
return str(d) + '.0';
slen = len('%.*f' % (20, d))
return str(d)[:slen]
# min, max: values; lower, upper: domain
def interpolate(minV, maxV, lower, upper, varname):
fact = (maxV-minV) / (upper-lower);
return "" + trunc(minV) + " + (" + trunc(fact) + " * (" + varname + " - " + trunc(lower) + "))";
def ite(ifCond, thenExpr, elseExpr):
return "if (" + ifCond + ") then " + thenExpr + " else " + elseExpr;
def iteBlock(dom, code, varname):
max = len(dom)-1;
block = code[max];
for i in range(len(dom)-1):
cond = "" + trunc(dom[max-i-1]) + "<=" + varname + " and " + varname + "<=" + trunc(dom[max-i]);
block = ite(cond, code[max-i-1], block);
if (i<max-1):
block = "\n " + block;
return block;
def itpBlock(dom, vals, lastElseExpr, varname):
block = lastElseExpr;
max = len(dom)-1;
code = [lastElseExpr];
for i in range(len(dom)-1):
itp = interpolate(vals[max-i-1], vals[max-i], dom[max-i-1], dom[max-i], varname);
code.append(itp);
code.reverse();
return iteBlock(dom, code, varname);
def funcNode(name, inputDecl, code, outputName, outputType):
node = "node " + name + "(" + inputDecl + ") returns (" + outputName + ":" + outputType + ");";
node = node + "\nlet\n";
node = node + outputName + " = " + code + ";";
node = node + "\ntel\n";
return node;
def interval(minD, maxD, precision):
dom = [];
width = float(maxD) - float(minD);
step = width / (float(precision) - 1.0);
dval = float(minD);
for i in range(0, precision):
dom.append(dval);
# print dval;
dval += step;
return dom;
# generates an interval with exponentially
# decreasing resolution (2^) from minD to
# maxD around center.
#def intervalExponential(minD, maxD, center):
#def intervalExponential(center):
# return intervalExponential(sys.float_info.min, sys.float_info.max, center);
def toRadians(interval):
for i in range(0, len(interval)):
interval[i] = math.radians(interval[i]);
return interval;
def sinLookupNode(precision):
dom = toRadians( interval(0, 90, precision) );
val = [];
for i in range(0, precision):
val.append( math.sin(dom[i]) );
code = itpBlock(dom, val, "-1.0", "x");
return funcNode("__sin", "x:real", code, "out", "real");
def tanLookupNode(precision):
dom = toRadians( interval(0, 90, precision) );
val = [];
for i in range(0, precision):
val.append( math.tan(dom[i]) );
code = itpBlock(dom, val, "-1.0", "x");
return funcNode("__tan", "x:real", code, "out", "real");
def asinLookupNode(precision):
dom = toRadians( interval(0, 90, precision) );
val = [];
for i in range(0, precision):
dom[i] = math.sin(dom[i]);
val.append( math.asin(dom[i]) );
code = itpBlock(dom, val, "-1.0", "x");
return funcNode("__asin", "x:real", code, "out", "real");
def sinNode():
dom = toRadians( interval(0, 360, 5) );
val = ["__sin(x)", "__sin(" + trunc(math.pi) + "-x)", "-(__sin(x - "+ trunc(math.pi)+"))", "-(__sin(" + trunc(math.pi*2.0) + " - x)) ", "-1.0"];
code = iteBlock(dom, val, "x")
return funcNode("zsin", "x:real", code, "out", "real");
def tanNode():
dom = toRadians( interval(0, 360, 5) );
val = ["__tan(x)", "-(__tan(" + trunc(math.pi) + "-x))", "__tan(x - "+ trunc(math.pi)+")", "-(__tan(" + trunc(math.pi*2.0) + " - x)) ", "-1.0"];
code = iteBlock(dom, val, "x")
return funcNode("ztan", "x:real", code, "out", "real");
def cosNode():
dom = [0.0, math.pi/2.0*3.0, math.pi*2.0];
val = ["zsin(" + trunc(math.pi/2.0) + " + x)", "zsin(x - " + trunc(math.pi/2.0*3.0) + ")", "-1.0"];
code = iteBlock(dom, val, "x")
return funcNode("zcos", "x:real", code, "out", "real");
def asinNode():
dom = [-1.0, 0.0, 1.0];
val = ["-(__asin(-x))", "__asin(x)", "-1.0"];
code = iteBlock(dom, val, "x")
return funcNode("zasin", "x:real", code, "out", "real");
def acosNode():
return funcNode("zacos", "x:real", "" + trunc(math.pi/2.0) + "-zasin(x)" , "out", "real");
def atanLookupNode(precision):
precision -= 1;
dom = toRadians( interval(0, 360, precision) );
dom.append(math.pi*512.0)
precision += 1;
val = [];
for i in range(0, precision):
val.append( math.atan(dom[i]) );
code = itpBlock(dom, val, trunc(math.pi/2.0), "x");
return funcNode("__atan", "x:real", code, "out", "real");
def atanNode():
code = ite("x>=0.0", "__atan(x)", "-(__atan(-x))");
return funcNode("zatan", "x:real", code, "out", "real");
# FH: maybe the divisions here are problematic?
#
def atan2Node():
x_less_0 = ite("y>=0.0", "zatan(y/x) + " + trunc(math.pi), "zatan(y/x) - " + trunc(math.pi));
y_leq_0 = ite("y<0.0", trunc(math.pi/-2.0), trunc(math.atan2(0,0)) );
x_eq_0 = ite("y>0.0", trunc(math.pi/2.0), "\n " + y_leq_0);
code = ite("x<0.0", "\n " + x_less_0, "\n " + x_eq_0)
code = ite("x>0.0", "zatan(y/x)", "\n " + code);
return funcNode("zatan2", "y:real; x:real", code, "out", "real");
def cos():
    print(sinLookupNode(11));
    print(sinNode());
    print(cosNode());
    return
def sin():
    print(sinLookupNode(11));
    print(sinNode());
def tan():
    print(tanLookupNode(11));
    print(tanNode());
def acos():
    print(asinLookupNode(11));
    print(asinNode());
    print(acosNode());
def asin():
    print(asinLookupNode(11));
    print(asinNode());
def atan():
    print(atanLookupNode(11));
    print(atanNode());
def atan2():
    print(atanLookupNode(11));
    print(atanNode());
    print(atan2Node());
def parseArgs(argv):
import argparse as arg
p = arg.ArgumentParser (description='\t Generate Math functions')
p.add_argument ('--trig', help='Trig function', dest='trig', nargs="*", required=False, default = [])
pars = p.parse_args(argv)
return pars
if __name__ == "__main__":
#args = parseArgs(sys.argv[1:])
#for t in args.trig:
# try:
# if t=="cos": cos()
# if t=="sin": sin()
# if t=="tan": tan()
# if t=="asin": asin()
# if t=="acos": acos()
# if t=="atan": atan()
# if t=="atan2": atan2()
# except Exception as e:
# print e
    print(sinLookupNode(11));
    print(sinNode());
    print(cosNode());
    print(asinLookupNode(11));
    print(asinNode());
    print(acosNode());
    print(atanLookupNode(11));
    print(atanNode());
    print(atan2Node());
    print(tanLookupNode(11));
    print(tanNode());
|
71128
|
import pytest
from redis.exceptions import RedisError
from rq.exceptions import NoSuchJobError
from busy_beaver.models import Task, PostGitHubSummaryTask, PostTweetTask
MODULE_TO_TEST = "busy_beaver.models.task"
###########
# Base Task
###########
def test_create_task(session):
# Arrange
task = Task(
job_id="abcd",
name="task_created_for_test",
description="Task created for testing purposes",
)
# Act
session.add(task)
session.commit()
# Assert
assert task.job_id == "abcd"
assert task.complete is False
assert task.failed is False
def add(x, y):
return x + y
def test_run_async_task_update_progress(app, rq, session):
# Arrange
rq.job(add)
job = add.queue(5, 2)
job.meta["progress"] = 100
job.save_meta()
# Act
queued_task = Task(job_id=job.id, name="Add", description="Add task")
session.add(queued_task)
session.commit()
# Assert
assert queued_task.get_progress() == 100
def test_run_async_task_get_job_from_task(app, rq, session):
# Arrange
rq.job(add)
job = add.queue(5, 2)
queued_task = Task(job_id=job.id, name="Add", description="Add task")
session.add(queued_task)
session.commit()
# Act
retrieved_job = queued_task.get_rq_job()
# Assert
assert retrieved_job.id == job.id
@pytest.fixture
def patched_rq(patcher):
def _wrapper(replacement):
return patcher(MODULE_TO_TEST, namespace="Job", replacement=replacement)
return _wrapper
@pytest.mark.parametrize("raise_exc", [RedisError, NoSuchJobError])
def test_task_model_get_job_raises_exception(app, rq, session, patched_rq, raise_exc):
# Arrange
class FakeJob:
def __init__(self, error):
self.error = error
def fetch(self, *args, **kwargs):
raise self.error
patched_rq(FakeJob(raise_exc))
rq.job(add)
job = add.queue(5, 2)
queued_task = Task(job_id=job.id, name="Add", description="Add task")
session.add(queued_task)
session.commit()
# Act
retrieved_job = queued_task.get_rq_job()
# Assert
assert retrieved_job is None
#####################
# GitHub Summary Task
#####################
def test_post_github_summary_task(session):
# Arrange
channel_name = "test-channel"
task = PostGitHubSummaryTask(
job_id="abcd",
name="task_created_for_test",
description="Task created for testing purposes",
data={"channel_name": channel_name},
)
# Act
session.add(task)
session.commit()
# Assert
assert task.job_id == "abcd"
assert task.complete is False
assert task.failed is False
assert task.data["channel_name"] == channel_name
#################
# Post Tweet Task
#################
def test_post_tweet_task(session):
# Arrange
channel_name = "test-channel"
task = PostTweetTask(
job_id="abcd",
name="task_created_for_test",
description="Task created for testing purposes",
data={"channel_name": channel_name},
)
# Act
session.add(task)
session.commit()
# Assert
assert task.job_id == "abcd"
assert task.complete is False
assert task.failed is False
assert task.data["channel_name"] == channel_name
|
71152
|
import glob
from setuptools import setup
setup(
name='edxcut',
version='0.4',
author='<NAME> and <NAME>',
author_email='<EMAIL>',
packages=['edxcut'],
scripts=[],
url='http://pypi.python.org/pypi/edxcut/',
license='LICENSE.txt',
description='edX course unit tester',
long_description=open('README.md').read(),
include_package_data=True,
entry_points={
'console_scripts': [
'edxcut = edxcut.main:CommandLine',
],
},
install_requires=['lxml',
'requests',
'pyyaml',
'pytest',
'pysrt',
],
package_dir={'edxcut': 'edxcut'},
package_data={},
# data_files = data_files,
# test_suite="edxcut.test",
)
|
71202
|
import random
from ananas import PineappleBot, ConfigurationError, hourly, reply
def make_gram(word_array):
return " ".join(word_array)
class NGramTextModel():
def __init__(self, n, lines):
self.n = n
self.gram_dictionary = dict()
self.build_from_lines(lines)
def build_from_lines(self, lines):
for line in lines:
line = line.replace("\r", " ")
line = line.replace("\n", " ")
line_arr = ["^"] * self.n + [word.strip() for word in line.split()] + ["$"] * self.n
for i in range(self.n, len(line_arr)):
gram = make_gram(line_arr[i - self.n : i])
word = line_arr[i]
if (gram not in self.gram_dictionary):
self.gram_dictionary[gram] = []
self.gram_dictionary[gram].append(word)
def generate_sentence(self):
sentence = self.n*["^"]
next_gram = sentence[-self.n : ]
while(next_gram != ["$"]*self.n):
try:
word_suggestion = random.choice(self.gram_dictionary[make_gram(next_gram)])
sentence += [word_suggestion]
except IndexError:
break
next_gram = sentence[-self.n : ]
return " ".join(sentence[self.n : -self.n])
class MarkovBot(PineappleBot):
    def init(self):
        self.config.n = 2
    def start(self):
        if "corpus" not in self.config: raise ConfigurationError("MarkovBot requires a 'corpus'")
        with open(self.config.corpus, "r") as f:
            if f: self.model = NGramTextModel(self.config.n, f.readlines())
            else: raise ConfigurationError("Couldn't open corpus file")
@reply
def reply(self, mention, user):
self.mastodon.status_post("@{} {}".format(user["acct"],
self.model.generate_sentence()),
in_reply_to_id = mention["id"],
visibility = mention["visibility"])
@hourly()
def post(self):
self.mastodon.toot(self.model.generate_sentence())
|
71245
|
from social_auth.backends import PIPELINE
from social_auth.utils import setting
def save_status_to_session(request, auth, pipeline_index, *args, **kwargs):
"""Saves current social-auth status to session."""
next_entry = setting('SOCIAL_AUTH_PIPELINE_RESUME_ENTRY')
if next_entry and next_entry in PIPELINE:
idx = PIPELINE.index(next_entry)
else:
idx = pipeline_index + 1
data = auth.to_session_dict(idx, *args, **kwargs)
name = setting('SOCIAL_AUTH_PARTIAL_PIPELINE_KEY', 'partial_pipeline')
request.session[name] = data
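# --- Illustrative settings sketch (not part of the original module) ---
# The two settings consulted above could look like this in a Django settings
# module; the resume entry value is a hypothetical pipeline function name:
#
#   SOCIAL_AUTH_PARTIAL_PIPELINE_KEY = 'partial_pipeline'
#   SOCIAL_AUTH_PIPELINE_RESUME_ENTRY = 'myapp.pipeline.require_email'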
|
71269
|
from __future__ import print_function
import torch
import torch.optim as optim
from data.data_loader import CreateDataLoader
import tqdm
import cv2
import yaml
from schedulers import WarmRestart, LinearDecay
import numpy as np
from models.networks import get_nets
from models.losses import get_loss
from models.models import get_model
from tensorboardX import SummaryWriter
import logging
logging.basicConfig(filename='res.log',level=logging.DEBUG)
writer = SummaryWriter('res_runs')
REPORT_EACH = 100
torch.backends.cudnn.benchmark = True
cv2.setNumThreads(0)
class Trainer:
def __init__(self, config):
self.config = config
self.train_dataset = self._get_dataset(config, 'train')
self.val_dataset = self._get_dataset(config, 'test')
self.best_metric = 0
self.warmup_epochs = config['warmup_num']
def train(self):
self._init_params()
for epoch in range(0, config['num_epochs']):
if (epoch == self.warmup_epochs) and not(self.warmup_epochs == 0):
self.netG.module.unfreeze()
self.optimizer_G = self._get_optim(self.netG, self.config['optimizer']['lr_G'])
self.scheduler_G = self._get_scheduler(self.optimizer_G)
train_loss = self._run_epoch(epoch)
val_loss, val_psnr = self._validate(epoch)
self.scheduler_G.step()
val_metric = val_psnr
if val_metric > self.best_metric:
self.best_metric = val_metric
torch.save({
'model': self.netG.state_dict()
}, 'best_{}.h5'.format(self.config['experiment_desc']))
torch.save({
'model': self.netG.state_dict()
}, 'last_{}.h5'.format(self.config['experiment_desc']))
print(('val_loss={}, val_metric={}, best_metric={}\n'.format(val_loss, val_metric, self.best_metric)))
logging.debug("Experiment Name: %s, Epoch: %d, Train Loss: %.3f, Val Accuracy: %.3f, Val Loss: %.3f, Best Loss: %.3f" % (
self.config['experiment_desc'], epoch, train_loss, val_loss, val_metric, self.best_metric))
def _run_epoch(self, epoch):
self.netG = self.netG.train()
losses_G = []
losses_vgg = []
losses_adv = []
psnrs = []
ssim = []
batches_per_epoch = len(self.train_dataset) / config['batch_size']
for param_group in self.optimizer_G.param_groups:
lr = param_group['lr']
tq = tqdm.tqdm(self.train_dataset.dataloader)
tq.set_description('Epoch {}, lr {}'.format(epoch, lr))
i = 0
for data in tq:
inputs, targets = self.model.get_input(data)
outputs = self.netG(inputs)
for _ in range(config['D_update_ratio']):
self.optimizer_D.zero_grad()
loss_D = config['loss']['adv'] * self.criterionD(self.netD, outputs, targets)
loss_D.backward(retain_graph=True)
self.optimizer_D.step()
self.optimizer_G.zero_grad()
loss_content = self.criterionG(outputs, targets)
loss_adv = self.criterionD.get_g_loss(self.netD, outputs)
loss_G = loss_content + config['loss']['adv'] * loss_adv
loss_G.backward()
self.optimizer_G.step()
losses_G.append(loss_G.item())
losses_vgg.append(loss_content.item())
losses_adv.append(loss_adv.item())
curr_psnr, curr_ssim = self.model.get_acc(outputs, targets)
psnrs.append(curr_psnr)
ssim.append(curr_ssim)
mean_loss_G = np.mean(losses_G[-REPORT_EACH:])
mean_loss_vgg = np.mean(losses_vgg[-REPORT_EACH:])
mean_loss_adv = np.mean(losses_adv[-REPORT_EACH:])
mean_psnr = np.mean(psnrs[-REPORT_EACH:])
mean_ssim = np.mean(ssim[-REPORT_EACH:])
if i % 100 == 0:
writer.add_scalar('Train_G_Loss', mean_loss_G, i + (batches_per_epoch * epoch))
writer.add_scalar('Train_G_Loss_vgg', mean_loss_vgg, i + (batches_per_epoch * epoch))
writer.add_scalar('Train_G_Loss_adv', mean_loss_adv, i + (batches_per_epoch * epoch))
writer.add_scalar('Train_PSNR', mean_psnr, i + (batches_per_epoch * epoch))
writer.add_scalar('Train_SSIM', mean_ssim, i + (batches_per_epoch * epoch))
writer.add_image('output', outputs)
writer.add_image('target', targets)
self.model.visualize_data(writer, data, i + (batches_per_epoch * epoch))
tq.set_postfix(loss=self.model.get_loss(mean_loss_G, mean_psnr, mean_ssim))
i += 1
tq.close()
return np.mean(losses_G)
def _validate(self, epoch):
self.netG = self.netG.eval()
losses = []
psnrs = []
ssim = []
tq = tqdm.tqdm(self.val_dataset.dataloader)
tq.set_description('Validation')
for data in tq:
inputs, targets = self.model.get_input(data)
outputs = self.netG(inputs)
loss_content = self.criterionG(outputs, targets)
loss_G = loss_content + config['loss']['adv'] * self.criterionD.get_g_loss(self.netD, outputs)
losses.append(loss_G.item())
curr_psnr, curr_ssim = self.model.get_acc(outputs, targets, full=True)
psnrs.append(curr_psnr)
ssim.append(curr_ssim)
val_loss = np.mean(losses)
val_psnr = np.mean(psnrs)
val_ssim = np.mean(ssim)
tq.close()
writer.add_scalar('Validation_Loss', val_loss, epoch)
writer.add_scalar('Validation_PSNR', val_psnr, epoch)
writer.add_scalar('Validation_SSIM', val_ssim, epoch)
writer.add_image('output', outputs)
writer.add_image('target', targets)
return val_loss, val_psnr
def _get_dataset(self, config, filename):
data_loader = CreateDataLoader(config, filename)
return data_loader.load_data()
def _get_optim(self, model, lr):
if self.config['optimizer']['name'] == 'adam':
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
elif self.config['optimizer']['name'] == 'sgd':
optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
elif self.config['optimizer']['name'] == 'adadelta':
optimizer = optim.Adadelta(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
else:
raise ValueError("Optimizer [%s] not recognized." % self.config['optimizer']['name'])
return optimizer
def _get_scheduler(self, optimizer):
if self.config['scheduler']['name'] == 'plateau':
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode='min',
patience=self.config['scheduler']['patience'],
factor=self.config['scheduler']['factor'],
min_lr=self.config['scheduler']['min_lr'])
        elif self.config['scheduler']['name'] == 'sgdr':
scheduler = WarmRestart(optimizer)
elif self.config['scheduler']['name'] == 'linear':
scheduler = LinearDecay(optimizer,
min_lr=self.config['scheduler']['min_lr'],
num_epochs=self.config['num_epochs'],
start_epoch=self.config['scheduler']['start_epoch'])
else:
raise ValueError("Scheduler [%s] not recognized." % self.config['scheduler']['name'])
return scheduler
def _init_params(self):
self.netG, self.netD = get_nets(self.config['model'])
self.netG.cuda()
self.netD.cuda()
self.model = get_model(self.config['model'])
self.criterionG, self.criterionD = get_loss(self.config['model'])
self.optimizer_G = self._get_optim(self.netG, self.config['optimizer']['lr_G'])
self.optimizer_D = self._get_optim(self.netD, self.config['optimizer']['lr_D'])
self.scheduler_G = self._get_scheduler(self.optimizer_G)
self.scheduler_D = self._get_scheduler(self.optimizer_D)
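# --- Illustrative config sketch (not part of the original trainer) ---
# These are the keys this Trainer actually reads from config/deblur_solver.yaml;
# the values are placeholders, and CreateDataLoader / get_nets may require
# additional keys that are not visible here.
#
#   experiment_desc: deblur_baseline
#   warmup_num: 3
#   num_epochs: 200
#   batch_size: 1
#   D_update_ratio: 1
#   model: ...               # forwarded to get_nets / get_model / get_loss
#   loss:
#     adv: 0.001
#   optimizer:
#     name: adam             # adam | sgd | adadelta
#     lr_G: 0.0001
#     lr_D: 0.0001
#   scheduler:
#     name: linear           # plateau | sgdr | linear
#     start_epoch: 50
#     min_lr: 0.0000001
#     patience: 10
#     factor: 0.5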
if __name__ == '__main__':
with open('config/deblur_solver.yaml', 'r') as f:
        config = yaml.safe_load(f)
trainer = Trainer(config)
trainer.train()
|
71284
|
from __future__ import absolute_import
import sys
import copy
import operator
from functools import reduce
from sqlbuilder.smartsql.compiler import compile
from sqlbuilder.smartsql.constants import CONTEXT, PLACEHOLDER, MAX_PRECEDENCE
from sqlbuilder.smartsql.exceptions import MaxLengthError
from sqlbuilder.smartsql.pycompat import string_types
from sqlbuilder.smartsql.utils import Undef, is_list, warn
__all__ = (
'Operable', 'Expr', 'ExprList', 'CompositeExpr', 'Param', 'Parentheses', 'OmitParentheses',
'Callable', 'NamedCallable', 'Constant', 'ConstantSpace', 'Case', 'Cast', 'Concat',
'Alias', 'Name', 'NameCompiler', 'Value', 'ValueCompiler', 'Array', 'ArrayItem',
'expr_repr', 'datatypeof', 'const', 'func'
)
SPACE = " "
@compile.when(object)
def compile_object(compile, expr, state):
state.sql.append(PLACEHOLDER)
state.params.append(expr)
@compile.when(type(None))
def compile_none(compile, expr, state):
state.sql.append('NULL')
@compile.when(slice)
def compile_slice(compile, expr, state):
# FIXME: Should be here numrange()? Looks like not, see http://initd.org/psycopg/docs/extras.html#adapt-range
state.sql.append("[")
state.sql.append("{0:d}".format(expr.start))
if expr.stop is not None:
state.sql.append(", ")
state.sql.append("{0:d}".format(expr.stop))
state.sql.append("]")
@compile.when(list)
@compile.when(tuple)
def compile_list(compile, expr, state):
compile(Parentheses(ExprList(*expr).join(", ")), state)
class Operable(object):
__slots__ = ('_datatype', '__weakref__')
def __init__(self, datatype=None):
if datatype is None:
from sqlbuilder.smartsql.datatypes import BaseType
datatype = BaseType
self._datatype = datatype
def __getattr__(self, name):
"""Use in derived classes:
try:
return Operable.__getattr__(self, key)
except AttributeError:
return derived_logic()
"""
if name.startswith('__'): # All allowed special method already defined.
raise AttributeError
delegate = self._datatype(self)
return getattr(delegate, name)
__hash__ = object.__hash__
def __add__(self, other):
return self._datatype(self).__add__(other)
def __radd__(self, other):
return self._datatype(self).__radd__(other)
def __sub__(self, other):
return self._datatype(self).__sub__(other)
def __rsub__(self, other):
return self._datatype(self).__rsub__(other)
def __mul__(self, other):
return self._datatype(self).__mul__(other)
def __rmul__(self, other):
return self._datatype(self).__rmul__(other)
def __div__(self, other):
return self._datatype(self).__div__(other)
def __rdiv__(self, other):
return self._datatype(self).__rdiv__(other)
def __truediv__(self, other):
return self._datatype(self).__truediv__(other)
def __rtruediv__(self, other):
return self._datatype(self).__rtruediv__(other)
def __floordiv__(self, other):
return self._datatype(self).__floordiv__(other)
def __rfloordiv__(self, other):
return self._datatype(self).__rfloordiv__(other)
def __and__(self, other):
return self._datatype(self).__and__(other)
def __rand__(self, other):
return self._datatype(self).__rand__(other)
def __or__(self, other):
return self._datatype(self).__or__(other)
def __ror__(self, other):
return self._datatype(self).__ror__(other)
def __gt__(self, other):
return self._datatype(self).__gt__(other)
def __lt__(self, other):
return self._datatype(self).__lt__(other)
def __ge__(self, other):
return self._datatype(self).__ge__(other)
def __le__(self, other):
return self._datatype(self).__le__(other)
def __eq__(self, other):
return self._datatype(self).__eq__(other)
def __ne__(self, other):
return self._datatype(self).__ne__(other)
def __rshift__(self, other):
return self._datatype(self).__rshift__(other)
def __rrshift__(self, other):
return self._datatype(self).__rshift__(other)
def __lshift__(self, other):
return self._datatype(self).__lshift__(other)
def __rlshift__(self, other):
return self._datatype(self).__lshift__(other)
def __pos__(self):
return self._datatype(self).__pos__()
def __neg__(self):
return self._datatype(self).__neg__()
def __invert__(self):
return self._datatype(self).__invert__()
def __pow__(self, other):
return self._datatype(self).__pow__(other)
def __rpow__(self, other):
return self._datatype(self).__rpow__(other)
def __mod__(self, other):
return self._datatype(self).__mod__(other)
def __rmod__(self, other):
return self._datatype(self).__rmod__(other)
def __abs__(self):
return self._datatype(self).__abs__()
def __getitem__(self, key):
return self._datatype(self).__getitem__(key)
class Expr(Operable):
__slots__ = ('sql', 'params')
def __init__(self, sql, *params, **kwargs):
Operable.__init__(self, kwargs.get('datatype'))
if params and is_list(params[0]):
self.__init__(sql, *params[0])
return
self.sql, self.params = sql, params
def __repr__(self):
return expr_repr(self)
@compile.when(Expr)
def compile_expr(compile, expr, state):
state.sql.append(expr.sql)
state.params += expr.params
class ExprList(Expr):
__slots__ = ('data', )
def __init__(self, *args):
# if args and is_list(args[0]):
# self.__init__(*args[0])
# return
Expr.__init__(self, ' ')
self.data = list(args)
def join(self, sep):
self.sql = sep
return self
def __len__(self):
return len(self.data)
def __setitem__(self, key, value):
self.data[key] = value
def __getitem__(self, key):
if isinstance(key, slice):
start = key.start or 0
end = key.stop or sys.maxsize
return ExprList(*self.data[start:end])
return self.data[key]
def __iter__(self):
return iter(self.data)
def append(self, x):
return self.data.append(x)
def insert(self, i, x):
return self.data.insert(i, x)
def extend(self, l):
return self.data.extend(l)
def pop(self, i):
return self.data.pop(i)
def remove(self, x):
return self.data.remove(x)
def reset(self):
del self.data[:]
return self
def __copy__(self):
dup = copy.copy(super(ExprList, self))
dup.data = dup.data[:]
return dup
@compile.when(ExprList)
def compile_exprlist(compile, expr, state):
first = True
for a in expr:
if first:
first = False
else:
state.sql.append(expr.sql)
compile(a, state)
class CompositeExpr(object):
__slots__ = ('data', 'sql')
def __init__(self, *args):
self.data = args
self.sql = ", "
def as_(self, aliases):
return self.__class__(*(expr.as_(alias) for expr, alias in zip(self.data, aliases)))
def in_(self, composite_others):
return self._op_list(operator.eq, composite_others)
def not_in(self, composite_others):
return ~self._op_list(operator.eq, composite_others)
def _op_list(self, op, composite_others):
return reduce(operator.or_, (self._op(op, composite_other) for composite_other in composite_others))
def _op(self, op, composite_other):
return reduce(operator.and_, (op(expr, val) for (expr, val) in zip(self.data, composite_other)))
def __eq__(self, composite_other):
return self._op(operator.eq, composite_other)
def __ne__(self, composite_other):
return self._op(operator.ne, composite_other)
def __iter__(self):
return iter(self.data)
def __repr__(self):
return expr_repr(self)
@compile.when(CompositeExpr)
def compile_compositeexpr(compile, expr, state):
compile_exprlist(compile, expr, state)
class Param(Expr):
__slots__ = ()
def __init__(self, params):
Operable.__init__(self)
self.params = params
@compile.when(Param)
def compile_param(compile, expr, state):
compile(expr.params, state)
class Parentheses(Expr):
__slots__ = ('expr', )
def __init__(self, expr):
Operable.__init__(self)
self.expr = expr
@compile.when(Parentheses)
def compile_parentheses(compile, expr, state):
state.precedence += MAX_PRECEDENCE
compile(expr.expr, state)
class OmitParentheses(Parentheses):
pass
@compile.when(OmitParentheses)
def compile_omitparentheses(compile, expr, state):
state.precedence = 0
compile(expr.expr, state)
class Callable(Expr):
__slots__ = ('expr', 'args')
def __init__(self, expr, *args):
Operable.__init__(self)
self.expr = expr
self.args = ExprList(*args).join(", ")
@compile.when(Callable)
def compile_callable(compile, expr, state):
compile(expr.expr, state)
state.sql.append('(')
compile(expr.args, state)
state.sql.append(')')
class NamedCallable(Callable):
__slots__ = ()
def __init__(self, *args):
Operable.__init__(self)
self.args = ExprList(*args).join(", ")
@compile.when(NamedCallable)
def compile_namedcallable(compile, expr, state):
state.sql.append(expr.sql)
state.sql.append('(')
compile(expr.args, state)
state.sql.append(')')
class Constant(Expr):
__slots__ = ()
def __init__(self, const):
Expr.__init__(self, const.upper())
def __call__(self, *args):
return Callable(self, *args)
@compile.when(Constant)
def compile_constant(compile, expr, state):
state.sql.append(expr.sql)
class ConstantSpace(object):
__slots__ = ()
def __getattr__(self, attr):
return Constant(attr)
class Case(Expr):
__slots__ = ('cases', 'expr', 'default')
def __init__(self, cases, expr=Undef, default=Undef):
Operable.__init__(self)
self.cases = cases
self.expr = expr
self.default = default
@compile.when(Case)
def compile_case(compile, expr, state):
state.sql.append('CASE')
if expr.expr is not Undef:
state.sql.append(SPACE)
compile(expr.expr, state)
for clause, value in expr.cases:
state.sql.append(' WHEN ')
compile(clause, state)
state.sql.append(' THEN ')
compile(value, state)
if expr.default is not Undef:
state.sql.append(' ELSE ')
compile(expr.default, state)
state.sql.append(' END ')
class Cast(NamedCallable):
__slots__ = ("expr", "type",)
sql = "CAST"
def __init__(self, expr, type):
Operable.__init__(self)
self.expr = expr
self.type = type
@compile.when(Cast)
def compile_cast(compile, expr, state):
state.sql.append(expr.sql)
state.sql.append('(')
compile(expr.expr, state)
state.sql.append(' AS ')
state.sql.append(expr.type)
state.sql.append(')')
class Concat(ExprList):
__slots__ = ('_ws', )
def __init__(self, *args):
super(Concat, self).__init__(*args)
self.sql = ' || '
self._ws = None
def ws(self, sep=None):
if sep is None:
return self._ws
self._ws = sep
self.sql = ', '
return self
@compile.when(Concat)
def compile_concat(compile, expr, state):
if not expr.ws():
return compile_exprlist(compile, expr, state)
state.sql.append('concat_ws(')
compile(expr.ws(), state)
for a in expr:
state.sql.append(expr.sql)
compile(a, state)
state.sql.append(')')
class Alias(Expr):
__slots__ = ('expr', 'sql')
def __init__(self, expr=Undef, name=Undef):
if isinstance(expr, string_types):
warn('Alias(alias, expr)', 'Alias(name, expr)')
expr, name = name, expr
self.expr = expr
if isinstance(name, string_types):
name = Name(name)
super(Alias, self).__init__(name)
@compile.when(Alias)
def compile_alias(compile, expr, state):
if state.context == CONTEXT.FIELD:
compile(expr.expr, state)
state.sql.append(' AS ')
compile(expr.sql, state)
class Name(object):
__slots__ = ('name', )
def __init__(self, name=None):
self.name = name
def __repr__(self):
return expr_repr(self)
class NameCompiler(object):
_translation_mapping = (
("\\", "\\\\"),
("\000", "\\0"),
('\b', '\\b'),
('\n', '\\n'),
('\r', '\\r'),
('\t', '\\t'),
("%", "%%")
)
_delimiter = '"'
_escape_delimiter = '"'
_max_length = 63
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, '_{}'.format(k), v)
def __call__(self, compile, expr, state):
state.sql.append(self._delimiter)
name = expr.name
name = name.replace(self._delimiter, self._escape_delimiter + self._delimiter)
for k, v in self._translation_mapping:
name = name.replace(k, v)
if len(name) > self._get_max_length(state):
raise MaxLengthError("The length of name {0!r} is more than {1}".format(name, self._max_length))
state.sql.append(name)
state.sql.append(self._delimiter)
def _get_max_length(self, state):
# Max length can depend on context.
return self._max_length
compile_name = NameCompiler()
compile.when(Name)(compile_name)
class Value(object):
__slots__ = ('value', )
def __init__(self, value):
self.value = value
def __repr__(self):
return expr_repr(self)
class ValueCompiler(object):
_translation_mapping = (
("\\", "\\\\"),
("\000", "\\0"),
('\b', '\\b'),
('\n', '\\n'),
('\r', '\\r'),
('\t', '\\t'),
("%", "%%")
)
_delimiter = "'"
_escape_delimiter = "'"
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, '_{}'.format(k), v)
def __call__(self, compile, expr, state):
state.sql.append(self._delimiter)
value = str(expr.value)
value = value.replace(self._delimiter, self._escape_delimiter + self._delimiter)
for k, v in self._translation_mapping:
value = value.replace(k, v)
state.sql.append(value)
state.sql.append(self._delimiter)
compile_value = ValueCompiler()
compile.when(Value)(compile_value)
class Array(ExprList): # TODO: use composition instead of inheritance, to solve ambiguous of __getitem__()???
__slots__ = ()
def __init__(self, *args):
Operable.__init__(self)
self.sql, self.data = ", ", list(args)
@compile.when(Array)
def compile_array(compile, expr, state):
if not expr.data:
state.sql.append("'{}'")
state.sql.append("ARRAY[{0}]".format(compile_exprlist(compile, expr, state)))
class ArrayItem(Expr):
__slots__ = ('array', 'key')
def __init__(self, array, key):
Operable.__init__(self)
self.array = array
assert isinstance(key, slice)
self.key = key
@compile.when(ArrayItem)
def compile_arrayitem(compile, expr, state):
    compile(expr.array, state)
state.sql.append("[")
state.sql.append("{0:d}".format(expr.key.start))
if expr.key.stop is not None:
state.sql.append(", ")
state.sql.append("{0:d}".format(expr.key.stop))
state.sql.append("]")
def datatypeof(obj):
if isinstance(obj, Operable):
return obj._datatype
from sqlbuilder.smartsql.datatypes import BaseType
return BaseType
def expr_repr(expr):
return "<{0}: {1}, {2!r}>".format(type(expr).__name__, *compile(expr))
func = const = ConstantSpace()
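# Hedged usage sketch (not part of the original module): as used in expr_repr()
# above, the module-level `compile` renders an expression to a (sql, params) pair,
# so a Cast would compile roughly like:
#   sql, params = compile(Cast(Value(1), 'INTEGER'))
#   # sql -> "CAST('1' AS INTEGER)"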
|
71294
|
import torch
import numpy as np
from torch.autograd import Variable
import os
import argparse
from datetime import datetime
import torch.nn.functional as F
def joint_loss(pred, mask):
weit = 1 + 5*torch.abs(F.avg_pool2d(mask, kernel_size=31, stride=1, padding=15) - mask)
wbce = F.binary_cross_entropy_with_logits(pred, mask, reduction='none')
wbce = (weit*wbce).sum(dim=(2, 3)) / weit.sum(dim=(2, 3))
pred = torch.sigmoid(pred)
inter = ((pred * mask)*weit).sum(dim=(2, 3))
union = ((pred + mask)*weit).sum(dim=(2, 3))
wiou = 1 - (inter + 1)/(union - inter+1)
return (wbce + wiou).mean()
def joint_loss_flat(pred, mask, roi_mask=None):
W = 1 + 5*torch.abs(F.avg_pool2d(mask, kernel_size=31, stride=1, padding=15) - mask).squeeze()
L = F.binary_cross_entropy_with_logits(pred, mask, reduction='none').squeeze()
P = torch.sigmoid(pred).squeeze()
M = mask.squeeze()
if roi_mask is not None:
W = W[roi_mask]
L = L[roi_mask]
P = P[roi_mask]
M = M[roi_mask]
# Sum them up
WL = (W*L).sum() / W.sum()
I = ((P * M)*W).sum()
U = ((P + M)*W).sum()
# Compute Weighted IoU
WIoU = 1 - (I + 1)/(U - I+1)
return (WL + WIoU).mean()
def clip_gradient(optimizer, grad_clip):
"""
    For calibrating misaligned gradients via the gradient clipping technique
:param optimizer:
:param grad_clip:
:return:
"""
for group in optimizer.param_groups:
for param in group['params']:
if param.grad is not None:
param.grad.data.clamp_(-grad_clip, grad_clip)
def adjust_lr(optimizer, epoch, decay_rate=0.1, decay_epoch=30):
decay = decay_rate ** (epoch // decay_epoch)
for param_group in optimizer.param_groups:
param_group['lr'] *= decay
def collate_fn(batch):
batch_dict = {}
for k in batch[0]:
batch_dict[k] = []
for i in range(len(batch)):
batch_dict[k] += [batch[i][k]]
# tuple(zip(*batch))
batch_dict['images'] = torch.stack(batch_dict['images'])
if 'masks' in batch_dict:
batch_dict['masks'] = torch.stack(batch_dict['masks'])
if 'points' in batch_dict:
batch_dict['points'] = torch.stack(batch_dict['points'])
if 'edges' in batch_dict:
batch_dict['edges'] = torch.stack(batch_dict['edges'])
return batch_dict
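if __name__ == "__main__":
    # Hedged sanity check (not part of the original training utilities): joint_loss
    # combines a border-weighted BCE with a weighted IoU term over (B, 1, H, W)
    # logits and binary masks, and returns a scalar tensor.
    _pred = torch.randn(2, 1, 64, 64)
    _mask = torch.randint(0, 2, (2, 1, 64, 64)).float()
    print(joint_loss(_pred, _mask))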
|
71413
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
from torch.distributions.transforms import TanhTransform
from rl_sandbox.constants import OBS_RMS, VALUE_RMS, CPU
from rl_sandbox.model_architectures.utils import RunningMeanStd
class ActorCritic(nn.Module):
def __init__(self,
obs_dim,
norm_dim=(0,),
device=torch.device(CPU),
normalize_obs=False,
normalize_value=False,
**kwargs):
super().__init__(**kwargs)
self.device = device
self._obs_dim = obs_dim
if normalize_obs:
if isinstance(obs_dim, int):
obs_dim = (obs_dim,)
self.obs_rms = RunningMeanStd(shape=obs_dim, norm_dim=norm_dim)
if normalize_value:
self.value_rms = RunningMeanStd(shape=(1,), norm_dim=(0,))
def _extract_features(self, x):
x = self._flatten(x)
obs, extra_features = x[:, :self._obs_dim], x[:, self._obs_dim:]
if hasattr(self, OBS_RMS):
obs = self.obs_rms.normalize(obs)
x = torch.cat((obs, extra_features), dim=1)
x = x.to(self.device)
return x
def forward(self, x, **kwargs):
raise NotImplementedError()
def evaluate_action(self, x, h, a, **kwargs):
dist, value, _ = self.forward(x, h, **kwargs)
log_prob = dist.log_prob(a.clone().detach().to(self.device)).sum(dim=-1, keepdim=True)
return log_prob, value, dist.entropy()
def compute_action(self, x, h, **kwargs):
self.eval()
with torch.no_grad():
dist, value, h = self.forward(x, h=h)
action = dist.rsample()
log_prob = dist.log_prob(action).sum(dim=-1, keepdim=True)
self.train()
return action[0].cpu().numpy(), value[0].cpu().numpy(), h[0].cpu().numpy(), log_prob[0].cpu().numpy(), dist.entropy()[0].cpu().numpy(), dist.mean[0].cpu().numpy(), dist.variance[0].cpu().numpy()
def deterministic_action(self, x, h, **kwargs):
self.eval()
with torch.no_grad():
dist, value, h = self.forward(x, h=h)
action = dist.mean
log_prob = dist.log_prob(action).sum(dim=-1, keepdim=True)
self.train()
return action[0].cpu().numpy(), value[0].cpu().numpy(), h[0].cpu().numpy(), log_prob[0].cpu().numpy(), dist.entropy()[0].cpu().numpy()
class LSTMActorCritic(ActorCritic):
def __init__(self,
obs_dim,
hidden_state_dim,
norm_dim=(0,),
device=torch.device(CPU),
normalize_obs=False,
normalize_value=False,
**kwargs):
super().__init__(obs_dim=obs_dim,
norm_dim=norm_dim,
device=device,
normalize_obs=normalize_obs,
normalize_value=normalize_value,
**kwargs)
self.hidden_state_dim = hidden_state_dim
def _convert_hidden_state_to_tuple(self, h):
hidden_state = h[..., :self.hidden_state_dim].contiguous()
cell_state = h[..., self.hidden_state_dim:].contiguous()
return (hidden_state, cell_state)
def _convert_tuple_to_hidden_state(self, h):
return torch.cat((h[0], h[1]), dim=-1)
def initialize_hidden_state(self):
return torch.zeros((1, self.hidden_state_dim * 2))
def lstm_forward(self, x, h, lengths, **kwargs):
batch_size = h.shape[0]
seq_len = h.shape[1]
if lengths is None:
lengths = torch.ones(batch_size, dtype=torch.int)
h = h.transpose(0, 1)[[0]]
x = x.reshape(batch_size, seq_len, -1)
h = self._convert_hidden_state_to_tuple(h.to(self.device))
x = torch.nn.utils.rnn.pack_padded_sequence(
x, lengths, batch_first=True, enforce_sorted=False)
x, h = self.lstm_layer(x, h)
output, input_sizes = torch.nn.utils.rnn.pad_packed_sequence(x, batch_first=True)
x = output[range(output.shape[0]), input_sizes - 1, :]
h = self._convert_tuple_to_hidden_state(h).transpose(0, 1)
return x, h
def evaluate_action(self, x, h, a, lengths, **kwargs):
dist, value, _ = self.forward(x, h, lengths=lengths)
log_prob = dist.log_prob(a.clone().detach().to(self.device)).sum(dim=-1, keepdim=True)
return log_prob, value, dist.entropy()
class QActorCritic(ActorCritic):
def __init__(self,
obs_dim,
norm_dim=(0,),
device=torch.device(CPU),
normalize_obs=False,
normalize_value=False,
**kwargs):
super().__init__(obs_dim=obs_dim,
norm_dim=norm_dim,
device=device,
normalize_obs=normalize_obs,
normalize_value=normalize_value,
**kwargs)
def _q_vals(self, x, h, a):
input = torch.cat((x, a), dim=1)
q1_val = self._q1(input)
q2_val = self._q2(input)
min_q = torch.min(q1_val, q2_val)
return min_q, q1_val, q2_val, h
def q_vals(self, x, h, a, **kwargs):
x = self._extract_features(x)
a = a.to(self.device)
return self._q_vals(x, h, a)
def act_lprob(self, x, h, **kwargs):
dist, _, _ = self(x, h)
action = dist.rsample()
log_prob = dist.log_prob(action).sum(dim=-1, keepdim=True)
return action, log_prob
def act_stats(self, x, h, **kwargs):
dist, val, _ = self(x, h)
action = dist.rsample()
return action, dist.mean, dist.variance, dist.entropy(), val
def lprob(self, x, h, a, **kwargs):
dist, _, _ = self(x, h)
return dist.log_prob(a).sum(dim=-1, keepdim=True)
def forward(self, x, h, **kwargs):
raise NotImplementedError
@property
def policy_parameters(self):
return self._policy.parameters()
@property
def qs_parameters(self):
return list(self._q1.parameters()) + list(self._q2.parameters())
class SoftActorCritic(ActorCritic):
def __init__(self,
obs_dim,
initial_alpha=1.,
norm_dim=(0,),
device=torch.device(CPU),
normalize_obs=False,
normalize_value=False,
**kwargs):
super().__init__(obs_dim=obs_dim,
norm_dim=norm_dim,
device=device,
normalize_obs=normalize_obs,
normalize_value=normalize_value,
**kwargs)
assert initial_alpha > 0.
self._log_alpha = nn.Parameter(torch.ones(1) * torch.log(torch.tensor(initial_alpha)))
def _q_vals(self, x, h, a):
input = torch.cat((x, a), dim=1)
q1_val = self._q1(input)
q2_val = self._q2(input)
min_q = torch.min(q1_val, q2_val)
return min_q, q1_val, q2_val, h
def q_vals(self, x, h, a, **kwargs):
x = self._extract_features(x)
a = a.to(self.device)
return self._q_vals(x, h, a)
def act_lprob(self, x, h, **kwargs):
dist, _, _ = self(x, h)
action = dist.rsample()
log_prob = dist.log_prob(action).sum(dim=-1, keepdim=True)
return action, log_prob
def forward(self, x, h, **kwargs):
raise NotImplementedError
@property
def log_alpha(self):
return self._log_alpha
@property
def alpha(self):
return torch.exp(self._log_alpha)
@property
def policy_parameters(self):
return self._policy.parameters()
@property
def qs_parameters(self):
return list(self._q1.parameters()) + list(self._q2.parameters())
@property
def soft_update_parameters(self):
return self.qs_parameters
class SquashedGaussianSoftActorCritic(SoftActorCritic):
def __init__(self,
obs_dim,
initial_alpha=1.,
eps=1e-7,
norm_dim=(0,),
device=torch.device(CPU),
normalize_obs=False,
normalize_value=False,
**kwargs):
super().__init__(obs_dim=obs_dim,
initial_alpha=initial_alpha,
norm_dim=norm_dim,
device=device,
normalize_obs=normalize_obs,
normalize_value=normalize_value,
**kwargs)
self._eps = eps
self._squash_gaussian = TanhTransform()
def _q_vals(self, x, a):
input = torch.cat((x, a), dim=1)
q1_val = self._q1(input)
q2_val = self._q2(input)
min_q = torch.min(q1_val, q2_val)
return min_q, q1_val, q2_val
def _lprob(self, dist, a, t_a):
return torch.sum(dist.log_prob(a) - self._squash_gaussian.log_abs_det_jacobian(a, t_a), dim=-1, keepdim=True)
def q_vals(self, x, h, a, **kwargs):
a = a.to(self.device)
x = self._extract_features(x)
min_q, q1_val, q2_val = self._q_vals(x, a)
return min_q, q1_val, q2_val, h
def act_lprob(self, x, h, **kwargs):
dist, _, _ = self.forward(x, h)
action = dist.rsample()
t_action = self._squash_gaussian(action)
log_prob = self._lprob(dist, action, t_action)
return t_action, log_prob
def compute_action(self, x, h):
self.eval()
with torch.no_grad():
dist, value, h = self.forward(x, h=h)
action = dist.rsample()
t_action = self._squash_gaussian(action)
log_prob = self._lprob(dist, action, t_action)
self.train()
return t_action[0].cpu().numpy(), value[0].cpu().numpy(), h[0].cpu().numpy(), log_prob[0].cpu().numpy(), dist.entropy()[0].cpu().numpy(), dist.mean[0].cpu().numpy(), dist.variance[0].cpu().numpy()
def deterministic_action(self, x, h):
self.eval()
with torch.no_grad():
dist, value, h = self.forward(x, h=h)
action = dist.mean
t_action = self._squash_gaussian(action)
log_prob = self._lprob(dist, action, t_action)
self.train()
return t_action[0].cpu().numpy(), value[0].cpu().numpy(), h[0].cpu().numpy(), log_prob[0].cpu().numpy(), dist.entropy()[0].cpu().numpy()
def forward(self, x, h, **kwargs):
x = self._extract_features(x)
a_mean, a_raw_std = torch.chunk(self._policy(x), chunks=2, dim=1)
a_std = F.softplus(a_raw_std) + self._eps
dist = Normal(a_mean, a_std)
t_a_mean = self._squash_gaussian(a_mean)
min_q, _, _ = self._q_vals(x, t_a_mean)
val = min_q - self.alpha * self._lprob(dist, a_mean, t_a_mean)
return dist, val, h
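if __name__ == "__main__":
    # Hedged, self-contained illustration (not part of rl_sandbox): the squashed
    # log-probability used in _lprob() above is the standard change-of-variables
    # correction, log p(tanh(a)) = log p(a) - log|d tanh(a) / da|.
    _dist = Normal(torch.zeros(3), torch.ones(3))
    _squash = TanhTransform()
    _a = _dist.rsample()
    _t_a = _squash(_a)
    _log_prob = (_dist.log_prob(_a) - _squash.log_abs_det_jacobian(_a, _t_a)).sum(dim=-1, keepdim=True)
    print(_t_a, _log_prob)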
|
71414
|
import os
train_src="../dynet_nmt/data/train.de-en.de.wmixerprep"
train_tgt="../dynet_nmt/data/train.de-en.en.wmixerprep"
dev_src="../dynet_nmt/data/valid.de-en.de"
dev_tgt="../dynet_nmt/data/valid.de-en.en"
test_src="../dynet_nmt/data/test.de-en.de"
test_tgt="../dynet_nmt/data/test.de-en.en"
for temp in [0.5]:
job_name = 'iwslt14.raml.corrupt_ngram.t%.3f' % temp
train_log = 'train.' + job_name + '.log'
model_name = 'model.' + job_name
decode_file = 'iwslt14.test.en.raml.corrupt_ngram.t%.3f' % temp
job_file = 'scripts/train.%s.sh' % job_name
with open(job_file, 'w') as f:
f.write("""#!/bin/sh
python nmt.py \
--cuda \
--mode test \
--load_model models/{model_name}.bin \
--beam_size 5 \
--decode_max_time_step 100 \
--save_to_file decode/{decode_file} \
--test_src {test_src} \
--test_tgt {test_tgt}
echo "test result" >> logs/{train_log}
perl multi-bleu.perl {test_tgt} < decode/{decode_file} >> logs/{train_log}
""".format(model_name=model_name, temp=temp,
train_src=train_src, train_tgt=train_tgt,
dev_src=dev_src, dev_tgt=dev_tgt,
test_src=test_src, test_tgt=test_tgt,
train_log=train_log, decode_file=decode_file))
os.system('bash submit_job.sh %s' % job_file)
|
71485
|
import os
import typing
from datetime import datetime
from importlib import import_module
from typing import Dict, Callable, Optional, List, Tuple
from twitchbot.database import CustomCommand
from twitchbot.message import Message
from .config import cfg
from .enums import CommandContext
from .util import get_py_files, get_file_name
from .util import temp_syspath
if typing.TYPE_CHECKING:
from .modloader import Mod
DEFAULT_COOLDOWN_BYPASS = 'bypass_cooldown'
DEFAULT_COOLDOWN = 0
__all__ = (
'Command', 'commands', 'command_exist', 'load_commands_from_directory', 'DummyCommand', 'CustomCommandAction',
'ModCommand', 'SubCommand', 'get_command', 'CUSTOM_COMMAND_PLACEHOLDERS', 'command_last_execute',
'get_time_since_execute', 'reset_command_last_execute', 'is_command_off_cooldown', 'is_command_on_cooldown',
'update_command_last_execute', 'set_command_permission')
class Command:
def __init__(self, name: str, prefix: str = None, func: Callable = None, global_command: bool = True,
context: CommandContext = CommandContext.DEFAULT_COMMAND_CONTEXT, permission: str = None, syntax: str = None,
help: str = None, aliases: List[str] = None, cooldown: int = DEFAULT_COOLDOWN,
cooldown_bypass: str = DEFAULT_COOLDOWN_BYPASS, hidden: bool = False, parent: 'Command' = None):
"""
:param name: name of the command (without the prefix)
        :param prefix: prefix required before the command name (defaults to the config's prefix if None)
:param func: the function that the commands executes
:param global_command: should the command be registered globally?
:param context: the context through which calling the command is allowed
:param permission: permission needed to run the command in chat
:param syntax: help message for how to use the command, <> is required, () is optional
:param help: help message for the command, used with the `help` command
:param aliases: aliases for this same command, only works if global_command is True
        :param cooldown: minimum time in seconds between runs of this command; 0 (the default) means the command can be run without any delay
        :param cooldown_bypass: permission that allows those who have it to bypass the command's cooldown
:param hidden: hides the command from the output of the commands command
:param parent: parent command for this command, allows for this command to be a subcommand
"""
self.hidden = hidden
self.cooldown_bypass = cooldown_bypass
self.cooldown: int = cooldown
self.aliases: List[str] = aliases if aliases is not None else []
self.help: str = help
self.syntax: str = syntax
self.permission: str = permission
self.context: CommandContext = context
self.prefix: str = (prefix if prefix is not None else cfg.prefix).lower()
self.func: Callable = func
self.name: str = name.lower()
self.sub_cmds: Dict[str, Command] = {}
self.parent: Optional[Command] = None
self.update_parent_command(parent)
if global_command:
commands[self.fullname] = self
            # register all aliases passed to this function
if aliases is not None:
for alias in aliases:
commands[self.prefix + alias] = self
def update_parent_command(self, parent: 'Command' = None):
if parent is None:
if self.parent is not None and self.name in self.parent.sub_cmds:
del self.parent.sub_cmds[self.name]
self.parent = None
else:
self.parent = parent
if self.name in self.parent.sub_cmds:
del self.parent.sub_cmds[self.name]
parent.sub_cmds[self.name] = self
@property
def fullname(self) -> str:
return self.prefix + self.name
def parent_chain(self) -> List['Command']:
"""
        returns a list with the chain of commands leading to this command,
        including this command itself
        ex: if command `a` has sub-command `b`, and `b` has sub-command `c`,
        then c.parent_chain() returns [a, b, c]
        :return: list of parents leading to this command, plus the current command at the end
"""
parents = [self]
parent = self.parent
while parent:
parents.append(parent)
parent = parent.parent
return parents[::-1]
def _get_cmd_func(self, args) -> Tuple['Callable', List[str]]:
"""returns a tuple of the final commands command function and the remaining argument"""
if not self.sub_cmds or not args or args[0].lower() not in self.sub_cmds:
return self.func, args
return self.sub_cmds[args[0].lower()]._get_cmd_func(args[1:])
def get_sub_cmd(self, args) -> Tuple['Command', Tuple[str]]:
"""
        returns the final command in a sub-command chain from the args passed to this function
        the sub-command chain is based on the current command this function is called on
args is a list or tuple of strings
"""
if not self.sub_cmds or not args or args[0].lower() not in self.sub_cmds:
return self, args
return self.sub_cmds[args[0].lower()].get_sub_cmd(args[1:])
async def execute(self, msg: Message):
func, args = self._get_cmd_func(msg.parts[1:])
await func(msg, *args)
async def has_permission_to_run_from_msg(self, origin_msg: Message):
from .event_util import forward_event_with_results
from .enums import Event
for parent in self.parent_chain():
if (parent.permission
and not all(await forward_event_with_results(Event.on_permission_check, origin_msg, parent, channel=origin_msg.channel_name))):
return False
return True
# decorator support
def __call__(self, func) -> 'Command':
self.func = func
return self
def __str__(self):
return f'<{self.__class__.__name__} fullname={repr(self.fullname)} parent={self.parent}>'
def __getitem__(self, item):
return self.sub_cmds.get(item.lower()) or self.sub_cmds.get(item.lower()[1:])
def __repr__(self):
return f'<{self.__class__.__name__} fullname={self.fullname}>'
class SubCommand(Command):
def __init__(self, parent: Command, name: str, func: Callable = None, permission: str = None, syntax: str = None,
help: str = None, cooldown: int = DEFAULT_COOLDOWN, cooldown_bypass: str = DEFAULT_COOLDOWN_BYPASS, hidden: bool = False):
super().__init__(name=name, prefix='', func=func, permission=permission, syntax=syntax, help=help,
global_command=False, cooldown=cooldown, cooldown_bypass=cooldown_bypass, hidden=hidden, parent=parent)
# self.parent: Command = parent
# self.update_parent_command(parent)
# self.parent.sub_cmds[self.name] = self
class DummyCommand(Command):
def __init__(self, name: str, prefix: str = None, global_command: bool = True,
context: CommandContext = CommandContext.DEFAULT_COMMAND_CONTEXT, permission: str = None, syntax: str = None,
help: str = None, aliases: List[str] = None, hidden: bool = False, parent: Command = None):
super().__init__(name=name, prefix=prefix, func=self.exec, global_command=global_command,
context=context, permission=permission, syntax=syntax, help=help, aliases=aliases, hidden=hidden, parent=parent)
async def exec(self, msg: Message, *args):
"""the function called when the dummy command is executed"""
if self.sub_cmds:
await msg.reply(f'command options: {", ".join(name for name, cmd in self.sub_cmds.items() if not cmd.hidden)}')
else:
await msg.reply('no sub-commands were found for this command')
def add_sub_cmd(self, name: str) -> 'DummyCommand':
"""adds a new DummyCommand to the current DummyCommand as a sub-command, then returns the new DummyCommand"""
cmd = DummyCommand(name, prefix='', global_command=False)
self.sub_cmds[cmd.fullname] = cmd
return cmd
def _calc_channel_live_time(msg) -> str:
if msg.channel.live:
        return format((datetime.now() - msg.channel.stats.started_at).total_seconds() / 3600, '.1f')
return '[NOT LIVE]'
CUSTOM_COMMAND_PLACEHOLDERS = (
(
'%user',
lambda msg: f'@{msg.author}'
),
(
'%uptime',
_calc_channel_live_time
),
(
'%channel',
lambda msg: msg.channel_name
),
)
class CustomCommandAction(Command):
def __init__(self, cmd):
super().__init__(cmd.name, prefix='', func=self.execute, global_command=False, hidden=True)
self.cmd: CustomCommand = cmd
self.cooldown = 0
async def execute(self, msg: Message):
resp = self.cmd.response
for placeholder, func in CUSTOM_COMMAND_PLACEHOLDERS:
if placeholder in resp:
resp = resp.replace(placeholder, func(msg))
await msg.channel.send_message(resp)
class ModCommand(Command):
def __init__(self, mod_name: str, name: str, prefix: str = None, func: Callable = None, global_command: bool = True,
context: CommandContext = CommandContext.DEFAULT_COMMAND_CONTEXT, permission: str = None, syntax: str = None,
help: str = None, cooldown: int = DEFAULT_COOLDOWN, cooldown_bypass: str = DEFAULT_COOLDOWN_BYPASS, hidden: bool = False,
parent: Command = None):
super().__init__(name=name, prefix=prefix, func=func, global_command=global_command, context=context,
permission=permission, syntax=syntax, help=help, cooldown=cooldown,
cooldown_bypass=cooldown_bypass, hidden=hidden, parent=parent)
self.mod_name = mod_name
@property
def mod(self):
from .modloader import mods
return mods[self.mod_name]
async def execute(self, msg: Message):
func, args = self._get_cmd_func(msg.parts[1:])
if 'self' in func.__code__.co_varnames:
await func(self.mod, msg, *args)
else:
await func(msg, *args)
commands: Dict[str, Command] = {}
command_last_execute: Dict[Tuple[str, str], datetime] = {}
def _create_cooldown_key(channel: str, cmd: str) -> Tuple[str, str]:
return channel.lower(), cmd.lower()
def is_command_off_cooldown(channel: str, cmd: str, cooldown: int = None) -> bool:
if not command_exist(cmd):
return True
return get_time_since_execute(channel, cmd) >= (cooldown or get_command(cmd).cooldown)
def is_command_on_cooldown(channel: str, cmd: str, cooldown: int = None) -> bool:
return not is_command_off_cooldown(channel, cmd, cooldown)
def get_time_since_execute(channel: str, cmd: str) -> int:
last_execute = command_last_execute.get(_create_cooldown_key(channel, cmd), datetime.min)
return int(abs((last_execute - datetime.now()).total_seconds()))
def update_command_last_execute(channel: str, cmd: str):
command_last_execute[_create_cooldown_key(channel, cmd)] = datetime.now()
def reset_command_last_execute(channel: str, cmd: str):
command_last_execute[_create_cooldown_key(channel, cmd)] = datetime.min
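# Hedged usage sketch (not part of the original module): the cooldown helpers above
# are keyed by (channel, command) pairs, so a typical check-and-run flow would be:
#   if is_command_off_cooldown('somechannel', '!somecommand'):
#       ...execute the command...
#       update_command_last_execute('somechannel', '!somecommand')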
def load_commands_from_directory(path):
print(f'loading commands from {path}')
path = os.path.abspath(path)
if not os.path.exists(path):
return
with temp_syspath(path):
for file in get_py_files(path):
fname = get_file_name(file)
mod = import_module(fname)
def command_exist(name: str) -> bool:
"""
returns a bool indicating if a command exists,
    tries adding the config's prefix to the name if not found initially,
does not check for custom commands
"""
return any(cmd in commands for cmd in (name, cfg.prefix + name))
def get_command(name: str) -> Optional[Command]:
"""
    gets a command,
    tries adding the config's prefix to the name if not found initially,
    returns None if it does not exist; does not get custom commands
"""
return commands.get(name) or commands.get(cfg.prefix + name)
def set_command_permission(cmd: str, new_permission: Optional[str]) -> bool:
"""
    overrides a command's previous permission and sets it to `new_permission`
:param cmd: command to override permission for
:param new_permission: new permission to set for `cmd`
:return:
"""
command = get_command(cmd)
if not command:
return False
command.permission = new_permission
return True
|
71489
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
from typing import Iterator
import time
import json
from seq2seq.helpers import sequence_accuracy
from seq2seq.ReaSCAN_dataset import ReaSCANDataset
import pdb
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logger = logging.getLogger(__name__)
def predict_and_save(dataset: ReaSCANDataset, model: nn.Module, output_file_path: str, max_decoding_steps: int,
max_testing_examples=None, **kwargs):
"""
Predict all data in dataset with a model and write the predictions to output_file_path.
:param dataset: a dataset with test examples
:param model: a trained model from model.py
:param output_file_path: a path where a .json file with predictions will be saved.
:param max_decoding_steps: after how many steps to force quit decoding
:param max_testing_examples: after how many examples to stop predicting, if None all examples will be evaluated
"""
cfg = locals().copy()
with open(output_file_path, mode='w') as outfile:
output = []
with torch.no_grad():
i = 0
for (input_sequence, derivation_spec, situation_spec, output_sequence, target_sequence,
attention_weights_commands, attention_weights_situations, position_accuracy) in predict(
dataset.get_data_iterator(batch_size=1), model=model, max_decoding_steps=max_decoding_steps,
pad_idx=dataset.target_vocabulary.pad_idx, sos_idx=dataset.target_vocabulary.sos_idx,
eos_idx=dataset.target_vocabulary.eos_idx):
i += 1
accuracy = sequence_accuracy(output_sequence, target_sequence[0].tolist()[1:-1])
input_str_sequence = dataset.array_to_sentence(input_sequence[0].tolist(), vocabulary="input")
input_str_sequence = input_str_sequence[1:-1] # Get rid of <SOS> and <EOS>
target_str_sequence = dataset.array_to_sentence(target_sequence[0].tolist(), vocabulary="target")
target_str_sequence = target_str_sequence[1:-1] # Get rid of <SOS> and <EOS>
output_str_sequence = dataset.array_to_sentence(output_sequence, vocabulary="target")
output.append({"input": input_str_sequence, "prediction": output_str_sequence,
"derivation": derivation_spec,
"target": target_str_sequence, "situation": situation_spec,
"attention_weights_input": attention_weights_commands,
"attention_weights_situation": attention_weights_situations,
"accuracy": accuracy,
"exact_match": True if accuracy == 100 else False,
"position_accuracy": position_accuracy})
logger.info("Wrote predictions for {} examples.".format(i))
json.dump(output, outfile, indent=4)
return output_file_path
def predict(data_iterator: Iterator, model: nn.Module, max_decoding_steps: int, pad_idx: int, sos_idx: int,
eos_idx: int, max_examples_to_evaluate=None) -> torch.Tensor:
"""
Loop over all data in data_iterator and predict until <EOS> token is reached.
:param data_iterator: iterator containing the data to predict
:param model: a trained model from model.py
:param max_decoding_steps: after how many steps to abort decoding
:param pad_idx: the padding idx of the target vocabulary
:param sos_idx: the start-of-sequence idx of the target vocabulary
:param eos_idx: the end-of-sequence idx of the target vocabulary
    :param max_examples_to_evaluate: after how many examples to stop predicting; if None, all are predicted
"""
# Disable dropout and other regularization.
model.eval()
start_time = time.time()
# Loop over the data.
i = 0
for (input_sequence, input_lengths, derivation_spec, situation, situation_spec, target_sequence,
target_lengths, agent_positions, target_positions) in data_iterator:
i += 1
if max_examples_to_evaluate:
if i > max_examples_to_evaluate:
break
# Encode the input sequence.
encoded_input = model.encode_input(commands_input=input_sequence,
commands_lengths=input_lengths,
situations_input=situation)
# For efficiency
projected_keys_visual = model.visual_attention.key_layer(
encoded_input["encoded_situations"]) # [bsz, situation_length, dec_hidden_dim]
projected_keys_textual = model.textual_attention.key_layer(
encoded_input["encoded_commands"]["encoder_outputs"]) # [max_input_length, bsz, dec_hidden_dim]
# Iteratively decode the output.
output_sequence = []
contexts_situation = []
hidden = model.attention_decoder.initialize_hidden(
model.tanh(model.enc_hidden_to_dec_hidden(encoded_input["hidden_states"])))
token = torch.tensor([sos_idx], dtype=torch.long, device=device)
decoding_iteration = 0
attention_weights_commands = []
attention_weights_situations = []
while token != eos_idx and decoding_iteration <= max_decoding_steps:
(output, hidden, context_situation, attention_weights_command,
attention_weights_situation) = model.decode_input(
target_token=token, hidden=hidden, encoder_outputs=projected_keys_textual,
input_lengths=input_lengths, encoded_situations=projected_keys_visual)
output = F.log_softmax(output, dim=-1)
token = output.max(dim=-1)[1]
output_sequence.append(token.data[0].item())
attention_weights_commands.append(attention_weights_command.tolist())
attention_weights_situations.append(attention_weights_situation.tolist())
contexts_situation.append(context_situation.unsqueeze(1))
decoding_iteration += 1
if output_sequence[-1] == eos_idx:
output_sequence.pop()
attention_weights_commands.pop()
attention_weights_situations.pop()
if model.auxiliary_task:
target_position_scores = model.auxiliary_task_forward(torch.cat(contexts_situation, dim=1).sum(dim=1))
auxiliary_accuracy_target = model.get_auxiliary_accuracy(target_position_scores, target_positions)
else:
auxiliary_accuracy_agent, auxiliary_accuracy_target = 0, 0
yield (input_sequence, derivation_spec, situation_spec, output_sequence, target_sequence,
attention_weights_commands, attention_weights_situations, auxiliary_accuracy_target)
elapsed_time = time.time() - start_time
logging.info("Predicted for {} examples.".format(i))
logging.info("Done predicting in {} seconds.".format(elapsed_time))
|
71559
|
import os
import numpy
import numpy as np
import torch
from dg_util.python_utils import misc_util
from torch import nn
numpy.set_printoptions(precision=4)
torch.set_printoptions(precision=4, sci_mode=False)
def batch_norm_layer(channels):
return nn.BatchNorm2d(channels)
def nonlinearity():
return nn.ReLU(inplace=True)
NONLINEARITY = nonlinearity
NORM_LAYER = batch_norm_layer
TIME_STR = misc_util.get_time_str()
BASE_LOG_DIR = "logs"
CHECK_FOR_NEW_DATA = False
IMAGENET_MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32) * 255
IMAGENET_STD = np.array([0.229, 0.224, 0.225], dtype=np.float32) * 255
COOKIE_PATH = os.path.join(os.path.dirname(__file__), "youtube_scrape", "cookies.txt")
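# Hedged example (not part of the original constants module): the mean/std above are
# expressed in 0-255 pixel units, so a uint8 image would be normalized roughly as:
#   img = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32)
#   normalized = (img - IMAGENET_MEAN) / IMAGENET_STD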
|
71569
|
import npc
import pytest
def test_creates_character(campaign):
result = npc.commands.create_character.werewolf('wer<NAME>', 'cahalith')
character = campaign.get_character('werewolf mann.nwod')
assert result.success
assert character.exists()
assert campaign.get_absolute(result.openable[0]) == str(character)
def test_adds_group_tags(campaign):
result = npc.commands.create_character.werewolf('werewolf mann', 'cahalith', groups=['fork', 'spoon'])
data = campaign.get_character_data('werewolf mann.nwod')
assert 'fork' in data.tags('group')
assert 'spoon' in data.tags('group')
def test_duplicate_character(campaign):
npc.commands.create_character.werewolf('wer<NAME>', 'cahalith')
result = npc.commands.create_character.werewolf('werewolf mann', 'cahalith')
assert not result.success
def test_adds_auspice(campaign):
npc.commands.create_character.werewolf('werewolf mann', 'cahalith')
data = campaign.get_character_data('werewolf mann.nwod')
assert 'Cahalith' in data.tags['auspice']
def test_adds_tribe(campaign):
npc.commands.create_character.werewolf('werewolf mann', 'cahalith', tribe='Bone Talons')
data = campaign.get_character_data('werewolf mann.nwod')
assert 'Bone Talons' in data.tags['tribe']
def test_adds_pack(campaign):
npc.commands.create_character.werewolf('werewolf mann', 'cahalith', pack='Foobars')
data = campaign.get_character_data('werewolf mann.nwod')
assert 'Foobars' in data.tags['pack']
|
71571
|
from odoo import fields, models
class IrModel(models.Model):
_inherit = 'ir.model'
rest_api = fields.Boolean('REST API', default=True,
help="Allow this model to be fetched through REST API")
|
71593
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("auditlog", "0005_logentry_additional_data_verbose_name"),
]
operations = [
migrations.AlterField(
model_name="logentry",
name="object_pk",
field=models.CharField(
verbose_name="object pk", max_length=255, db_index=True
),
),
]
|
71624
|
import os
import torch
from typing import List, Tuple
from torch import nn
from transformers import BertConfig, BertModel, BertPreTrainedModel, BertTokenizerFast, AutoTokenizer
from repconc.models.repconc import RepCONC
class TCTEncoder(BertPreTrainedModel):
def __init__(self, config: BertConfig):
BertPreTrainedModel.__init__(self, config)
self.bert = BertModel(config, add_pooling_layer=False)
self.config.pooling = "mean"
self.config.similarity_metric = "METRIC_IP"
def forward(self, input_ids, attention_mask, return_dict=False):
outputs = self.bert(input_ids, attention_mask, return_dict=True)
token_embeds = outputs.last_hidden_state[:, 4:, :]
input_mask_expanded = attention_mask[:, 4:].unsqueeze(-1).expand(token_embeds.size()).float()
text_embeds = torch.sum(token_embeds * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
if return_dict:
outputs.embedding = text_embeds
return outputs
else:
return text_embeds
@property
def language_model(self):
return self.bert
def tct_repconc_from_pretrained(load_dir, use_constraint, sk_epsilon, sk_iters):
dense_encoder = TCTEncoder.from_pretrained(os.path.join(load_dir, 'dense_encoder'))
repconc = RepCONC(
dense_encoder.config,
dense_encoder,
use_constraint=use_constraint,
sk_epsilon=sk_epsilon,
sk_iters=sk_iters)
repconc.load_state_dict(torch.load(os.path.join(load_dir, "pytorch_model.bin"), map_location="cpu"))
return repconc
class TCTTokenizerFast(BertTokenizerFast):
'''
    TCT prepends its own [CLS] and [Q]/[D] marker tokens and expands queries to a
    fixed length, so the tokenizer itself does not add special tokens
'''
def __call__(self, text, input_text_type, max_length=None, add_special_tokens=False, **kwargs):
# TCT does not add special tokens and expands queries to a fixed length
if input_text_type == "query":
max_length = 36
text = ['[CLS] [Q] ' + query + '[MASK]' * 36 for query in text ]
elif input_text_type == "doc":
text = ['[CLS] [D] ' + doc for doc in text ]
else:
raise NotImplementedError()
return super().__call__(text, max_length=max_length, add_special_tokens=False, **kwargs)
if __name__ == "__main__":
print(AutoTokenizer.from_pretrained("castorini/tct_colbert-v2-hnp-msmarco"))
print("Test tokenizer")
import inspect
for tokenizer_class in [AutoTokenizer, TCTTokenizerFast]:
tokenizer = tokenizer_class.from_pretrained("castorini/tct_colbert-v2-hnp-msmarco")
print(tokenizer.__class__, tokenizer)
text_lst = ["I am TCT tokenizer"]
input_text_type = {"input_text_type": "doc"} if "input_text_type" in inspect.getfullargspec(tokenizer.__call__)[0] else {}
print(tokenizer.convert_ids_to_tokens(tokenizer(
text_lst,
add_special_tokens=True,
max_length=36,
truncation=True, **input_text_type)['input_ids'][0]))
input_text_type = {"input_text_type": "query"} if "input_text_type" in inspect.getfullargspec(tokenizer.__call__)[0] else {}
print(tokenizer.convert_ids_to_tokens(tokenizer(text_lst, add_special_tokens=True, max_length=36, truncation=True, **input_text_type)['input_ids'][0]))
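# Hedged note (not part of the original file): TCTEncoder.forward mean-pools the token
# embeddings while masking padding and skipping the first 4 positions, roughly:
#   tok = last_hidden_state[:, 4:, :]
#   m = attention_mask[:, 4:].unsqueeze(-1).float()
#   pooled = (tok * m).sum(1) / m.sum(1).clamp(min=1e-9)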
|
71645
|
import time
from compactor.process import Process
from compactor.context import Context
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
class WebProcess(Process):
@Process.install('ping')
def ping(self, from_pid, body):
log.info("Received ping")
def respond():
time.sleep(0.5)
self.send(from_pid, "pong")
self.context.loop.add_callback(respond)
@Process.install('pong')
def pong(self, from_pid, body):
log.info("Received pong")
def respond():
time.sleep(0.5)
self.send(from_pid, "ping")
self.context.loop.add_callback(respond)
def listen(identifier):
"""
Launch a listener and return the compactor context.
"""
context = Context()
process = WebProcess(identifier)
context.spawn(process)
log.info("Launching PID %s", process.pid)
return process, context
if __name__ == '__main__':
a, a_context = listen("web(1)")
b, b_context = listen("web(2)")
a_context.start()
b_context.start()
# Kick off the game of ping/pong by sending a message to B from A
a.send(b.pid, "ping")
while a_context.isAlive() or b_context.isAlive():
time.sleep(0.5)
|
71748
|
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from hubspot.cms.performance.api.public_performance_api import PublicPerformanceApi
|
71749
|
from roscraco.helper import validator
from roscraco.exception import RouterSettingsError
class WirelessSettings(object):
"""Represents all available Wireless settings for a router."""
SECURITY_TYPE_NONE = 'none'
SECURITY_TYPE_WEP64 = 'wep64'
SECURITY_TYPE_WEP128 = 'wep128'
SECURITY_TYPE_WPA = 'wpa'
SECURITY_TYPE_WPA2 = 'wpa2'
#: List of properties to export using export()
PROPERTIES = (
'security_type', 'ssid', 'is_enabled', 'is_broadcasting_ssid',
'channel', 'password'
)
def __init__(self):
self._supports_wireless = True
self._ssid = None
self._enabled_status = True
self._ssid_broadcast_status = True
self._channel = None
self._password = <PASSWORD>
self._internal_params = {}
self._supported_security_types = set([self.__class__.SECURITY_TYPE_NONE])
self._security_type = None
self._supports_ascii_wep_passwords = True
self._supports_auto_channel = True
self._changes_require_reboot = True
def set_auto_channel_support(self, value):
self._supports_auto_channel = bool(value)
@property
def supports_auto_channel(self):
"""Tells whether auto channel is supported.
Channel 0 is considered the auto channel, because that's
how most routers represent the ``Auto`` value.
Some devices, however, do not support Auto channel at all.
"""
return self._supports_auto_channel
def add_security_support(self, security_type):
"""Adds a new security type to the list of supported
security types.
"""
self._supported_security_types.add(security_type)
@property
def supported_security_types(self):
return self._supported_security_types
def set_security_type(self, security_type):
self._security_type = security_type
@property
def security_type_is_wep(self):
"""Tells whether the current security type is WEP.
Returns true for both WEP64 and WEP128.
"""
return self._security_type in (self.__class__.SECURITY_TYPE_WEP64, self.__class__.SECURITY_TYPE_WEP128)
@property
def security_type_is_wpa(self):
"""Tells whether the current security type is WPA.
Returns true for both WPA and WPA2.
"""
return self._security_type in (self.__class__.SECURITY_TYPE_WPA, self.__class__.SECURITY_TYPE_WPA2)
@property
def security_type(self):
return self._security_type
def set_reboot_requirement_status(self, value):
self._changes_require_reboot = bool(value)
@property
def changes_require_reboot(self):
"""Tells whether the router needs rebooting
for changes to take effect.
"""
return self._changes_require_reboot
def set_support_status(self, value):
self._supports_wireless = bool(value)
@property
def is_supported(self):
"""Tells whether the router supports wireless (most of them do)."""
return self._supports_wireless
def set_ssid(self, value):
self._ssid = value
@property
def ssid(self):
"""The current SSID (wireless network name)."""
return self._ssid
def set_enabled_status(self, value):
self._enabled_status = bool(value)
@property
def is_enabled(self):
return self._enabled_status
def set_ssid_broadcast_status(self, value):
self._ssid_broadcast_status = bool(value)
@property
def is_broadcasting_ssid(self):
"""Tells whether the SSID status is being broadcasted publicly.
If it is, than the network is publicly visible by anyone.
"""
return self._ssid_broadcast_status
def set_channel(self, value):
self._channel = int(value)
@property
def channel(self):
"""The transmission channel for wireless communications."""
return self._channel
def set_password(self, value):
self._password = value
@property
def password(self):
"""The current password for the given security type.
The password is sometimes None for some routers, to indicate
that the password cannot be determined.
Some routers hide the current password from their web-interface,
so we can't detect it (but that doesn't mean that we can't change it
with a new one).
"""
return self._password
@property
def is_wep_password_in_hex(self):
"""Tells whether the current WEP password is in HEX or in ASCII.
Detecting this allows us to set the ASCII/HEX
field in the management interface automatically.
"""
if not self.security_type_is_wep:
raise RouterSettingsError('Not using WEP, but trying to inspect password!')
bit_length = 128 if self.security_type == self.__class__.SECURITY_TYPE_WEP128 else 64
return validator.is_wep_password_in_hex(self.password, bit_length)
def set_ascii_wep_password_support_status(self, value):
self._supports_ascii_wep_passwords = bool(value)
@property
def supports_ascii_wep_passwords(self):
"""Tells whether the current router supports ASCII passwords
for WEP security.
Some devices only support HEX passwords.
"""
return self._supports_ascii_wep_passwords
def set_internal_param(self, key, value):
self._internal_params[key] = value
def get_internal_param(self, key):
return self._internal_params[key] if key in self._internal_params else None
def validate(self):
errors = {}
if not validator.is_valid_ssid(self.ssid):
errors['ssid'] = 'Invalid SSID: %s' % self.ssid
# most routers use channel 0 as the 'Auto' channel
channel_min = 0 if self.supports_auto_channel else 1
if not (channel_min <= self.channel <= 13):
errors['channel'] = 'Invalid channel %d' % self.channel
if self.security_type not in self._supported_security_types:
errors['security_type'] = 'Invalid security type: %s' % self.security_type
else:
result = self.__validate_password()
if result is not None:
errors['password'] = result
return errors
def ensure_valid(self):
errors = self.validate()
if len(errors) != 0:
raise RouterSettingsError(str(errors))
def __validate_password(self):
if self.security_type in (self.__class__.SECURITY_TYPE_WPA, self.__class__.SECURITY_TYPE_WPA2):
if not validator.is_valid_wpa_psk_password(self.password):
return 'Invalid WPA PSK password: %s' % self.password
if self.security_type in (self.__class__.SECURITY_TYPE_WEP64, self.__class__.SECURITY_TYPE_WEP128):
bit_length = 128 if self.security_type == self.__class__.SECURITY_TYPE_WEP128 else 64
if not validator.is_valid_wep_password(self.password, bit_length):
return 'Invalid WEP password for bit length %d: %s' % (bit_length, self.password)
# Some devices only support HEX values for the WEP password field
if not self.supports_ascii_wep_passwords and not self.is_wep_password_in_hex:
return 'ASCII WEP passwords are not supported!'
return None
def eq(self, other, skip_attrs=()):
# WEP passwords that use HEX are not case-sensitive, so we want
# to validate them separately
if self.security_type_is_wep and other.security_type_is_wep and \
self.is_wep_password_in_hex and other.is_wep_password_in_hex:
skip_attrs = skip_attrs + ('password',)
try:
if self.password.lower() != other.password.lower():
return False
except AttributeError:
return False
# Don't try to compare passwords when there's no security type
if self.security_type == self.__class__.SECURITY_TYPE_NONE and \
other.security_type == self.__class__.SECURITY_TYPE_NONE:
skip_attrs = skip_attrs + ('password',)
for attr in self.__class__.PROPERTIES:
if attr in skip_attrs:
continue
if getattr(self, attr, None) != getattr(other, attr, None):
#print('[%s] %s != %s' % (
# attr,
# getattr(self, attr, None),
# getattr(other, attr, None)
#))
return False
return True
def __eq__(self, other):
return self.eq(other)
def __ne__(self, other):
return not self == other
def __hash__(self):
return id(self)
def export(self):
"""Exports the most important settings attributes,
omitting any internal attributes.
"""
export = {}
for attr in self.__class__.PROPERTIES:
export[attr] = getattr(self, attr, None)
return export
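# Hedged usage sketch (not part of the original module): a typical flow is to populate
# the settings and call ensure_valid() before pushing them to a router, e.g.
#   settings = WirelessSettings()
#   settings.add_security_support(WirelessSettings.SECURITY_TYPE_WPA2)
#   settings.set_security_type(WirelessSettings.SECURITY_TYPE_WPA2)
#   settings.set_ssid('home-network')
#   settings.set_channel(6)
#   settings.set_password('a sufficiently long passphrase')
#   settings.ensure_valid()  # raises RouterSettingsError if any value is invalid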
|
71768
|
try:
from unittest import mock
except ImportError:
import mock
from rest_email_auth import serializers
@mock.patch(
"rest_email_auth.serializers.models.EmailAddress.send_confirmation",
autospec=True,
)
def test_create(mock_send_confirmation, user_factory):
"""
Test creating a new email address from the serializer.
Creating a new email address should also send a confirmation email
for the provided address. If the user does not have a primary email
address, the created one should be marked as the primary.
"""
user = user_factory()
data = {"email": "<EMAIL>"}
serializer = serializers.EmailSerializer(data=data)
assert serializer.is_valid()
email = serializer.save(user=user)
assert email.email == data["email"]
assert email.is_primary
# Make sure a confirmation email was sent
assert mock_send_confirmation.call_count == 1
@mock.patch(
"rest_email_auth.serializers.models.EmailAddress.send_duplicate_notification", # noqa
autospec=True,
)
def test_create_duplicate(
mock_duplicate_notification, email_factory, user_factory
):
"""
Attempting to add an email address that already exists should send a
notification to the existing email.
"""
email = email_factory()
user = user_factory()
data = {"email": email.email}
serializer = serializers.EmailSerializer(data=data)
assert serializer.is_valid(), serializer.errors
serializer.save(user=user)
assert mock_duplicate_notification.call_count == 1
assert mock_duplicate_notification.call_args[0] == (email,)
@mock.patch(
"rest_email_auth.serializers.models.EmailAddress.send_confirmation",
autospec=True,
)
def test_create_non_primary(
mock_send_confirmation, email_factory, user_factory
):
"""
If the user already has a primary email address, the created email
should not be marked as the user's primary.
"""
user = user_factory()
email_factory(is_primary=True, user=user)
data = {"email": "test<EMAIL>"}
serializer = serializers.EmailSerializer(data=data)
assert serializer.is_valid()
email = serializer.save(user=user)
assert email.email == data["email"]
assert not email.is_primary
# Make sure a confirmation email was sent
assert mock_send_confirmation.call_count == 1
def test_serialize(email_factory):
"""
Test serializing an email address.
"""
email = email_factory()
serializer = serializers.EmailSerializer(email)
expected = {
"id": email.id,
"created_at": email.created_at.isoformat(),
"email": email.email,
"is_primary": email.is_primary,
"is_verified": email.is_verified,
}
assert serializer.data == expected
def test_update_is_primary(email_factory):
"""
If an email address is verified, it should be able to be marked as
the user's primary address.
"""
email = email_factory(is_primary=False, is_verified=True)
data = {"is_primary": True}
serializer = serializers.EmailSerializer(email, data=data, partial=True)
assert serializer.is_valid()
with mock.patch.object(
email, "set_primary", autospec=True
) as mock_set_primary:
email = serializer.save()
assert mock_set_primary.call_count == 1
def test_update_is_primary_false(email_factory):
"""
Updating 'is_primary' to false should not call set_primary.
"""
email = email_factory(is_primary=True, is_verified=True)
data = {"is_primary": False}
serializer = serializers.EmailSerializer(email, data=data, partial=True)
assert serializer.is_valid()
with mock.patch.object(
email, "set_primary", autospec=True
) as mock_set_primary:
email = serializer.save()
assert mock_set_primary.call_count == 0
def test_validate_changed_email(email_factory):
"""
If a bound serializer attempts to change the email address of its
instance it should not be valid.
"""
email = email_factory(email="<EMAIL>")
data = {"email": "<EMAIL>"}
serializer = serializers.EmailSerializer(email, data=data)
assert not serializer.is_valid()
assert set(serializer.errors.keys()) == {"email"}
def test_validate_create_primary():
"""
Attempting to create a primary email address should not be valid. It
should only be valid to mark a verified email address as the primary
unless this is the user's first email.
"""
data = {"email": "<EMAIL>", "is_primary": True}
serializer = serializers.EmailSerializer(data=data)
assert not serializer.is_valid()
assert set(serializer.errors.keys()) == {"is_primary"}
def test_validate_email_lowercase_domain():
"""
    The email serializer should not change an email address with
a lowercase domain.
"""
email = "<EMAIL>"
serializer = serializers.EmailSerializer()
assert serializer.validate_email(email) == email
def test_validate_email_mixed_case_domain():
"""
If the domain portion of the email is mixed case, it should be
converted to lowercase.
"""
email = "<EMAIL>"
expected = "<EMAIL>"
serializer = serializers.EmailSerializer()
assert serializer.validate_email(email) == expected
def test_validate_make_unverified_primary(email_factory):
"""
Attempting to mark an existing but unverified email address as the
primary should not be valid.
"""
email = email_factory(is_primary=False, is_verified=False)
data = {"is_primary": True}
serializer = serializers.EmailSerializer(email, data=data, partial=True)
assert not serializer.is_valid()
assert set(serializer.errors.keys()) == {"is_primary"}
|
71802
|
from __future__ import print_function
import codecs
import logging
import os
import sys
from optparse import OptionParser
from pypugjs.utils import process
def convert_file():
support_compilers_list = [
'django',
'jinja',
'underscore',
'mako',
'tornado',
'html',
]
available_compilers = {}
for i in support_compilers_list:
try:
compiler_class = __import__(
'pypugjs.ext.%s' % i, fromlist=['pypugjs']
).Compiler
except ImportError as e:
logging.warning(e)
else:
available_compilers[i] = compiler_class
usage = "usage: %prog [options] [file [output]]"
parser = OptionParser(usage)
parser.add_option(
"-o", "--output", dest="output", help="Write output to FILE", metavar="FILE"
)
# use a default compiler here to sidestep making a particular
# compiler absolutely necessary (ex. django)
default_compiler = sorted(available_compilers.keys())[0]
parser.add_option(
"-c",
"--compiler",
dest="compiler",
choices=list(available_compilers.keys()),
default=default_compiler,
type="choice",
help=(
"COMPILER must be one of %s, default is %s"
% (', '.join(list(available_compilers.keys())), default_compiler)
),
)
parser.add_option(
"-e",
"--ext",
dest="extension",
help="Set import/extends default file extension",
metavar="FILE",
)
options, args = parser.parse_args()
file_output = options.output or (args[1] if len(args) > 1 else None)
compiler = options.compiler
if options.extension:
extension = '.%s' % options.extension
elif options.output:
extension = os.path.splitext(options.output)[1]
else:
extension = None
if compiler in available_compilers:
import six
if len(args) >= 1:
template = codecs.open(args[0], 'r', encoding='utf-8').read()
elif six.PY3:
template = sys.stdin.read()
else:
template = codecs.getreader('utf-8')(sys.stdin).read()
output = process(
template,
compiler=available_compilers[compiler],
staticAttrs=True,
extension=extension,
)
if file_output:
outfile = codecs.open(file_output, 'w', encoding='utf-8')
outfile.write(output)
elif six.PY3:
sys.stdout.write(output)
else:
codecs.getwriter('utf-8')(sys.stdout).write(output)
else:
raise Exception('You must have %s installed!' % compiler)
if __name__ == '__main__':
convert_file()
|
71809
|
from hachoir_parser.game.zsnes import ZSNESFile
from hachoir_parser.game.spider_man_video import SpiderManVideoFile
from hachoir_parser.game.laf import LafFile
from hachoir_parser.game.blp import BLP1File, BLP2File
from hachoir_parser.game.uasset import UAssetFile
|
71812
|
import csv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from math import ceil
from constants import ENV_NAMES
import seaborn # sets some style parameters automatically
COLORS = [(57, 106, 177), (218, 124, 48)]
def switch_to_outer_plot(fig):
ax0 = fig.add_subplot(111, frame_on=False)
ax0.set_xticks([])
ax0.set_yticks([])
return ax0
def ema(data_in, smoothing=0):
data_out = np.zeros_like(data_in)
curr = np.nan
for i in range(len(data_in)):
x = data_in[i]
if np.isnan(curr):
curr = x
else:
curr = (1 - smoothing) * x + smoothing * curr
data_out[i] = curr
return data_out
def plot_data_mean_std(ax, data_y, color_idx=0, data_x=None, x_scale=1, smoothing=0, first_valid=0, label=None):
color = COLORS[color_idx]
hexcolor = '#%02x%02x%02x' % color
data_y = data_y[:,first_valid:]
nx, num_datapoint = np.shape(data_y)
if smoothing > 0:
for i in range(nx):
data_y[i,...] = ema(data_y[i,...], smoothing)
if data_x is None:
data_x = (np.array(range(num_datapoint)) + first_valid) * x_scale
data_mean = np.mean(data_y, axis=0)
data_std = np.std(data_y, axis=0, ddof=1)
ax.plot(data_x, data_mean, color=hexcolor, label=label, linestyle='solid', alpha=1, rasterized=True)
ax.fill_between(data_x, data_mean - data_std, data_mean + data_std, color=hexcolor, alpha=.25, linewidth=0.0, rasterized=True)
def read_csv(filename, key_name):
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
key_index = -1
values = []
for line_num, row in enumerate(csv_reader):
row = [x.lower() for x in row]
if line_num == 0:
idxs = [i for i, val in enumerate(row) if val == key_name]
key_index = idxs[0]
else:
values.append(row[key_index])
return np.array(values, dtype=np.float32)
def plot_values(ax, all_values, title=None, max_x=0, label=None, **kwargs):
if max_x > 0:
all_values = all_values[...,:max_x]
if ax is not None:
plot_data_mean_std(ax, all_values, label=label, **kwargs)
ax.set_title(title)
return all_values
def plot_experiment(run_directory_prefix, titles=None, suffixes=[''], normalization_ranges=None, key_name='eprewmean', **kwargs):
run_folders = [f'{run_directory_prefix}{x}' for x in range(3)]
num_envs = len(ENV_NAMES)
will_normalize_and_reduce = normalization_ranges is not None
if will_normalize_and_reduce:
num_visible_plots = 1
f, axarr = plt.subplots()
else:
num_visible_plots = num_envs
dimx = dimy = ceil(np.sqrt(num_visible_plots))
f, axarr = plt.subplots(dimx, dimy, sharex=True)
for suffix_idx, suffix in enumerate(suffixes):
all_values = []
game_weights = [1] * num_envs
for env_idx in range(num_envs):
env_name = ENV_NAMES[env_idx]
label = suffix if env_idx == 0 else None # only label the first graph to avoid legend duplicates
print(f'loading results from {env_name}...')
if num_visible_plots == 1:
ax = axarr
else:
dimy = len(axarr[0])
ax = axarr[env_idx // dimy][env_idx % dimy]
csv_files = [f"results/{resid}/progress-{env_name}{'-' if len(suffix) > 0 else ''}{suffix}.csv" for resid in run_folders]
curr_ax = None if will_normalize_and_reduce else ax
raw_data = np.array([read_csv(file, key_name) for file in csv_files])
values = plot_values(curr_ax, raw_data, title=env_name, color_idx=suffix_idx, label=label, **kwargs)
if will_normalize_and_reduce:
game_range = normalization_ranges[env_name]
game_min = game_range[0]
game_max = game_range[1]
game_delta = game_max - game_min
sub_values = game_weights[env_idx] * (np.array(values) - game_min) / (game_delta)
all_values.append(sub_values)
if will_normalize_and_reduce:
normalized_data = np.sum(all_values, axis=0)
normalized_data = normalized_data / np.sum(game_weights)
title = 'Mean Normalized Score'
plot_values(ax, normalized_data, title=None, color_idx=suffix_idx, label=suffix, **kwargs)
if len(suffixes) > 1:
if num_visible_plots == 1:
ax.legend(loc='lower right')
else:
f.legend(loc='lower right', bbox_to_anchor=(.5, 0, .5, 1))
return f, axarr
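if __name__ == "__main__":
    # Hedged sanity check (not part of the original plotting script): ema() applies an
    # exponential moving average; with smoothing=0.5 the series [0, 1, 2, 3] becomes
    # [0, 0.5, 1.25, 2.125].
    print(ema(np.array([0., 1., 2., 3.], dtype=np.float32), smoothing=0.5))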
|
71813
|
import torch
from torch import nn, Tensor
from torch.nn import functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, c1, c2, s=1, downsample= None, no_relu=False) -> None:
super().__init__()
self.conv1 = nn.Conv2d(c1, c2, 3, s, 1, bias=False)
self.bn1 = nn.BatchNorm2d(c2)
self.conv2 = nn.Conv2d(c2, c2, 3, 1, 1, bias=False)
self.bn2 = nn.BatchNorm2d(c2)
self.downsample = downsample
self.no_relu = no_relu
def forward(self, x: Tensor) -> Tensor:
identity = x
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
if self.downsample is not None: identity = self.downsample(x)
out += identity
return out if self.no_relu else F.relu(out)
class Bottleneck(nn.Module):
expansion = 2
def __init__(self, c1, c2, s=1, downsample=None, no_relu=False) -> None:
super().__init__()
self.conv1 = nn.Conv2d(c1, c2, 1, bias=False)
self.bn1 = nn.BatchNorm2d(c2)
self.conv2 = nn.Conv2d(c2, c2, 3, s, 1, bias=False)
self.bn2 = nn.BatchNorm2d(c2)
self.conv3 = nn.Conv2d(c2, c2 * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(c2 * self.expansion)
self.downsample = downsample
self.no_relu = no_relu
def forward(self, x: Tensor) -> Tensor:
identity = x
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
if self.downsample is not None: identity = self.downsample(x)
out += identity
return out if self.no_relu else F.relu(out)
class ConvBN(nn.Sequential):
def __init__(self, c1, c2, k, s=1, p=0):
super().__init__(
nn.Conv2d(c1, c2, k, s, p, bias=False),
nn.BatchNorm2d(c2)
)
class Conv2BN(nn.Sequential):
def __init__(self, c1, ch, c2, k, s=1, p=0):
super().__init__(
nn.Conv2d(c1, ch, k, s, p, bias=False),
nn.BatchNorm2d(ch),
nn.ReLU(),
nn.Conv2d(ch, c2, k, s, p, bias=False),
nn.BatchNorm2d(c2)
)
class Stem(nn.Sequential):
def __init__(self, c1, c2):
super().__init__(
nn.Conv2d(c1, c2, 3, 2, 1),
nn.BatchNorm2d(c2),
nn.ReLU(),
nn.Conv2d(c2, c2, 3, 2, 1),
nn.BatchNorm2d(c2),
nn.ReLU()
)
class Scale(nn.Sequential):
def __init__(self, c1, c2, k, s=None, p=0):
super().__init__(
nn.AvgPool2d(k, s, p, ),
nn.BatchNorm2d(c1),
nn.ReLU(),
nn.Conv2d(c1, c2, 1, bias=False)
)
class ConvModule(nn.Sequential):
def __init__(self, c1, c2, k, s=1, p=0):
super().__init__(
nn.BatchNorm2d(c1),
nn.ReLU(),
nn.Conv2d(c1, c2, k, s, p, bias=False)
)
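# DAPPM (Deep Aggregation Pyramid Pooling Module): pools the input at several
# scales, upsamples each branch back to the input resolution, fuses the branches
# progressively, and adds a 1x1 shortcut of the input.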
class DAPPM(nn.Module):
def __init__(self, c1, ch, c2):
super().__init__()
self.scale1 = Scale(c1, ch, 5, 2, 2)
self.scale2 = Scale(c1, ch, 9, 4, 4)
self.scale3 = Scale(c1, ch, 17, 8, 8)
self.scale4 = Scale(c1, ch, 1)
self.scale0 = ConvModule(c1, ch, 1)
self.process1 = ConvModule(ch, ch, 3, 1, 1)
self.process2 = ConvModule(ch, ch, 3, 1, 1)
self.process3 = ConvModule(ch, ch, 3, 1, 1)
self.process4 = ConvModule(ch, ch, 3, 1, 1)
self.compression = ConvModule(ch*5, c2, 1)
self.shortcut = ConvModule(c1, c2, 1)
def forward(self, x: Tensor) -> Tensor:
outs = [self.scale0(x)]
outs.append(self.process1((F.interpolate(self.scale1(x), size=x.shape[-2:], mode='bilinear', align_corners=True) + outs[-1])))
outs.append(self.process2((F.interpolate(self.scale2(x), size=x.shape[-2:], mode='bilinear', align_corners=True) + outs[-1])))
outs.append(self.process3((F.interpolate(self.scale3(x), size=x.shape[-2:], mode='bilinear', align_corners=True) + outs[-1])))
outs.append(self.process4((F.interpolate(self.scale4(x), size=x.shape[-2:], mode='bilinear', align_corners=True) + outs[-1])))
out = self.compression(torch.cat(outs, dim=1)) + self.shortcut(x)
return out
class SegHead(nn.Module):
def __init__(self, c1, ch, c2, scale_factor=None):
super().__init__()
self.bn1 = nn.BatchNorm2d(c1)
self.conv1 = nn.Conv2d(c1, ch, 3, 1, 1, bias=False)
self.bn2 = nn.BatchNorm2d(ch)
self.conv2 = nn.Conv2d(ch, c2, 1)
self.scale_factor = scale_factor
def forward(self, x: Tensor) -> Tensor:
x = self.conv1(F.relu(self.bn1(x)))
x = self.conv2(F.relu(self.bn2(x)))
if self.scale_factor is not None:
H, W = x.shape[-2] * self.scale_factor, x.shape[-1] * self.scale_factor
x = F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True)
return x
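# DDRNet: a dual-resolution segmentation network. A low-resolution context branch
# (layer3..layer5 + DAPPM) and a high-resolution detail branch (layer3_..layer5_)
# exchange information through the down/compression blocks and are summed before
# the final segmentation head; an auxiliary head is only used during training.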
class DDRNet(nn.Module):
def __init__(self, backbone: str = None, num_classes: int = 19) -> None:
super().__init__()
planes, spp_planes, head_planes = [32, 64, 128, 256, 512], 128, 64
self.conv1 = Stem(3, planes[0])
self.layer1 = self._make_layer(BasicBlock, planes[0], planes[0], 2)
self.layer2 = self._make_layer(BasicBlock, planes[0], planes[1], 2, 2)
self.layer3 = self._make_layer(BasicBlock, planes[1], planes[2], 2, 2)
self.layer4 = self._make_layer(BasicBlock, planes[2], planes[3], 2, 2)
self.layer5 = self._make_layer(Bottleneck, planes[3], planes[3], 1, 2)
self.layer3_ = self._make_layer(BasicBlock, planes[1], planes[1], 2)
self.layer4_ = self._make_layer(BasicBlock, planes[1], planes[1], 2)
self.layer5_ = self._make_layer(Bottleneck, planes[1], planes[1], 1)
self.compression3 = ConvBN(planes[2], planes[1], 1)
self.compression4 = ConvBN(planes[3], planes[1], 1)
self.down3 = ConvBN(planes[1], planes[2], 3, 2, 1)
self.down4 = Conv2BN(planes[1], planes[2], planes[3], 3, 2, 1)
self.spp = DAPPM(planes[-1], spp_planes, planes[2])
self.seghead_extra = SegHead(planes[1], head_planes, num_classes, 8)
self.final_layer = SegHead(planes[2], head_planes, num_classes, 8)
self.apply(self._init_weights)
def _init_weights(self, m: nn.Module) -> None:
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def init_pretrained(self, pretrained: str = None) -> None:
if pretrained:
self.load_state_dict(torch.load(pretrained, map_location='cpu'), strict=False)
def _make_layer(self, block, inplanes, planes, depths, s=1) -> nn.Sequential:
downsample = None
if inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(inplanes, planes * block.expansion, 1, s, bias=False),
nn.BatchNorm2d(planes * block.expansion)
)
layers = [block(inplanes, planes, s, downsample)]
inplanes = planes * block.expansion
for i in range(1, depths):
if i == depths - 1:
layers.append(block(inplanes, planes, no_relu=True))
else:
                layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x: Tensor) -> Tensor:
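        # x: low-resolution context branch; x_: high-resolution detail branch.
        # The branches exchange features via down3/down4 (detail -> context) and
        # compression3/compression4 (context -> detail) before the final fusion.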
H, W = x.shape[-2] // 8, x.shape[-1] // 8
layers = []
x = self.conv1(x)
x = self.layer1(x)
layers.append(x)
x = self.layer2(F.relu(x))
layers.append(x)
x = self.layer3(F.relu(x))
layers.append(x)
x_ = self.layer3_(F.relu(layers[1]))
x = x + self.down3(F.relu(x_))
x_ = x_ + F.interpolate(self.compression3(F.relu(layers[2])), size=(H, W), mode='bilinear', align_corners=True)
if self.training: x_aux = self.seghead_extra(x_)
x = self.layer4(F.relu(x))
layers.append(x)
x_ = self.layer4_(F.relu(x_))
x = x + self.down4(F.relu(x_))
x_ = x_ + F.interpolate(self.compression4(F.relu(layers[3])), size=(H, W), mode='bilinear', align_corners=True)
x_ = self.layer5_(F.relu(x_))
x = F.interpolate(self.spp(self.layer5(F.relu(x))), size=(H, W), mode='bilinear', align_corners=True)
x_ = self.final_layer(x + x_)
return (x_, x_aux) if self.training else x_
if __name__ == '__main__':
model = DDRNet()
# model.init_pretrained('checkpoints/backbones/ddrnet/ddrnet_23slim.pth')
# model.load_state_dict(torch.load('checkpoints/pretrained/ddrnet/ddrnet_23slim_city.pth', map_location='cpu'))
x = torch.zeros(2, 3, 224, 224)
outs = model(x)
for y in outs:
print(y.shape)
|
71846
|
from datetime import datetime
from datetime import timezone
from uuid import UUID
from bson import ObjectId
UUID_1_EPOCH = datetime(1582, 10, 15, tzinfo=timezone.utc)
UUID_TICKS = 10000000
UUID_VARIANT_1 = 0b1000000000000000
def is_uuid(candidate):
"""Determine if this is a uuid"""
try:
UUID(candidate)
return True
except ValueError:
return False
def validate_uuid(function):
def validate(**kwargs):
candidate = kwargs.get("id_")
if not is_uuid(candidate):
return f"ID: {candidate} is not a valid UUID", 400
else:
return function(**kwargs)
return validate
def convert_objectid_to_uuid(object_id):
"""Convert an ObjectId to a UUID"""
if isinstance(object_id, str) and not is_uuid(object_id) and ObjectId.is_valid(object_id):
object_id = ObjectId(object_id)
if not isinstance(object_id, ObjectId):
return object_id
unix_time = object_id.generation_time.astimezone(timezone.utc)
hex_string = str(object_id)
counter = int(hex_string[18:], 16)
uuid_time = "1{:015x}".format(
int((unix_time + (unix_time - UUID_1_EPOCH)).timestamp() * UUID_TICKS)
)
uuid_clock = "{:04x}".format(UUID_VARIANT_1 | (counter & 0x3FFF))
uuid_node = "1" + hex_string[8:18].rjust(11, "0")
string_uuid = "{}-{}-{}-{}-{}".format(
uuid_time[-8:], uuid_time[4:8], uuid_time[:4], uuid_clock, uuid_node
)
converted_uuid = UUID(string_uuid)
return str(converted_uuid)
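# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of convert_objectid_to_uuid; the ObjectId hex string
# below is made up purely for demonstration.
if __name__ == "__main__":
    sample = ObjectId("5f43a1b2c3d4e5f6a7b8c9d0")
    converted = convert_objectid_to_uuid(sample)
    print(converted)            # a UUID string derived from the ObjectId
    print(is_uuid(converted))   # True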
|
71897
|
from matplotlib.offsetbox import AnchoredOffsetbox, AuxTransformBox, VPacker,\
TextArea, AnchoredText, DrawingArea, AnnotationBbox
from mpl_toolkits.axes_grid1.anchored_artists import \
AnchoredDrawingArea, AnchoredAuxTransformBox, \
AnchoredEllipse, AnchoredSizeBar
|
72068
|
import os
import shutil
import subprocess
import sys
import re
import glob
from colors import prGreen,prCyan,prRed
TRACES_DIR = './.fpchecker/traces'
TRACES_FILES = TRACES_DIR+'/'+'trace'
STRACE = 'strace'
SUPPORTED_COMPILERS = set([
'nvcc',
'c++',
'cc',
'gcc',
'g++',
'xlc',
'xlC',
'xlc++',
'xlc_r',
'xlc++_r',
'mpic',
'mpic++',
'mpicxx',
'mpicc',
'mpixlc',
'mpixlC',
'mpixlf',
'mpif77',
'mpif90',
'clang',
'clang++',
'gfortran',
'xlf',
'xlf-gpu',
'xlf2003',
'xlf2003-gpu',
'xlf2003_r',
'xlf2003_r-gpu',
'xlf2008',
'xlf2008-gpu',
'xlf2008_r',
'xlf2008_r-gpu',
'xlf90',
'xlf90-gpu',
'xlf90_r',
'xlf90_r-gpu',
'xlf95',
'xlf95-gpu',
'xlf95_r',
'xlf95_r-gpu',
'xlf_r',
'xlf_r-gpu'
])
SUPPORTED_TOOLS = set([
'ar',
'ranlib',
'bin2c'
])
# Examples of top commands
# [pid 83362] execve("/usr/tce/packages/cuda/cuda-9.2.148/bin/nvcc",
# [pid 63885] execve("/bin/sh", ["/bin/sh", "-c", "cd /usr/workspace/wsa/laguna/fpchecker/FPChecker/tests/tracing_tool/dynamic/test_cmake_simple/build/src/util && /usr/tcetmp/bin/c++ -o CMakeFiles/util.dir/util.cpp.o -c /usr/workspace/wsa/laguna/fpchecker/FPChecker/tests/tracing_tool/dynamic/test_cmake_simple/src/util/util.cpp"]
# Saves Compilation commands
class CommandsTracing:
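    # Wraps the build command in `strace -ff` so every spawned process writes its
    # own trace file, then walks the vfork()/clone() process tree starting from
    # the root make invocation to recover compiler commands together with the
    # working directory (chdir) in effect when they were executed.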
#open("/usr/tcetmp/packages/spack/opt/spack/linux-redhat7-ppc64le/gcc-4.8.5/gcc-4.9.3-3clrxj5wz2i54h
#[pid 8690] execve("/usr/tcetmp/bin/c++", ["/usr/tcetmp/bin/c++", "CMakeFiles/main.dir/src/main.cpp.o", "-o", "main"]
    pidPattern = re.compile(r"^\[pid\s+[0-9]+\] ")
# clone(child_stack=NULL, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0x200000044f60) = 55734
# vfork() = 55729
    childSpawn_clone = re.compile(r"^clone\(.+=\s+[0-9]+")
    childSpawn_fork = re.compile(r"^vfork\(\).+=\s+[0-9]+")
# Chdir call
# chdir("/usr/workspace/wsa/laguna/fpchecker/clang_tool/wrapper/apps/RAJA_perf/RAJAPerf/build_ilaguna_build/tpl/RAJA") = 0
    chdirPattern = re.compile(r"^chdir\(.+\s+=\s+[0-9]+")
# Fork from root:
# vfork(strace: Process 22625 attached
# Other forks:
# [pid 95927] stat("/usr/gapps/resmpi/llvm/ppc64le/llvm-openmp-trunk-install/lib/tls/power9/altivec", strace: Process 95932 attached
# [pid 22631] vfork(strace: Process 22634 attached
# [pid 78391] clone(child_stack=NULL, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0x200000044f60) = 78392
# [pid 86430] clone(strace: Process 86431 attached
#attachPattern1 = re.compile("vfork\(strace\:\s+Process\s+[0-9]+\s+attached")
#attachPattern_clone = re.compile("clone\(.+=\s+[0-9]+")
#attachPattern_attach = re.compile("Process\s+[0-9]+\s+attached")
# Process creation patterns:
# We trace vfork() and clone()
#[pid 69813] clone(child_stack=NULL, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0x200000044f60) = 69814
# [pid 129570] <... clone resumed>child_stack=NULL, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0x200000044f60) = 129601
#[pid 69807] <... vfork resumed>) = 69808
    childCreationPattern_clone_1 = re.compile(r"^\[pid\s+[0-9]+\] clone\(.+=\s+[0-9]+")
    childCreationPattern_clone_2 = re.compile(r"^\[pid\s+[0-9]+\] \<\.\.\. clone resumed\>.+=\s+[0-9]+")
    childCreationPattern_fork = re.compile(r"^\[pid\s+[0-9]+\] .+vfork.+=\s+[0-9]+")
    readPattern = re.compile(r"^\[pid\s+[0-9]+\] read\(")
    writePattern = re.compile(r"^\[pid\s+[0-9]+\] write\(")
def __init__(self, make_command):
self.traced_commands = []
self.make_command = make_command
self.childTree = {}
self.parentTree = {}
self.tracedPIDs = set([])
def getTracesDir(self):
return TRACES_DIR
def isChildSpawn(self, line):
child_fork = self.childSpawn_fork.search(line)
child_clone = self.childSpawn_clone.search(line)
pid = None
if child_fork != None or child_clone != None:
pid = line.split()[-1:][0]
return pid
def isMakeCommand(self, line):
ret = False
if "execve(\"" in line:
# execve("/usr/tcetmp/bin/make", ["make", "-j"], 0x7fffffffb780 /* 128 vars */) = 0
cmd = line.split(', [')[1].split('], ')[0]
cmd = cmd.replace('"','')
cmd = cmd.replace(',','')
cmd = cmd.split()
#print(cmd, self.make_command)
if cmd == self.make_command:
return True
return ret
def getRootFile(self):
# Find root file
files = glob.glob(TRACES_DIR+'/trace.*')
root_file = ''
for f in files:
#print('Checking', f)
with open(f) as fd:
first_line = fd.readline()
if self.isMakeCommand(first_line):
root_file = f
break
#print('Root file', root_file)
if root_file == '':
prRed('Error: root file not found')
exit(-1)
return root_file
# Check if it is a chdir() system call
# chdir("/usr/workspace/wsa/laguna/fpchecker/clang_tool/wrapper/apps/RAJA_perf/RAJAPerf/build_ilaguna_build/tpl/RAJA") = 0
def isChangeDir(self, line):
chdir_found = self.chdirPattern.search(line)
newDir = None
if chdir_found != None:
if line.split()[2] == '0': # check it ends with 0
newDir = line
return newDir
# Old implementation of recursive search
# It has a bug on the cwd (it's kept for any recent process)
# We want to unset the cwd once the process examination exits
#
# def recursiveTreeTraversal(self, fileName):
# with open(fileName) as fd:
# for line in fd:
# # Save current dir
# cwd = self.isChangeDir(line)
# if cwd != None:
# print('Found chdir: ', cwd, 'file:', fileName)
# self.currentWorkingDir = cwd
#
# # Check if it's a top command, and it if so
# topCmd = self.isTopCommand(line)
# if topCmd != None:
# # Add CWD and command
# print('Adding:')
# print('self.currentWorkingDir: ', self.currentWorkingDir)
# print('line:', line)
# self.traced_commands.append((self.currentWorkingDir, line))
# return
#
# # Check if child is created
# childPID = self.isChildSpawn(line)
# if childPID != None:
# childFileName = TRACES_DIR + '/trace.' + childPID
# self.recursiveTreeTraversal(childFileName)
def recursiveTreeTraversal(self, fileName, chdirCmd):
lastSeenCHDIR = chdirCmd
with open(fileName) as fd:
for line in fd:
# Save current dir
cwd = self.isChangeDir(line)
if cwd != None:
lastSeenCHDIR = cwd
                # Check if it's a top command, and add it if so
topCmd = self.isTopCommand(line)
if topCmd != None:
# Add CWD and command
self.traced_commands.append((lastSeenCHDIR, line))
return
# Check if child is created
childPID = self.isChildSpawn(line)
if childPID != None:
childFileName = TRACES_DIR + '/trace.' + childPID
self.recursiveTreeTraversal(childFileName, lastSeenCHDIR)
def analyzeTraces(self):
#prveTreeTraversal(root_file)
prGreen('Searching root PID...')
root_file = self.getRootFile()
print('root:', root_file)
prGreen('Analyzing traces...')
self.recursiveTreeTraversal(root_file, '')
def getProcessID(self, line):
p = self.pidPattern.match(line)
#print('match', p)
if p != None:
pid = line.split()[1].split(']')[0]
else:
pid = 'root'
return pid
def buildChildTree(self, line):
pid = self.getProcessID(line)
child = None
child_clone_1 = self.childCreationPattern_clone_1.search(line)
child_clone_2 = self.childCreationPattern_clone_2.search(line)
child_fork = self.childCreationPattern_fork.search(line)
read_pattern = self.readPattern.search(line)
write_pattern = self.writePattern.search(line)
if child_clone_1 != None and read_pattern == None and write_pattern == None:
child = line.split()[-1:][0]
elif child_clone_2 != None and read_pattern == None and write_pattern == None:
child = line.split()[-1:][0]
elif child_fork != None and read_pattern == None and write_pattern == None:
child = line.split()[-1:][0]
if child != None: # found child creation
if pid not in self.childTree:
self.childTree[pid] = [child]
else:
self.childTree[pid].append(child)
self.parentTree[child] = pid
if pid in self.tracedPIDs:
self.tracedPIDs.add(child)
def isASupportedCompiler(self, line):
for compiler in SUPPORTED_COMPILERS:
if line.endswith('/'+compiler): #or line == compiler:
return True
for tool in SUPPORTED_TOOLS:
if line.endswith('/'+tool):
return True
return False
# If it's a top command we do not trace their child commands
def isTopCommand(self, line):
baseExecutable = None
if "execve(\"" in line:
strCmd = line.split('execve(')[1].split(',')[0]
# Shell command
if strCmd.endswith('/sh"'):
cmd = line.split('["')[1].split(']')[0]
cmd = cmd.replace(', ','')
cmd = cmd.replace('"', '')
tokens = cmd.split()
for t in tokens:
if self.isASupportedCompiler(t):
baseExecutable = ' '.join(tokens)
strCmd = strCmd.replace('"', '')
if self.isASupportedCompiler(strCmd):
baseExecutable = strCmd
return baseExecutable
# [pid 78395] write(1, "[ 33%] Linking CXX static library libutil.a\n", 44[ 33%] Linking CXX static library libutil.a
def printStdOut(self, line):
if 'write(1' in line:
if 'Building' in line or 'Linking' in line:
l = line.split(', ')[1].replace('"','')
prGreen(l)
def saveCompilingCommands(self, l):
#l = line.decode('utf-8')
pid = self.getProcessID(l)
cmd = self.isTopCommand(l)
if cmd != None:
if pid not in self.tracedPIDs:
self.tracedPIDs.add(pid)
self.traced_commands.append(l)
#print('-->', cmd)
self.buildChildTree(l)
self.printStdOut(l)
    # Check if the command invokes changing directories
# If not, we change to the CWD
    def commandInvokesChangeDir(self, line):
tokens = line.split()
if 'cd' in tokens:
idx = tokens.index('cd')
path = tokens[idx+1]
if os.path.exists(path):
return True
return False
def formatCommandForExecution(self, cwd, line):
if line.startswith('execve('):
line = line.split(', [')[1:]
line = ' '.join(line).split(']')[0]
line = line.replace(', ',' ')
line = line.replace('"', '')
line = line.replace('\\', '')
# Split commands if needed
        allCommands = re.split(r'\&\&|\;', line)
newCommand = []
for cmd in allCommands:
if '/sh -c' in cmd:
cmd = ' '.join(cmd.split()[2:]) # remove /bin/sh -c
if '-E ' in cmd: # Remove commands that only run the preprocessor with -E
continue
            if not self.commandInvokesChangeDir(line):
cmd = 'cd ' + cwd + ' && ' + cmd
newCommand.append(cmd)
line = ' && '.join(newCommand)
return line
def writeToFile(self):
fileNameRaw = TRACES_DIR + '/raw_traces.txt'
prGreen('Saving raw traces in '+fileNameRaw)
fd = open(fileNameRaw, 'w')
for line in self.traced_commands:
fd.write(str(line)+'\n')
fd.close()
fileNameExec = TRACES_DIR + '/executable_traces.txt'
prGreen('Saving executable traces in '+fileNameExec)
fd = open(fileNameExec, 'w')
for l in self.traced_commands:
#line = l[1]
cwd, line = l
#if l[0] != '':
# cwd = l[0].split('"')[1]
#else:
# cwd = '.'
if cwd != '':
cwd = cwd.split('"')[1]
else:
cwd = '.'
line = self.formatCommandForExecution(cwd, line)
fd.write(line+'\n')
fd.close()
def replayTraces(self, fileName):
fd = open(fileName, 'r')
for line in fd:
self.saveCompilingCommands(line)
fd.close()
def createTracesDir(self):
if os.path.exists(TRACES_DIR):
shutil.rmtree(TRACES_DIR)
os.makedirs(TRACES_DIR)
def startTracing(self):
self.createTracesDir()
trace_command = [STRACE, '-o', TRACES_FILES, '-ff', '-s', '9999'] + self.make_command
process = subprocess.Popen(trace_command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Poll process for new output until finished
c = 0
while True:
nextline = process.stdout.readline()
#nextline = process.stderr.readline()
if process.poll() is not None or nextline.decode('utf-8') == '':
break
l = nextline.decode('utf-8')[:-1]
print(l)
#self.saveCompilingCommands(l)
#fd.write(l)
(stdout_data, stderr_data) = process.communicate()
exitCode = process.returncode
if (exitCode == 0):
return (stdout_data, stderr_data)
else:
sys.exit('Error in input: ' + str(self.make_command))
if __name__ == '__main__':
#l = 'execve("/usr/tce/packages/cuda/cuda-9.2.148/bin/nvcc", ["/usr/tce/packages/cuda/cuda-9.2.148/bin/nvcc", "-ccbin=clang++", "-restrict", "-gencode=arch=compute_70,code=sm_70", "-O3", "--expt-extended-lambda", "-Xcompiler=-fPIC", "-Wno-deprecated-gpu-targets", "-shared", "-dlink", "CMakeFiles/kripke.exe.dir/src/kripke.cpp.o", "-o", "CMakeFiles/kripke.exe.dir/cmake_device_link.o", "-L/usr/tce/packages/cuda/cuda-9.2.148/nvidia/targets/ppc64le-linux/lib/stubs", "-L/usr/tce/packages/cuda/cuda-9.2.148/nvidia/targets/ppc64le-linux/lib", "lib/libchai.a", "lib/libRAJA.a", "/usr/tce/packages/cuda/cuda-9.2.148/lib64/libcudart_static.a", "-lpthread", "-ldl", "lib/libkripke.a", "lib/libumpire.a", "-lcudadevrt", "-lcudart_static", "-lrt"], 0x7fffffffb8b8 /* 129 vars */) = 0\n'
#strace = CommandsTracing(['make', '-j'])
#ret = strace.isTopCommand(l)
#print(l)
#print('ret:', ret)
#exit()
#strace.analyzeTraces()
#strace.traced_commands.append(('', l))
#strace.writeToFile()
#exit()
cmd = sys.argv[1:]
strace = CommandsTracing(cmd)
#strace.startTracing()
strace.analyzeTraces()
strace.writeToFile()
|
72070
|
from .cifar import Cifar10DataProvider, Cifar100DataProvider, \
Cifar10AugmentedDataProvider, Cifar100AugmentedDataProvider
from .svhn import SVHNDataProvider
def get_data_provider_by_name(name, train_params):
"""Return required data provider class"""
if name == 'C10':
return Cifar10DataProvider(**train_params)
if name == 'C10+':
return Cifar10AugmentedDataProvider(**train_params)
if name == 'C100':
return Cifar100DataProvider(**train_params)
if name == 'C100+':
return Cifar100AugmentedDataProvider(**train_params)
if name == 'SVHN':
return SVHNDataProvider(**train_params)
else:
print("Sorry, data provider for `%s` dataset "
"was not implemented yet" % name)
exit()
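# Hedged usage sketch (illustrative only; the accepted train_params keys depend
# on the provider classes defined in .cifar and .svhn):
# provider = get_data_provider_by_name('C10+', {'batch_size': 64})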
|
72100
|
class Socks5Error(Exception):
pass
class NoVersionAllowed(Socks5Error):
pass
class NoCommandAllowed(Socks5Error):
pass
class NoATYPAllowed(Socks5Error):
pass
class AuthenticationError(Socks5Error):
pass
class NoAuthenticationAllowed(AuthenticationError):
pass
|
72115
|
import os
import sys
import cv2
import numpy as np
from imageio import imread
import json
import argparse
import visualization.visualizer.shader
from PyQt5 import QtWidgets, QtGui, QtOpenGL
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon
import PyQt5.QtCore as QtCore
import glm
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from visualization.visualizer.Viewer import Utils
from visualization.visualizer.Viewer import LayoutView
class TopWindow(QMainWindow):
def __init__(self, img, layout, floor_reverse=False, parent=None):
super().__init__(parent)
sizeObject = QtWidgets.QDesktopWidget().screenGeometry(-1)
[self.h, self.w] = [sizeObject.height(), sizeObject.width()]
ratio = 0.9
self.h = int(self.h * ratio)
self.w = int(self.w * ratio)
self.setGeometry(20, 60, self.w, self.h)
self.setWindowTitle("Layout Visualizer")
self.centeralWidget = QWidget(self)
self.layout = layout
self.LayoutViewer = LayoutView.GLWindow(img, main=self, parent=self.centeralWidget)
wallNum, wallPoints, lines, mesh = Utils.Label2Mesh(Utils.OldFormat2Mine(self.layout), floor_reverse)
self.LayoutViewer.updateLayoutMesh(wallNum, wallPoints, lines, mesh)
layout = QGridLayout()
layout.setRowStretch(0, 1)
layout.setColumnStretch(0, 1)
layout.addWidget(self.LayoutViewer, 0, 0, 1, 1)
self.centeralWidget.setLayout(layout)
self.setCentralWidget(self.centeralWidget)
def enterEvent(self, event):
self.setFocus(True)
def visualize_3d(layout, img):
app = QtWidgets.QApplication(sys.argv)
window = TopWindow(img, layout=layout)
window.show()
# cv2.waitKey()
sys.exit(app.exec_())
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='360 Layout Visualizer',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--img', type=str, required=True, help='The panorama path')
parser.add_argument('--json', type=str, required=True, help='The output json path')
args = parser.parse_args()
img = imread(args.img, pilmode='RGB')
with open(args.json, 'r') as f:
layout = json.load(f)
visualize_3d(layout, img)
|
72127
|
import unittest
try:
import torch_butterfly
BUTTERFLY = True
except ImportError:
BUTTERFLY = False
class TestCase(unittest.TestCase):
def test(self, butterfly=False):
pass
@unittest.skipIf(not BUTTERFLY, "torch_butterfly not found")
def test_butterfly(self, **kwargs):
self.test(butterfly=True, **kwargs)
|
72138
|
import logging
class LogHelper():
handler = None
@staticmethod
def setup():
FORMAT = '[%(levelname)s] %(asctime)s - %(name)s - %(message)s'
LogHelper.handler = logging.StreamHandler()
LogHelper.handler.setLevel(logging.DEBUG)
LogHelper.handler.setFormatter(logging.Formatter(FORMAT))
LogHelper.get_logger(LogHelper.__name__).info("Log Helper set up")
@staticmethod
def get_logger(name,level=logging.DEBUG):
l = logging.getLogger(name)
l.setLevel(level)
l.addHandler(LogHelper.handler)
return l
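# Hedged usage sketch (not part of the original module): configure the shared
# handler once at startup, then request named loggers anywhere.
if __name__ == "__main__":
    LogHelper.setup()
    LogHelper.get_logger("example").info("LogHelper is ready")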
|
72160
|
import logging
import platform
from unittest.mock import Mock
from sanic import __version__
from sanic.application.logo import BASE_LOGO
from sanic.application.motd import MOTDTTY
def test_logo_base(app, run_startup):
logs = run_startup(app)
assert logs[0][1] == logging.DEBUG
assert logs[0][2] == BASE_LOGO
def test_logo_false(app, run_startup):
app.config.LOGO = False
logs = run_startup(app)
banner, port = logs[1][2].rsplit(":", 1)
assert logs[0][1] == logging.INFO
assert banner == "Goin' Fast @ http://127.0.0.1"
assert int(port) > 0
def test_logo_true(app, run_startup):
app.config.LOGO = True
logs = run_startup(app)
assert logs[0][1] == logging.DEBUG
assert logs[0][2] == BASE_LOGO
def test_logo_custom(app, run_startup):
app.config.LOGO = "My Custom Logo"
logs = run_startup(app)
assert logs[0][1] == logging.DEBUG
assert logs[0][2] == "My Custom Logo"
def test_motd_with_expected_info(app, run_startup):
logs = run_startup(app)
assert logs[1][2] == f"Sanic v{__version__}"
assert logs[3][2] == "mode: debug, single worker"
assert logs[4][2] == "server: sanic"
assert logs[5][2] == f"python: {platform.python_version()}"
assert logs[6][2] == f"platform: {platform.platform()}"
def test_motd_init():
_orig = MOTDTTY.set_variables
MOTDTTY.set_variables = Mock()
motd = MOTDTTY(None, "", {}, {})
motd.set_variables.assert_called_once()
MOTDTTY.set_variables = _orig
def test_motd_display(caplog):
motd = MOTDTTY(" foobar ", "", {"one": "1"}, {"two": "2"})
with caplog.at_level(logging.INFO):
motd.display()
version_line = f"Sanic v{__version__}".center(motd.centering_length)
assert (
"".join(caplog.messages)
== f"""
ββββββββββββββββββββββββββββββββββ
β {version_line} β
β β
βββββββββββββββββββββββββ¬βββββββββ€
β foobar β one: 1 β
| ββββββββββ€
β β two: 2 β
βββββββββββββββββββββββββ΄βββββββββ
"""
)
|
72167
|
import json
import sqlite3
from scrapekit.logs import log_path
conn = sqlite3.connect(':memory:')
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def log_parse(scraper):
path = log_path(scraper)
with open(path, 'r') as fh:
for line in fh:
data = json.loads(line)
if data.get('scraperName') != scraper.name:
continue
yield data
def load(scraper):
conn.row_factory = dict_factory
conn.execute("""CREATE TABLE IF NOT EXISTS log (scraperId text,
taskName text, scraperStartTime datetime, asctime text,
levelname text, taskId text)""")
conn.commit()
for data in log_parse(scraper):
conn.execute("""INSERT INTO log (scraperId, taskName,
scraperStartTime, asctime, levelname, taskId) VALUES
(?, ?, ?, ?, ?, ?)""",
(data.get('scraperId'), data.get('taskName'),
data.get('scraperStartTime'), data.get('asctime'),
data.get('levelname'), data.get('taskId')))
conn.commit()
def query(sql, **kwargs):
rp = conn.execute(sql, kwargs)
for row in rp.fetchall():
yield row
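# Hedged usage sketch (commented out; a real scrapekit scraper instance is
# assumed to exist and to have written a JSON log file):
# load(scraper)
# for row in query("SELECT levelname, COUNT(*) AS n FROM log GROUP BY levelname"):
#     print(row["levelname"], row["n"])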
|
72177
|
import os
from flask import g, request, jsonify, make_response
from flask_sqlalchemy import SQLAlchemy
from .app import app
from .conf import CONFIG
from .utils import get_allowed_service
from DeviceManager.Logger import Log
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = CONFIG.get_db_url()
app.config['SQLALCHEMY_BINDS'] = {}
LOGGER = Log().color_log()
# adapted from https://gist.github.com/miikka/28a7bd77574a00fcec8d
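# MultiTenantSQLAlchemy keeps one bind per tenant: choose_tenant() records the
# tenant on flask.g for the current request, check_binds() lazily registers a
# database URL under that bind key, and get_engine() resolves the engine for
# the chosen tenant.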
class MultiTenantSQLAlchemy(SQLAlchemy):
def check_binds(self, bind_key):
binds = app.config.get('SQLALCHEMY_BINDS')
if binds.get(bind_key, None) is None:
binds[bind_key] = CONFIG.get_db_url()
app.config['SQLALCHEMY_BINDS'] = binds
def choose_tenant(self, bind_key):
if hasattr(g, 'tenant'):
raise RuntimeError('Switching tenant in the middle of the request.')
g.tenant = bind_key
def get_engine(self, app=None, bind=None):
if bind is None:
if not hasattr(g, 'tenant'):
raise RuntimeError('No tenant chosen.')
bind = g.tenant
self.check_binds(bind)
return super().get_engine(app=app, bind=bind)
SINGLE_TENANT = os.environ.get('SINGLE_TENANT', False)
if SINGLE_TENANT:
db = SQLAlchemy(app)
else:
db = MultiTenantSQLAlchemy(app)
@app.before_request
def before_request():
try:
tenant = get_allowed_service(request.headers['authorization'])
db.choose_tenant(tenant)
except KeyError:
error = {"message": "No authorization token has been supplied", "status": 401}
LOGGER.error(f' {error["message"]} - {error["status"]}.')
return make_response(jsonify(error), 401)
except ValueError:
error = {"message": "Invalid authentication token", "status": 401}
LOGGER.error(f' {error["message"]} - {error["status"]}.')
return make_response(jsonify(error), 401)
|
72192
|
import numpy as np
class Renderer:
def __init__(self, height, width, config):
self.height = height
self.width = width
self.content = None
self.zbuffer = None
self.m = None
self.f = 1.0
self.resize(height, width)
self.colors = config.colors
self.bonds = config.bonds
self.btoggle = len(self.bonds) > 0
self.pos, self.sym = np.array(config.coordinates), config.symbols
self.ztoggle = True
self.zoom = 1.0
self.rot = np.identity(3)
self.rotcounter = [0, 0, 0]
self.draw_scene()
def draw_scene(self):
"""
A super simple rasterizer. For now, just draw single character atom symbols at their rounded x and y
positions.
:return: True if nothing bad happened.
"""
mx, my = self.m
rot = np.matmul(self.pos, self.rot)
self.clear()
# Draw bonds
for bond in self.bonds:
i, j = bond
# if bond is (i, j) with i == j, just draw the label (no bonds)
if i == j:
x, y, z = rot[i]
xp, yp = round(float(x) * self.f * self.zoom + mx), round(float(y) * self.zoom + my)
if 1 < xp < self.width - 2 and 1 < yp < self.height - 3 and float(z) < self.zbuffer[yp][xp]:
self.zbuffer[yp][xp] = float(z)
self.content[yp][xp] = self.sym[i][0].upper() + "," + self.colors[self.sym[i].upper()]
# else draw the bond with the labels at the end points
else:
# Draw the two labels at the end points
xa, ya, za = rot[i]
xa = float(xa) * self.f * self.zoom + mx
ya = float(ya) * self.zoom + my
xb, yb, zb = rot[j]
xb = float(xb) * self.f * self.zoom + mx
yb = float(yb) * self.zoom + my
xap, yap = round(xa), round(ya)
xbp, ybp = round(xb), round(yb)
if 1 < xap < self.width - 2 and 1 < yap < self.height - 3 and float(za) < self.zbuffer[yap][xap]:
self.zbuffer[yap][xap] = float(za)
self.content[yap][xap] = self.sym[i][0].upper() + "," + self.colors[self.sym[i].upper()]
if 1 < xbp < self.width - 2 and 1 < ybp < self.height - 3 and float(zb) < self.zbuffer[ybp][xbp]:
self.zbuffer[ybp][xbp] = float(zb)
self.content[ybp][xbp] = self.sym[j][0].upper() + "," + self.colors[self.sym[j].upper()]
if not self.btoggle:
continue
# Then start at xap+1 and go to xbp-1, drawing line segments
sy = -1 if ya > yb else 1
sx = -1 if xa > xb else 1
sz = -1 if za > zb else 1
dx = float((xb - xa) / (yb - ya)) if abs(yb - ya) > 0 else 0
dy = float((yb - ya) / (xb - xa)) if abs(xb - xa) > 0 else 0
dz = float((zb - za) / (xb - xa)) if abs(xb - xa) > 0 else 0
if abs(dy) <= 1:
for k in range(1, abs(xap - xbp)):
xk = xap + sx * k
yk = round(float(ya) + sx * k * dy)
zk = round((float(za) + sz * k * dz))
if 1 < xk < self.width - 2 and 1 < yk < self.height - 3 and float(zk) < \
self.zbuffer[yk][xk]:
col = self.colors[self.sym[i].upper()] if k < abs(xap - xbp) / 2 else self.colors[
self.sym[j].upper()]
self.zbuffer[yk][xk] = float(zk)
                            self.content[yk][xk] = "·,%s" % col
else:
for k in range(1, abs(yap - ybp)):
xk = round((float(xa) + sy * k * dx))
yk = yap + sy * k
zk = round((float(za) + sz * k * dz))
if 1 < xk < self.width - 2 and 1 < yk < self.height - 3 and float(zk) < \
self.zbuffer[yk][xk]:
col = self.colors[self.sym[i].upper()] if k < abs(yap - ybp) / 2 else self.colors[
self.sym[j].upper()]
self.zbuffer[yk][xk] = float(zk)
                            self.content[yk][xk] = "·,%s" % col
return True
def rotate(self, direction):
"""
Set an internal rotation matrix that is applied to the coordinates before every render.
:param direction: 1 and -1 are x and -x, 2 is either z/y, depending on whether the ztoggle is active or not
"""
if direction == 1:
self.rot = np.matmul(self.rot, [[1.0, 0.0, 0.0], [0.0, 0.9962, -0.0872], [0.0, 0.0872, 0.9962]])
if self.rotcounter[0] + 5 > 360:
self.rotcounter[0] = 0
self.rotcounter[0] += 5
elif direction == -1:
self.rot = np.matmul(self.rot, [[1.0, 0.0, 0.0], [0.0, 0.9962, 0.0872], [0.0, -0.0872, 0.9962]])
if self.rotcounter[0] - 5 < 0:
self.rotcounter[0] = 360
self.rotcounter[0] -= 5
elif direction == 2 and self.ztoggle:
self.rot = np.matmul(self.rot, [[0.9962, -0.0872, 0.0], [0.0872, 0.9962, 0.0], [0.0, 0.0, 1.0]])
if self.rotcounter[2] + 5 > 360:
self.rotcounter[2] = 0
else:
self.rotcounter[2] += 5
elif direction == -2 and self.ztoggle:
self.rot = np.matmul(self.rot, [[0.9962, 0.0872, 0.0], [-0.0872, 0.9962, 0.0], [0.0, 0.0, 1.0]])
if self.rotcounter[2] - 5 < 0:
self.rotcounter[2] = 360
else:
self.rotcounter[2] -= 5
elif direction == 2:
self.rot = np.matmul(self.rot, [[0.9962, 0.0, 0.0872], [0.0, 1.0, 0.0], [-0.0872, 0.0, 0.9962]])
if self.rotcounter[1] + 5 > 360:
self.rotcounter[1] = 0
else:
self.rotcounter[1] += 5
elif direction == -2:
self.rot = np.matmul(self.rot, [[0.9962, 0.0, -0.0872], [0.0, 1.0, 0.0], [0.0872, 0.0, 0.9962]])
if self.rotcounter[1] - 5 < 0:
self.rotcounter[1] = 360
else:
self.rotcounter[1] -= 5
def reset_view(self):
"""
Reset the view to the starting values.
"""
self.zoom = 1.0
self.rotcounter = [0, 0, 0]
self.rot = np.identity(3)
self.m = round(self.width / 2), round(self.height / 2)
def resize(self, height, width):
"""
Resize the screen. Known issue: crashes if the resize is faster than the framerate.
"""
self.height = height
self.width = width
self.content = [[" ,0"] * self.width for n in range(self.height - 2)]
self.zbuffer = [[10000.0] * self.width for n in range(self.height - 2)]
self.m = round(self.width / 2), round(self.height / 2)
# Since terminal characters are higher than wide, I correct for this by multiplying the x by f
# so that it appears wider. 2.25 is what looks good on my terminals, but might be
# nice to have a general way of determining the optimal value
self.f = 2
def clear(self):
"""
Clear the canvas and redraw the border.
"""
for i in range(self.height - 2):
for j in range(self.width):
self.zbuffer[i][j] = 10000.0
for i in range(self.height - 2):
for j in range(self.width):
if i == 0 and j == 0:
                    self.content[i][j] = "β,0"
                elif (i == 0 or i == self.height - 3) and 0 < j < self.width - 1:
                    self.content[i][j] = "β,0"
                elif i == 0 and j == self.width - 1:
                    self.content[i][j] = "β,0"
                elif i < self.height - 3 and (j == 0 or j == self.width - 1):
                    self.content[i][j] = "β,0"
                elif i == self.height - 3 and j == 0:
                    self.content[i][j] = "β,0"
                elif i == self.height - 3 and j == self.width - 1:
                    self.content[i][j] = "β,0"
else:
self.content[i][j] = " ,0"
|
72201
|
import pathlib
def iter_examples():
example_dir = pathlib.Path(__file__).parent.absolute()
for fp in example_dir.iterdir():
if fp.name.startswith("_") or fp.suffix != ".py":
continue
yield fp
|
72214
|
from typing import Sequence, Dict
from anoncreds.protocol.globals import LARGE_VPRIME, LARGE_MVECT, LARGE_E_START, \
LARGE_ETILDE, \
LARGE_VTILDE, LARGE_UTILDE, LARGE_RTILDE, LARGE_ALPHATILDE, ITERATIONS, \
DELTA
from anoncreds.protocol.primary.primary_proof_common import calcTge, calcTeq
from anoncreds.protocol.types import PrimaryClaim, Predicate, PrimaryInitProof, \
PrimaryEqualInitProof, PrimaryPrecicateGEInitProof, PrimaryProof, \
PrimaryEqualProof, PrimaryPredicateGEProof, \
ID, ClaimInitDataType, ClaimAttributeValues
from anoncreds.protocol.utils import splitRevealedAttrs, fourSquares
from anoncreds.protocol.wallet.prover_wallet import ProverWallet
from config.config import cmod
class PrimaryClaimInitializer:
def __init__(self, wallet: ProverWallet):
self._wallet = wallet
async def genClaimInitData(self, schemaId: ID) -> ClaimInitDataType:
pk = await self._wallet.getPublicKey(schemaId)
ms = await self._wallet.getMasterSecret(schemaId)
vprime = cmod.randomBits(LARGE_VPRIME)
N = pk.N
Rms = pk.Rms
S = pk.S
U = (S ** vprime) * (Rms ** ms) % N
return ClaimInitDataType(U=U, vPrime=vprime)
async def preparePrimaryClaim(self, schemaId: ID, claim: PrimaryClaim):
claimInitDat = await self._wallet.getPrimaryClaimInitData(schemaId)
newV = claim.v + claimInitDat.vPrime
claim = claim._replace(v=newV)
return claim
class PrimaryProofBuilder:
def __init__(self, wallet: ProverWallet):
self._wallet = wallet
async def initProof(self, schemaId, c1: PrimaryClaim,
revealedAttrs: Sequence[str],
predicates: Sequence[Predicate],
m1Tilde, m2Tilde, claimAttributes: Dict[str, ClaimAttributeValues]) -> PrimaryInitProof:
if not c1:
return None
eqProof = await self._initEqProof(schemaId, c1, revealedAttrs,
m1Tilde, m2Tilde, claimAttributes)
geProofs = []
for predicate in predicates:
geProof = await self._initGeProof(schemaId, eqProof, c1,
predicate, claimAttributes)
geProofs.append(geProof)
return PrimaryInitProof(eqProof, geProofs)
async def finalizeProof(self, schemaId, cH,
initProof: PrimaryInitProof) -> PrimaryProof:
if not initProof:
return None
cH = cmod.integer(cH)
eqProof = await self._finalizeEqProof(schemaId, cH,
initProof.eqProof)
geProofs = []
for initGeProof in initProof.geProofs:
geProof = await self._finalizeGeProof(schemaId, cH, initGeProof,
eqProof)
geProofs.append(geProof)
return PrimaryProof(eqProof, geProofs)
async def _initEqProof(self, schemaId, c1: PrimaryClaim,
revealedAttrs: Sequence[str], m1Tilde, m2Tilde, claimAttributes: Dict[str, ClaimAttributeValues]) \
-> PrimaryEqualInitProof:
m2Tilde = m2Tilde if m2Tilde else cmod.integer(
cmod.randomBits(LARGE_MVECT))
revealedAttrs, unrevealedAttrs = splitRevealedAttrs(
claimAttributes, [a.name for a in revealedAttrs])
mtilde = self._getMTilde(unrevealedAttrs)
Ra = cmod.integer(cmod.randomBits(LARGE_VPRIME))
pk = await self._wallet.getPublicKey(ID(schemaId=schemaId))
A, e, v = c1.A, c1.e, c1.v
Aprime = A * (pk.S ** Ra) % pk.N
vprime = (v - e * Ra)
eprime = e - (2 ** LARGE_E_START)
etilde = cmod.integer(cmod.randomBits(LARGE_ETILDE))
vtilde = cmod.integer(cmod.randomBits(LARGE_VTILDE))
Rur = 1 % pk.N
for k, value in unrevealedAttrs.items():
if k in claimAttributes:
Rur = Rur * (pk.R[k] ** mtilde[k])
Rur *= pk.Rms ** m1Tilde
Rur *= pk.Rctxt ** m2Tilde
# T = ((Aprime ** etilde) * Rur * (pk.S ** vtilde)) % pk.N
T = calcTeq(pk, Aprime, etilde, vtilde, mtilde, m1Tilde, m2Tilde,
unrevealedAttrs.keys())
return PrimaryEqualInitProof(c1, Aprime, T, etilde, eprime, vtilde,
vprime, mtilde, m1Tilde, m2Tilde,
unrevealedAttrs.keys(), revealedAttrs)
async def _initGeProof(self, schemaId, eqProof: PrimaryEqualInitProof,
c1: PrimaryClaim, predicate: Predicate, claimAttributes: Dict[str, ClaimAttributeValues]) \
-> PrimaryPrecicateGEInitProof:
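        # GE (>=) predicate proof: delta = encoded attribute value - threshold must
        # be non-negative. By Lagrange's four-square theorem delta is written as a
        # sum of four squares u_0..u_3, and commitments T_i = Z^{u_i} * S^{r_i} mod N
        # (plus T_DELTA for delta itself) form the C-list of the proof.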
# gen U for Delta
pk = await self._wallet.getPublicKey(ID(schemaId=schemaId))
k, value = predicate.attrName, predicate.value
delta = claimAttributes[k].encoded - value
if delta < 0:
raise ValueError("Predicate is not satisfied")
u = fourSquares(delta)
# prepare C list
r = {}
T = {}
CList = []
for i in range(0, ITERATIONS):
r[str(i)] = cmod.integer(cmod.randomBits(LARGE_VPRIME))
T[str(i)] = (pk.Z ** u[str(i)]) * (pk.S ** r[str(i)]) % pk.N
CList.append(T[str(i)])
r[DELTA] = cmod.integer(cmod.randomBits(LARGE_VPRIME))
T[DELTA] = (pk.Z ** delta) * (pk.S ** r[DELTA]) % pk.N
CList.append(T[DELTA])
# prepare Tau List
utilde = {}
rtilde = {}
for i in range(0, ITERATIONS):
utilde[str(i)] = cmod.integer(cmod.randomBits(LARGE_UTILDE))
rtilde[str(i)] = cmod.integer(cmod.randomBits(LARGE_RTILDE))
rtilde[DELTA] = cmod.integer(cmod.randomBits(LARGE_RTILDE))
alphatilde = cmod.integer(cmod.randomBits(LARGE_ALPHATILDE))
TauList = calcTge(pk, utilde, rtilde, eqProof.mTilde[k], alphatilde, T)
return PrimaryPrecicateGEInitProof(CList, TauList, u, utilde, r, rtilde,
alphatilde, predicate, T)
async def _finalizeEqProof(self, schemaId, cH,
initProof: PrimaryEqualInitProof) -> PrimaryEqualProof:
e = initProof.eTilde + (cH * initProof.ePrime)
v = initProof.vTilde + (cH * initProof.vPrime)
m = {}
claimAttributes = await self._wallet.getClaimAttributes(ID(schemaId=schemaId))
for k in initProof.unrevealedAttrs:
m[str(k)] = initProof.mTilde[str(k)] + (
cH * claimAttributes[str(k)].encoded)
ms = await self._wallet.getMasterSecret(ID(schemaId=schemaId))
m1 = initProof.m1Tilde + (cH * ms)
m2 = initProof.m2Tilde + (cH * initProof.c1.m2)
return PrimaryEqualProof(e, v, m, m1, m2, initProof.Aprime,
initProof.revealedAttrs)
async def _finalizeGeProof(self, schemaId, cH,
initProof: PrimaryPrecicateGEInitProof,
eqProof: PrimaryEqualProof) \
-> PrimaryPredicateGEProof:
u = {}
r = {}
urproduct = 0
for i in range(0, ITERATIONS):
u[str(i)] = initProof.uTilde[str(i)] + cH * initProof.u[str(i)]
r[str(i)] = initProof.rTilde[str(i)] + cH * initProof.r[str(i)]
urproduct += initProof.u[str(i)] * initProof.r[str(i)]
r[DELTA] = initProof.rTilde[DELTA] + cH * initProof.r[DELTA]
alpha = initProof.alphaTilde + cH * (initProof.r[DELTA] - urproduct)
k = initProof.predicate.attrName
return PrimaryPredicateGEProof(u, r, alpha, eqProof.m[str(k)],
initProof.T, initProof.predicate)
def _getMTilde(self, unrevealedAttrs):
mtilde = {}
for key, value in unrevealedAttrs.items():
mtilde[key] = cmod.integer(cmod.randomBits(LARGE_MVECT))
return mtilde
|
72223
|
import token as TOKEN
__all__ = ("EXCEPT", "INDENT", "LINE_LENGTH", "TOKEN")
LINE_LENGTH = 100 # line length to use while formatting code
TOKEN.COLONEQUAL = 0xFF  # ensure the constant exists on versions without assignment expressions (walrus operator)
SPACE = " "
INDENT_LENGTH = 4 # length of indentation to use
INDENT = SPACE * INDENT_LENGTH # actual string to apply as one indent
NEWLINES = {TOKEN.NEWLINE, TOKEN.NL} # tokens that are considered as newlines
EXCEPT = { # symbols after which we do not expect to have {...} used for indentation
TOKEN.NEWLINE, # \n
TOKEN.NL, # \n
TOKEN.LPAR, # (
TOKEN.LSQB, # [
TOKEN.COLON, # :
TOKEN.COMMA, # ,
TOKEN.SEMI, # ;
TOKEN.PLUS, # +
TOKEN.MINUS, # -
TOKEN.STAR, # *
TOKEN.SLASH, # /
TOKEN.VBAR, # |
TOKEN.AMPER, # &
TOKEN.LESS, # <
TOKEN.GREATER, # >
TOKEN.EQUAL, # =
TOKEN.DOT, # .
TOKEN.PERCENT, # %
TOKEN.LBRACE, # {
TOKEN.EQEQUAL, # ==
TOKEN.NOTEQUAL, # !=
TOKEN.LESSEQUAL, # <=
TOKEN.GREATEREQUAL, # >=
TOKEN.TILDE, # ~
TOKEN.CIRCUMFLEX, # ^
TOKEN.LEFTSHIFT, # <<
TOKEN.RIGHTSHIFT, # >>
TOKEN.DOUBLESTAR, # **
TOKEN.PLUSEQUAL, # +=
TOKEN.MINEQUAL, # -=
TOKEN.STAREQUAL, # *=
TOKEN.SLASHEQUAL, # /=
TOKEN.PERCENTEQUAL, # %=
TOKEN.AMPEREQUAL, # &=
TOKEN.VBAREQUAL, # |=
TOKEN.CIRCUMFLEXEQUAL, # ^=
TOKEN.LEFTSHIFTEQUAL, # <<=
TOKEN.RIGHTSHIFTEQUAL, # >>=
TOKEN.DOUBLESTAREQUAL, # **=
TOKEN.DOUBLESLASH, # //
TOKEN.DOUBLESLASHEQUAL, # //=
TOKEN.AT, # @
TOKEN.ATEQUAL, # @=
TOKEN.RARROW, # ->
TOKEN.COLONEQUAL, # :=
}
|
72266
|
import os, sys
#import GaussianRunPack
from GaussianRunPack import GaussianDFTRun
#test_sdf = GaussianRunPack.GaussianDFTRun('B3LYP', '3-21G*',12, 'OPT energy deen nmr uv homolumo',infilename,0)
#test_sdf = GaussianRunPack.GaussianDFTRun('B3LYP', '3-21G*',1, 'OPT energy deen nmr uv homolumo',infilename,0)
#outdic = test_sdf.run_gaussian()
ind=1
calc_sdf = GaussianDFTRun('B3LYP', '3-21G*', 1, 'OPT energy deen nmr uv homolumo', 'CheckMolopt'+str(ind)+'.sdf', 0)
outdic = calc_sdf.run_gaussian()
#print(test_sdf.read_sdf())
print(outdic)
print('uv', outdic['uv'][0][0], 'deen', outdic['deen'], 'uv', outdic['uv'][1][0])
#gap = test_sdf.Extract_values(infilename,1,0,1,1,0)
#print (gap)
#print(Energy[-1])
|
72279
|
import pytest
from ruptures.datasets import pw_constant
from ruptures.show import display
from ruptures.show.display import MatplotlibMissingError
@pytest.fixture(scope="module")
def signal_bkps():
signal, bkps = pw_constant()
return signal, bkps
def test_display_with_options(signal_bkps):
try:
signal, bkps = signal_bkps
fig, axarr = display(signal, bkps)
fig, axarr = display(signal, bkps, bkps)
figsize = (20, 10) # figure size
fig, axarr = display(
signal,
bkps,
figsize=figsize,
)
fig, axarr = display(
signal[:, 0],
bkps,
figsize=figsize,
)
except MatplotlibMissingError:
pytest.skip("matplotlib is not installed")
def test_display_without_options(signal_bkps):
try:
signal, bkps = signal_bkps
fig, axarr = display(signal, bkps)
fig, axarr = display(signal, bkps, bkps)
figsize = (20, 10) # figure size
fig, axarr = display(signal, bkps)
fig, axarr = display(signal[:, 0], bkps)
except MatplotlibMissingError:
pytest.skip("matplotlib is not installed")
def test_display_with_new_options(signal_bkps):
try:
signal, bkps = signal_bkps
fig, axarr = display(signal, bkps)
fig, axarr = display(signal, bkps, bkps)
fig, axarr = display(signal, bkps, facecolor="k", edgecolor="b")
fig, axarr = display(signal[:, 0], bkps, facecolor="k", edgecolor="b")
except MatplotlibMissingError:
pytest.skip("matplotlib is not installed")
def test_display_with_computed_chg_pts_options(signal_bkps):
try:
signal, bkps = signal_bkps
fig, axarr = display(signal, bkps)
fig, axarr = display(signal, bkps, bkps)
fig, axarr = display(signal, bkps, bkps, computed_chg_pts_color="k")
fig, axarr = display(
signal, bkps, bkps, computed_chg_pts_color="k", computed_chg_pts_linewidth=3
)
fig, axarr = display(
signal,
bkps,
bkps,
computed_chg_pts_color="k",
computed_chg_pts_linewidth=3,
computed_chg_pts_linestyle="--",
)
fig, axarr = display(
signal,
bkps,
bkps,
computed_chg_pts_color="k",
computed_chg_pts_linewidth=3,
computed_chg_pts_linestyle="--",
computed_chg_pts_alpha=1.0,
)
except MatplotlibMissingError:
pytest.skip("matplotlib is not installed")
|
72323
|
from flask_restful import Resource
from flask_restful.reqparse import RequestParser
from pajbot.managers.db import DBManager
from pajbot.models.playsound import Playsound
from pajbot.models.sock import SocketClientManager
from pajbot.modules import PlaysoundModule
from pajbot.web.utils import requires_level
from pajbot.managers.adminlog import AdminLogManager
class PlaysoundAPI(Resource):
@requires_level(500)
def put(self, playsound_name, **options):
playsound_name = PlaysoundModule.massage_name(playsound_name)
if not PlaysoundModule.validate_name(playsound_name):
return (
{
"error": "Invalid Playsound name. The playsound name may only contain lowercase latin letters, 0-9, -, or _. No spaces :rage:"
},
400,
)
post_parser = RequestParser()
post_parser.add_argument("link", required=True)
args = post_parser.parse_args()
try:
link = args["link"]
except (ValueError, KeyError):
return {"error": "Invalid `link` parameter."}, 400
with DBManager.create_session_scope() as db_session:
count = db_session.query(Playsound).filter(Playsound.name == playsound_name).count()
if count >= 1:
return "Playsound already exists", 400
# the rest of the parameters are initialized with defaults
playsound = Playsound(name=playsound_name, link=link)
db_session.add(playsound)
log_msg = f"The {playsound_name} playsound has been added"
AdminLogManager.add_entry("Playsound added", options["user"], log_msg)
return "OK", 200
@requires_level(500)
def post(self, playsound_name, **options):
# require JSON so the cooldown can be null
post_parser = RequestParser()
post_parser.add_argument("link", required=True)
post_parser.add_argument("volume", type=int, required=True)
post_parser.add_argument("cooldown", type=int, required=False)
post_parser.add_argument("enabled", type=bool, required=False)
args = post_parser.parse_args()
link = args["link"]
if not PlaysoundModule.validate_link(link):
return "Empty or bad link, links must start with https:// and must not contain spaces", 400
volume = args["volume"]
if not PlaysoundModule.validate_volume(volume):
return "Bad volume argument", 400
# cooldown is allowed to be null/None
cooldown = args.get("cooldown", None)
if not PlaysoundModule.validate_cooldown(cooldown):
return "Bad cooldown argument", 400
enabled = args["enabled"]
if enabled is None:
return "Bad enabled argument", 400
with DBManager.create_session_scope() as db_session:
playsound = db_session.query(Playsound).filter(Playsound.name == playsound_name).one_or_none()
if playsound is None:
return "Playsound does not exist", 404
raw_edited_data = {
"link": (playsound.link, link),
"volume": (playsound.volume, volume),
"cooldown": (playsound.cooldown, cooldown),
}
# make a dictionary with all the changed values (except for enabled, which has a special case below)
filtered_edited_data = {k: v for k, v in raw_edited_data.items() if v[0] != v[1]}
log_msg = f"The {playsound_name} playsound has been updated: "
log_msg_changes = []
if playsound.enabled != enabled:
log_msg_changes.append("enabled" if enabled else "disabled")
# iterate over changed values and push them to the log msg
for edited_key, values in filtered_edited_data.items():
log_msg_changes.append(f"{edited_key} {values[0]} to {values[1]}")
log_msg += ", ".join(log_msg_changes)
playsound.link = link
playsound.volume = volume
playsound.cooldown = cooldown
playsound.enabled = enabled
db_session.add(playsound)
if len(log_msg_changes):
AdminLogManager.add_entry("Playsound edited", options["user"], log_msg)
return "OK", 200
@requires_level(500)
def delete(self, playsound_name, **options):
with DBManager.create_session_scope() as db_session:
playsound = db_session.query(Playsound).filter(Playsound.name == playsound_name).one_or_none()
if playsound is None:
return "Playsound does not exist", 404
log_msg = f"The {playsound.name} playsound has been removed"
AdminLogManager.add_entry("Playsound removed", options["user"], log_msg)
db_session.delete(playsound)
return "OK", 200
class PlayPlaysoundAPI(Resource):
@requires_level(500)
def post(self, playsound_name, **options):
with DBManager.create_session_scope() as db_session:
count = db_session.query(Playsound).filter(Playsound.name == playsound_name).count()
if count <= 0:
return "Playsound does not exist", 404
# explicitly don't check for disabled
SocketClientManager.send("playsound.play", {"name": playsound_name})
return "OK", 200
def init(api):
api.add_resource(PlaysoundAPI, "/playsound/<playsound_name>")
api.add_resource(PlayPlaysoundAPI, "/playsound/<playsound_name>/play")
|
72331
|
from torch import nn
from .PGFA_backbone import PGFABackbone
from .PGFA_pool import PGFAPool
from .PGFA_pose_guided_mask_block import PGFAPoseGuidedMaskBlock
from .PGFA_reduction import PGFAReduction
from .PGFA_classifier import PGFAClassifier
class PGFA(nn.Module):
def __init__(self, cfg):
super(PGFA, self).__init__()
self.backbone = PGFABackbone(cfg)
self.pool = PGFAPool(cfg)
self.pose_guide_mask_block = PGFAPoseGuidedMaskBlock(cfg)
self.reduction = PGFAReduction(cfg)
if hasattr(cfg.model, 'num_classes') and cfg.model.num_classes > 0:
self.classifier = PGFAClassifier(cfg)
def backbone_forward(self, in_dict):
return self.backbone(in_dict)
def pool_forward(self, in_dict, cfg):
return self.pool(in_dict, cfg)
def pose_guided_mask_block_forward(self, in_dict, out_dict, cfg):
return self.pose_guide_mask_block(in_dict, out_dict, cfg)
def reduction_forward(self, in_dict, cfg):
return self.reduction(in_dict, cfg)
def classifier_forward(self, in_dict, cfg):
return self.classifier(in_dict, cfg)
def forward(self, in_dict, cfg, forward_type='Supervised'):
in_dict = self.backbone_forward(in_dict)
out_dict = self.pool_forward(in_dict, cfg)
out_dict = self.pose_guided_mask_block_forward(in_dict, out_dict, cfg)
out_dict = self.reduction_forward(out_dict, cfg)
if hasattr(self, 'classifier'):
out_dict = self.classifier_forward(out_dict, cfg)
return out_dict
|
72342
|
from amaranth_boards.zturn_lite_z010 import *
from amaranth_boards.zturn_lite_z010 import __all__
import warnings
warnings.warn("instead of nmigen_boards.zturn_lite_z010, use amaranth_boards.zturn_lite_z010",
DeprecationWarning, stacklevel=2)
|
72395
|
import numpy as np
from scipy.spatial import distance
from Quaternions import Quaternions
import Animation
import AnimationStructure
def constrain(positions, constraints):
"""
Constrain animation positions given
a number of VerletParticles constrains
Parameters
----------
positions : (F, J, 3) ndarray
array of joint positions for
F frames and J joints
constraints : [(int, int, float, float, float)]
A list of constraints in the format:
(Joint1, Joint2, Masses1, Masses2, Lengths)
Returns
-------
positions : (F, J, 3) ndarray
joint positions for F
frames and J joints constrained
using the supplied constraints
"""
from VerletParticles import VerletParticles
particles = VerletParticles(positions, gravity=0.0, timestep=0.0)
for i, j, w0, w1, l in constraints:
particles.add_length_constraint(i, j, w0, w1, l)
return particles.constrain()
def extremities(positions, count, **kwargs):
"""
List of most extreme frame indices
Parameters
----------
positions : (F, J, 3) ndarray
array of joint positions for
F frames and J joints
count : int
Number of indices to return,
does not include first and last
frame which are always included
static : bool
Find extremities where root
translation has been removed
Returns
-------
indices : (C) ndarray
Returns C frame indices of the
most extreme frames including
the first and last frames.
        Therefore if `count` is specified
        as `4` this will return an array of
        `6` indices.
"""
if kwargs.pop('static', False):
positions = positions - positions[:,0][:,np.newaxis,:]
positions = positions.reshape((len(positions), -1))
distance_matrix = distance.squareform(distance.pdist(positions))
keys = [0]
for _ in range(count-1):
keys.append(int(np.argmax(np.min(distance_matrix[keys], axis=0))))
return np.array(keys)
def load_to_maya(positions, names=None, parents=None, color=None, radius=0.1, thickness=5.0):
import pymel.core as pm
import maya.mel as mel
if names is None:
names = ['joint_%i' % i for i in xrange(positions.shape[1])]
if color is None:
color = (0.5, 0.5, 0.5)
mpoints = []
frames = range(1, len(positions)+1)
for i, name in enumerate(names):
#try:
# point = pm.PyNode(name)
#except pm.MayaNodeError:
# point = pm.sphere(p=(0,0,0), n=name, radius=radius)[0]
point = pm.sphere(p=(0,0,0), n=name, radius=radius)[0]
jpositions = positions[:,i]
for j,attr,attr_name in zip(xrange(3),
[point.tx, point.ty, point.tz],
["_translateX", "_translateY", "_translateZ"]):
conn = attr.listConnections()
if len(conn) == 0:
curve = pm.nodetypes.AnimCurveTU(n=name + attr_name)
pm.connectAttr(curve.output, attr)
else:
curve = conn[0]
curve.addKeys(frames, jpositions[:,j])
mpoints.append(point)
if parents != None:
for i, p in enumerate(parents):
if p == -1: continue
pointname = names[i]
parntname = names[p]
conn = pm.PyNode(pointname).t.listConnections()
if len(conn) != 0: continue
curve = pm.curve(p=[[0,0,0],[0,1,0]], d=1, n=names[i]+'_curve')
pm.connectAttr(pointname+'.t', names[i]+'_curve.cv[0]')
pm.connectAttr(parntname+'.t', names[i]+'_curve.cv[1]')
pm.select(curve)
pm.runtime.AttachBrushToCurves()
stroke = pm.selected()[0]
brush = pm.listConnections(stroke.getChildren()[0]+'.brush')[0]
pm.setAttr(brush+'.color1', color)
pm.setAttr(brush+'.globalScale', thickness)
pm.setAttr(brush+'.endCaps', 1)
pm.setAttr(brush+'.tubeSections', 20)
mel.eval('doPaintEffectsToPoly(1,0,0,1,100000);')
mpoints += [stroke, curve]
return pm.group(mpoints, n='AnimationPositions'), mpoints
def load_from_maya(root, start, end):
import pymel.core as pm
def rig_joints_list(s, js):
for c in s.getChildren():
if 'Geo' in c.name(): continue
if isinstance(c, pm.nodetypes.Joint): js = rig_joints_list(c, js); continue
if isinstance(c, pm.nodetypes.Transform): js = rig_joints_list(c, js); continue
return [s] + js
joints = rig_joints_list(root, [])
names = map(lambda j: j.name(), joints)
positions = np.empty((end - start, len(names), 3))
original_time = pm.currentTime(q=True)
pm.currentTime(start)
for i in range(start, end):
pm.currentTime(i)
for j in joints: positions[i-start, names.index(j.name())] = j.getTranslation(space='world')
pm.currentTime(original_time)
return positions, names
def loop(positions, forward='z'):
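    # Makes the clip loop along the forward axis: remove the root trajectory,
    # linearly blend in the first-minus-last frame difference so the ends meet,
    # then restore the trajectory.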
fid = 'xyz'.index(forward)
data = positions.copy()
trajectory = data[:,0:1,fid].copy()
data[:,:,fid] -= trajectory
diff = data[0] - data[-1]
data += np.linspace(
0, 1, len(data))[:,np.newaxis,np.newaxis] * diff[np.newaxis]
data[:,:,fid] += trajectory
return data
def extend(positions, length, forward='z'):
fid = 'xyz'.index(forward)
data = positions.copy()
while len(data) < length:
next = positions[1:].copy()
next[:,:,fid] += data[-1,0,fid]
data = np.concatenate([data, next], axis=0)
return data[:length]
def redirect(positions, joint0, joint1, forward='z'):
    """Rotate the clip about the y axis so that the mean joint1 -> joint0 direction
    points along the `forward` axis."""
forwarddir = {
'x': np.array([[[1,0,0]]]),
'y': np.array([[[0,1,0]]]),
'z': np.array([[[0,0,1]]]),
}[forward]
direction = (positions[:,joint0] - positions[:,joint1]).mean(axis=0)[np.newaxis,np.newaxis]
direction = direction / np.sqrt(np.sum(direction**2))
rotation = Quaternions.between(direction, forwarddir).constrained_y()
return rotation * positions
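

# Hedged self-test sketch (not part of the original module). It exercises only the
# pure-numpy helpers above, so no Maya session is needed; shapes are assumptions.
if __name__ == "__main__":
    clip = np.cumsum(np.random.randn(40, 21, 3) * 0.1, axis=0)    # random-walk clip
    looped = loop(clip, forward='z')
    # after blending, first and last poses agree except for root travel along z
    print('loop residual (x, y):', np.abs((looped[-1] - looped[0])[:, :2]).max())
    longer = extend(clip, length=100, forward='z')
    print('extended clip length:', len(longer))                   # -> 100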
|
72417
|
import numpy as np
import torch
from torch.utils.data import Dataset
from ad3 import factor_graph as fg
try:
    import cPickle as pickle
except ImportError:
    import pickle
from tqdm import tqdm
import time
from .random_pgm_data import RandomPGMData, worker_init_fn
DEFAULT_SIZE = 100000


class RandomPGMHop(Dataset):
    """Synthetic chain-structured PGM: binary variables joined by dense pairwise
    factors plus budget ('hop') factors, labelled with the exact (branch-and-bound)
    and approximate AD3 MAP assignments."""

    def __init__(self, chain_length, hop_order=9, ret_efeature_pw=True, size=DEFAULT_SIZE):
self.chain_length = chain_length
        self.hop_order = hop_order if hop_order % 2 else hop_order + 1  # force an odd hop order
self.half_hop = self.hop_order >> 1
self.ret_efeature_pw = ret_efeature_pw
self.size = size
def __len__(self):
return self.size
def _generate_graph(self):
g = fg.PFactorGraph()
var_list = []
for i in range(self.chain_length):
v = g.create_multi_variable(2)
v.set_log_potentials(self.lops[i])
var_list.append(v)
        # dense pairwise factors between neighbouring variables
        for i in range(self.chain_length - 1):
            g.create_factor_dense([var_list[i], var_list[i + 1]], self.pws[i][0])
        # budget ('hop') factor over each window of `hop_order` variables,
        # capping how many of them may take state 1
        for i in range(self.chain_length - self.hop_order + 1):
            v_list = [
                var_list[j].get_state(1) for j in range(i, i + self.hop_order)
            ]
            g.create_factor_budget(v_list, self.cap[i + self.half_hop])
return g
def _get_node_feature(self):
self.lops = np.random.uniform(0.0, 1.0, (self.chain_length, 2))
return np.transpose(self.lops.astype(np.float32), [1, 0])
def _get_edge_feature_pw(self):
self.pws = np.zeros(shape=[self.chain_length, 2, 4], dtype=np.float32)
for i in range(self.chain_length - 1):
# pws_to_right = np.random.randn(2, 2)
pws_to_right = np.zeros([2, 2])
pws_to_right[1, 1] = np.random.uniform(0, 2)
pws_to_left = np.transpose(pws_to_right)
self.pws[i] = [list(pws_to_right.reshape(-1)), list(pws_to_left.reshape(-1))]
efeature = np.zeros(shape=[self.chain_length, 3, 4], dtype=np.float32)
for i in range(self.chain_length):
e_self = np.zeros(4)
e_left = self.pws[i-1][1] if i > 0 else e_self.copy()
e_right = self.pws[i][0] if i < self.chain_length-1 else e_self.copy()
efeature[i, 0] = e_left
efeature[i, 1] = e_self
efeature[i, 2] = e_right
return np.transpose(efeature, [2, 0, 1])
def _generate_edge_feature_hop(self):
self.cap = list(np.random.randint(low=1, high=self.hop_order, size=self.chain_length))
half_hop = self.hop_order >> 1
max_cap = np.zeros(self.hop_order)
max_cap[self.hop_order-1] = 1
efeature = np.zeros(shape=[self.chain_length, self.hop_order], dtype=np.float32)
for i in range(half_hop, self.chain_length - half_hop):
efeature[i, self.cap[i]] = 1
for i in range(half_hop):
efeature[i, self.hop_order-1] = 1
for i in range(self.chain_length - half_hop, self.chain_length):
efeature[i, self.hop_order-1] = 1
        return np.expand_dims(np.transpose(efeature, [1, 0]), -1)

        # NOTE: the two blocks below are unreachable (the method returns above);
        # they are alternative featurizations kept for reference.
        # Alternative 1: passing info from node to factor.
        efeature = np.zeros(shape=[self.chain_length, self.hop_order, self.hop_order], dtype=np.float32)
for i in range(self.chain_length):
cur_cap = np.zeros(self.hop_order)
cur_cap[self.cap[i]] = 1
for j in range(i - half_hop, i + half_hop + 1):
efeature[i, j-i+half_hop] = max_cap.copy() \
if j < 0 or j >= self.chain_length else cur_cap.copy()
return np.transpose(efeature, [2, 0, 1])
        # Alternative 2: passing info from factor to node.
efeature2 = np.zeros(shape=[self.chain_length, self.hop_order, self.hop_order], dtype=np.float32)
for i in range(self.chain_length):
for j in range(i-half_hop, i+half_hop+1):
if j < 0 or j >= self.chain_length:
efeature2[i, j-i+half_hop] = max_cap.copy()
else:
cur_cap = np.zeros(self.hop_order)
cur_cap[self.cap[j]] = 1
efeature2[i, j-i+half_hop] = cur_cap
return np.transpose(efeature, [2, 0, 1]), np.transpose(efeature2, [2, 0, 1])
def __getitem__(self, index):
node_feature = self._get_node_feature()
efeature_pw = self._get_edge_feature_pw()
efeature_hop = self._generate_edge_feature_hop()
        # exact solution (branch and bound)
g = self._generate_graph()
val, post, _, stat = g.solve(tol=1e-6, branch_and_bound=True)
post = np.reshape(np.asarray(post), [self.chain_length, 2])
assign = np.argmax(post, axis=1)
        # approximate solution (LP relaxation)
g = self._generate_graph()
val1, post1, _, status = g.solve(branch_and_bound=False)
post1 = np.reshape(np.asarray(post1), [self.chain_length, 2])
assign1 = np.argmax(post1, axis=1)
if self.ret_efeature_pw:
return node_feature, efeature_pw, efeature_hop, assign, assign1
else:
pws = np.expand_dims(np.transpose(self.pws[:, 0, :], [1, 0]), -1)
return node_feature, pws, efeature_hop, assign, assign1
if __name__ == '__main__':
rpgm = RandomPGMHop(chain_length=6, hop_order=5, ret_efeature_pw=False)
node_feature, efeature_pw, efeature_hop, assign, assign1 = rpgm[0]
print('node_feature', node_feature.shape, np.transpose(node_feature, [1, 0]))
print('efeature_pw', efeature_pw.shape, np.transpose(efeature_pw, [1, 2, 0]))
print('efeature_hop', efeature_hop.shape, np.transpose(efeature_hop, [1, 2, 0]))
print('assign', assign)
print('assign1', assign1)
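    # Hedged extension of this demo (assumes ad3 and the relative import above are
    # available): the dataset can be consumed directly through a torch DataLoader.
    from torch.utils.data import DataLoader
    loader = DataLoader(rpgm, batch_size=4, num_workers=0, worker_init_fn=worker_init_fn)
    nf, pw, hop, y_exact, y_approx = next(iter(loader))
    print('batched node_feature', tuple(nf.shape))                # -> (4, 2, 6)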
|
72426
|
input = """
num(2).
node(a).
p(N) :- num(N), #count{Y:node(Y)} = N1, <=(N,N1).
"""
output = """
{node(a), num(2)}
"""
|