max_stars_repo_path (string, 4-245) | max_stars_repo_name (string, 7-115) | max_stars_count (int64, 101-368k) | id (string, 2-8) | content (string, 6-1.03M)
---|---|---|---|---|
python/seldon_core/__init__.py | juldou/seldon-core | 3,049 | 12793436 | from seldon_core.version import __version__
from .storage import Storage
|
第11章/program/baidu/pipelines.py | kingname/SourceCodeOfBook | 274 | 12793461 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
from scrapy.conf import settings
class BaiduPipeline(object):
def __init__(self):
host = settings['MONGODB_HOST']
port = settings['MONGODB_PORT']
db_name = settings['MONGODB_DBNAME']
client = pymongo.MongoClient(host=host, port=port)
db = client[db_name]
self.post = db[settings['MONGODB_DOCNAME']]
def process_item(self, item, spider):
person_info = dict(item)
self.post.insert(person_info)
return item
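# Illustrative alternative sketch: scrapy.conf was removed in later Scrapy releases and
# PyMongo deprecates Collection.insert, so a present-day pipeline would typically read
# settings via from_crawler and call insert_one instead. The class name
# BaiduMongoPipeline is hypothetical; it reuses the pymongo import above.
class BaiduMongoPipeline(object):
    def __init__(self, host, port, db_name, doc_name):
        client = pymongo.MongoClient(host=host, port=port)
        self.post = client[db_name][doc_name]

    @classmethod
    def from_crawler(cls, crawler):
        # Pull the same MONGODB_* keys from the project settings via the crawler.
        return cls(
            host=crawler.settings.get('MONGODB_HOST'),
            port=crawler.settings.get('MONGODB_PORT'),
            db_name=crawler.settings.get('MONGODB_DBNAME'),
            doc_name=crawler.settings.get('MONGODB_DOCNAME'),
        )

    def process_item(self, item, spider):
        self.post.insert_one(dict(item))
        return item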
|
reactivated/apps.py | silviogutierrez/reactivated | 178 | 12793464 | import importlib
import json
import logging
import os
import subprocess
from typing import Any, Dict, NamedTuple, Tuple
from django.apps import AppConfig
from django.conf import settings
from . import (
definitions_registry,
extract_views_from_urlpatterns,
global_types,
template_registry,
type_registry,
value_registry,
)
from .serialization import create_schema
logger = logging.getLogger("django.server")
def get_urls_schema() -> Dict[str, Any]:
urlconf = importlib.import_module(settings.ROOT_URLCONF)
urlpatterns = urlconf.urlpatterns # type: ignore[attr-defined]
from django.urls import converters
from django.urls.resolvers import RoutePattern
converter_mapping = {
converters.IntConverter: "number",
converters.StringConverter: "string",
converters.UUIDConverter: "string",
converters.SlugConverter: "string",
converters.PathConverter: "string",
}
urls = extract_views_from_urlpatterns(urlpatterns) # type: ignore[no-untyped-call]
reverse = {}
for _, regex, name, pattern in urls:
if not isinstance(pattern, RoutePattern):
continue
reverse[name or regex] = {
"route": f"/{regex}",
"args": {
arg_name: converter_mapping.get(arg_converter.__class__, "string")
for arg_name, arg_converter in pattern.converters.items()
},
}
return reverse
def get_types_schema() -> Any:
""" The package json-schema-to-typescript does expose a way to
automatically export any interface it sees. However, this can bloat our
generated files.
Instead, while creating the schema, we occasionally run into types that we
want available globally but are not directly referenced by templates.
    These aren't exported by `json-schema-to-typescript` because they're
    referenced using `tsType`, so the library is unaware of their usage.
So we register them in `globals` and force `json-schema-to-typescript` to
expose them.
We can't just add these types to the `type_registry` because that's only
parsed once when generating the parent tuple.
We could explore doing two passes in the future.
See `unreachableDefinitions` in json-schema-to-typescript
"""
type_registry["globals"] = Any # type: ignore[assignment]
context_processors = []
from .serialization.context_processors import create_context_processor_type
for engine in settings.TEMPLATES:
if engine["BACKEND"] == "reactivated.backend.JSX":
context_processors.extend(engine["OPTIONS"]["context_processors"]) # type: ignore[index]
type_registry["Context"] = create_context_processor_type(context_processors)
ParentTuple = NamedTuple("ParentTuple", type_registry.items()) # type: ignore[misc]
parent_schema, definitions = create_schema(ParentTuple, definitions_registry)
definitions_registry.update(definitions)
return {
"definitions": definitions,
**{
**definitions["reactivated.apps.ParentTuple"],
"properties": {
**definitions["reactivated.apps.ParentTuple"]["properties"],
"globals": {
"type": "object",
"additionalProperties": False,
"required": list(global_types.keys()),
"properties": global_types,
},
},
},
}
def get_templates() -> Dict[str, Tuple[Any]]:
return template_registry
def get_values() -> Dict[str, Any]:
return value_registry
def get_schema() -> str:
schema = {
"urls": get_urls_schema(),
"templates": get_templates(),
"types": get_types_schema(),
"values": get_values(),
}
return json.dumps(schema, indent=4)
class ReactivatedConfig(AppConfig):
name = "reactivated"
def ready(self) -> None:
"""
Django's dev server actually starts twice. So we prevent generation on
the first start. TODO: handle noreload.
"""
schema = get_schema()
if (
os.environ.get("WERKZEUG_RUN_MAIN") == "true"
or os.environ.get("RUN_MAIN") == "true"
):
# Triggers for the subprocess of the dev server after restarts or initial start.
pass
        is_server_started = "DJANGO_SERVER_STARTING" in os.environ
if is_server_started is False:
os.environ["DJANGO_SEVER_STARTING"] = "true"
return
generate_schema(schema)
def generate_schema(schema: str, skip_cache: bool = False) -> None:
"""
    For development use only; this requires Node and Python to be installed.
You can use this function for your E2E test prep.
"""
logger.info("Generating interfaces and client side code")
encoded_schema = schema.encode()
import hashlib
digest = hashlib.sha1(encoded_schema).hexdigest().encode()
if skip_cache is False and os.path.exists("client/generated/index.tsx"):
with open("client/generated/index.tsx", "r+b") as existing:
already_generated = existing.read()
if digest in already_generated:
logger.info("Skipping generation as nothing has changed")
return
#: Note that we don't pass the file object to stdout, because otherwise
# webpack gets confused with the half-written file when we make updates.
# Maybe there's a way to force it to be a single atomic write? I tried
# open('w+b', buffering=0) but no luck.
process = subprocess.Popen(
["node", "./node_modules/reactivated/generator.js"],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
)
out, error = process.communicate(encoded_schema)
os.makedirs("client/generated", exist_ok=True)
with open("client/generated/index.tsx", "w+b") as output:
output.write(b"// Digest: %s\n" % digest)
output.write(out)
logger.info("Finished generating.")
|
examples/sync_cmdclass_pyproject/sync_cmdclass_pyproject/__init__.py | linshoK/pysen | 423 | 12793467 | from typing import Any, Callable, Optional, Sequence, Set, Tuple
def foo(
a: Any,
b: Callable[[], Tuple[int, int, str]],
c: Set[str],
d: Optional[Sequence[int]] = None,
e: Any = None,
) -> None:
pass
print("Hello world")
foo(a=1, b=lambda: (1, 2, "hoge"), c=set(), d=None, e=None)
|
mmdet/ops/corner_pool/__init__.py | vanyalzr/mmdetection | 274 | 12793477 | from .corner_pool import CornerPool
__all__ = ['CornerPool']
|
plugin/lighthouse/reader/__init__.py | x9090/lighthouse | 1,741 | 12793490 | from .coverage_reader import CoverageReader
|
nn/framework.py | thunlp/Chinese_NRE | 272 | 12793497 | import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .encoder import BiLstmEncoder
from .classifier import AttClassifier
from torch.autograd import Variable
from torch.nn import functional, init
class MGLattice_model(nn.Module):
def __init__(self, data):
super(MGLattice_model, self).__init__()
# MG-Lattice encoder
self.encoder = BiLstmEncoder(data)
# Attentive classifier
self.classifier = AttClassifier(data)
def forward(self, gaz_list, word_inputs, biword_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover, pos1_inputs, pos2_inputs, ins_label, scope):
# ins_num * seq_len * hidden_dim
hidden_out = self.encoder.get_seq_features(gaz_list, word_inputs, biword_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover, pos1_inputs, pos2_inputs)
# batch_size * num_classes
logit = self.classifier.get_logit(hidden_out, ins_label, scope)
return logit
|
src/sklearn_evaluation/SQLiteTracker.py | abcnishant007/sklearn-evaluation | 351 | 12793506 | from uuid import uuid4
import sqlite3
import json
import pandas as pd
from sklearn_evaluation.table import Table
class SQLiteTracker:
"""A simple experiment tracker using SQLite
:doc:`Click here <../user_guide/SQLiteTracker>` to see the user guide.
Parameters
----------
path
Database location
"""
def __init__(self, path: str):
self.conn = sqlite3.connect(path)
cur = self.conn.cursor()
cur.execute("""
CREATE TABLE IF NOT EXISTS experiments (
uuid TEXT NOT NULL UNIQUE,
created TIMESTAMP default current_timestamp,
parameters TEXT,
comment TEXT
)
""")
cur.close()
def __getitem__(self, uuid):
"""Get experiment with a given uuid
"""
# TODO: make it work for a list of uuids
return pd.read_sql('SELECT * FROM experiments WHERE uuid = ?',
self.conn,
params=[uuid],
index_col='uuid')
def recent(self, n=5, normalize=False):
"""Get most recent experiments as a pandas.DataFrame
"""
query = """
SELECT uuid, created, parameters, comment
FROM experiments
ORDER BY created DESC
LIMIT ?
"""
df = pd.read_sql(query, self.conn, params=[n], index_col='uuid')
if normalize:
# parse and normalize json
parameters = pd.json_normalize(
df.pop('parameters').apply(lambda s: json.loads(s))).set_index(
df.index)
df = df.join(parameters)
# re order columns to show "comment" at the end
comment = df.pop('comment')
df.insert(len(df.columns), 'comment', comment)
return df
def query(self, code):
"""Query the database, returns a pandas.DataFrame
Examples
--------
>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker(':memory:') # example in-memory db
>>> tracker.insert('my_uuid', {'a': 1})
>>> df = tracker.query(
... "SELECT uuid, json_extract(parameters, '$.a') FROM experiments")
"""
df = pd.read_sql(code, self.conn)
if 'uuid' in df:
df = df.set_index('uuid')
return df
def new(self):
"""Create a new experiment, returns a uuid
"""
uuid = uuid4().hex
cur = self.conn.cursor()
cur.execute(
"""
INSERT INTO experiments (uuid)
VALUES(?)
""", [uuid])
cur.close()
self.conn.commit()
return uuid
def update(self, uuid, parameters):
"""Update the parameters of an empty experiment given its uuid
"""
self._can_update(uuid)
cur = self.conn.cursor()
cur.execute(
"""
UPDATE experiments
SET parameters = ?
WHERE uuid = ?
""", [json.dumps(parameters), uuid])
cur.close()
self.conn.commit()
def insert(self, uuid, parameters):
"""Insert a new experiment
"""
cur = self.conn.cursor()
cur.execute(
"""
INSERT INTO experiments (uuid, parameters)
VALUES(?, ?)
""", [uuid, json.dumps(parameters)])
cur.close()
self.conn.commit()
def comment(self, uuid, comment):
"""Add a comment to an experiment given its uuid
"""
# TODO: add overwrite (false by default) and append options
cur = self.conn.cursor()
cur.execute(
"""
UPDATE experiments
SET comment = ?
WHERE uuid = ?
""", [comment, uuid])
cur.close()
self.conn.commit()
def _recent(self, n=5, fmt='html'):
if fmt not in {'html', 'plain'}:
            raise ValueError('fmt must be one of "html" or "plain"')
cur = self.conn.cursor()
cur.execute(
"""
SELECT uuid, created, parameters, comment
FROM experiments
ORDER BY created DESC
LIMIT ?
""", [n])
res = cur.fetchall()
table = Table(res, header=['uuid', 'created', 'parameters', 'comment'])
title_template = '<h4> {} </h4>' if fmt == 'html' else '{}\n'
title = title_template.format(type(self).__name__)
if not len(table):
title += '(No experiments saved yet)'
if fmt == 'plain':
title += '\n'
if len(table):
footer = (('<br>' if fmt == 'html' else '\n') +
'(Most recent experiments)')
else:
footer = ''
return (title + (table.to_html() if fmt == 'html' else str(table)) +
footer)
def _can_update(self, uuid):
"""Check if an experiment with a given uuid can be updated
"""
cur = self.conn.cursor()
cur.execute(
"""
SELECT parameters
FROM experiments
WHERE uuid = ?
""", [uuid])
row = cur.fetchone()
exists = row is not None
if exists:
empty = row[0] is None
if not empty:
raise ValueError('Cannot update non-empty experiment with '
'uuid "{}"'.format(uuid))
else:
raise ValueError('Cannot update experiment with '
'uuid "{}" because it does '
'not exist'.format(uuid))
def __repr__(self):
return self._recent(fmt='plain')
def _repr_html_(self):
return self._recent(fmt='html')
def __del__(self):
self.conn.close()
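# Illustrative usage sketch (hypothetical demo values): track one experiment
# end-to-end against an in-memory database.
if __name__ == "__main__":
    tracker = SQLiteTracker(':memory:')
    uuid = tracker.new()                           # register an empty experiment
    tracker.update(uuid, {'model': 'rf', 'max_depth': 5})
    tracker.comment(uuid, 'baseline run')
    print(tracker.recent(n=1, normalize=True))     # parameters expanded into columns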
|
multipole-graph-neural-operator/utilities.py | vir-k01/graph-pde | 121 | 12793514 | import torch
import numpy as np
import scipy.io
import h5py
import sklearn.metrics
from torch_geometric.data import Data
import torch.nn as nn
from scipy.ndimage import gaussian_filter
#################################################
#
# Utilities
#
#################################################
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# reading data
class MatReader(object):
def __init__(self, file_path, to_torch=True, to_cuda=False, to_float=True):
super(MatReader, self).__init__()
self.to_torch = to_torch
self.to_cuda = to_cuda
self.to_float = to_float
self.file_path = file_path
self.data = None
self.old_mat = None
self._load_file()
def _load_file(self):
try:
self.data = scipy.io.loadmat(self.file_path)
self.old_mat = True
        except Exception:  # scipy cannot read MATLAB v7.3 files; fall back to h5py
self.data = h5py.File(self.file_path)
self.old_mat = False
def load_file(self, file_path):
self.file_path = file_path
self._load_file()
def read_field(self, field):
x = self.data[field]
if not self.old_mat:
x = x[()]
x = np.transpose(x, axes=range(len(x.shape) - 1, -1, -1))
if self.to_float:
x = x.astype(np.float32)
if self.to_torch:
x = torch.from_numpy(x)
if self.to_cuda:
x = x.cuda()
return x
def set_cuda(self, to_cuda):
self.to_cuda = to_cuda
def set_torch(self, to_torch):
self.to_torch = to_torch
def set_float(self, to_float):
self.to_float = to_float
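# Illustrative sketch: typical MatReader usage. The path 'data.mat' and the field
# name 'coeff' are placeholders for a real dataset file; this helper is hypothetical.
def _demo_matreader(path='data.mat', field='coeff'):
    reader = MatReader(path, to_torch=True, to_cuda=False, to_float=True)
    return reader.read_field(field)  # torch.FloatTensor built from the MATLAB variable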
# normalization, pointwise gaussian
class UnitGaussianNormalizer(object):
def __init__(self, x, eps=0.00001):
super(UnitGaussianNormalizer, self).__init__()
# x could be in shape of ntrain*n or ntrain*T*n or ntrain*n*T
self.mean = torch.mean(x, 0)
self.std = torch.std(x, 0)
self.eps = eps
def encode(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
def decode(self, x, sample_idx=None):
if sample_idx is None:
std = self.std + self.eps # n
mean = self.mean
else:
if len(self.mean.shape) == len(sample_idx[0].shape):
std = self.std[sample_idx] + self.eps # batch*n
mean = self.mean[sample_idx]
if len(self.mean.shape) > len(sample_idx[0].shape):
std = self.std[:,sample_idx]+ self.eps # T*batch*n
mean = self.mean[:,sample_idx]
# x is in shape of batch*n or T*batch*n
x = (x * std) + mean
return x
def cuda(self):
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def cpu(self):
self.mean = self.mean.cpu()
self.std = self.std.cpu()
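# Illustrative sketch (hypothetical demo): pointwise encode/decode round trip for a
# batch of shape ntrain*n, showing that decode inverts encode up to float precision.
def _demo_unit_gaussian_normalizer():
    x = torch.randn(100, 64)
    normalizer = UnitGaussianNormalizer(x)
    x_enc = normalizer.encode(x)      # (x - mean) / (std + eps), pointwise
    x_dec = normalizer.decode(x_enc)  # map back to the original scale
    return torch.allclose(x, x_dec, atol=1e-5)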
# normalization, Gaussian
class GaussianNormalizer(object):
def __init__(self, x, eps=0.00001):
super(GaussianNormalizer, self).__init__()
self.mean = torch.mean(x)
self.std = torch.std(x)
self.eps = eps
def encode(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
def decode(self, x, sample_idx=None):
x = (x * (self.std + self.eps)) + self.mean
return x
def cuda(self):
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def cpu(self):
self.mean = self.mean.cpu()
self.std = self.std.cpu()
# normalization, scaling by range
class RangeNormalizer(object):
def __init__(self, x, low=0.0, high=1.0):
super(RangeNormalizer, self).__init__()
mymin = torch.min(x, 0)[0].view(-1)
mymax = torch.max(x, 0)[0].view(-1)
self.a = (high - low)/(mymax - mymin)
self.b = -self.a*mymax + high
def encode(self, x):
s = x.size()
x = x.view(s[0], -1)
x = self.a*x + self.b
x = x.view(s)
return x
def decode(self, x):
s = x.size()
x = x.view(s[0], -1)
x = (x - self.b)/self.a
x = x.view(s)
return x
#loss function with rel/abs Lp loss
class LpLoss(object):
def __init__(self, d=2, p=2, size_average=True, reduction=True):
super(LpLoss, self).__init__()
        # Dimension and Lp-norm type must be positive
assert d > 0 and p > 0
self.d = d
self.p = p
self.reduction = reduction
self.size_average = size_average
def abs(self, x, y):
num_examples = x.size()[0]
#Assume uniform mesh
h = 1.0 / (x.size()[1] - 1.0)
all_norms = (h**(self.d/self.p))*torch.norm(x.view(num_examples,-1) - y.view(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(all_norms)
else:
return torch.sum(all_norms)
return all_norms
def rel(self, x, y):
num_examples = x.size()[0]
diff_norms = torch.norm(x.reshape(num_examples,-1) - y.reshape(num_examples,-1), self.p, 1)
y_norms = torch.norm(y.reshape(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(diff_norms/y_norms)
else:
return torch.sum(diff_norms/y_norms)
return diff_norms/y_norms
def __call__(self, x, y):
return self.rel(x, y)
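# Illustrative sketch (hypothetical demo): calling an LpLoss object computes the
# batch-averaged relative L2 error rel(x, y).
def _demo_lploss():
    loss = LpLoss(d=2, p=2)
    x = torch.rand(8, 32 * 32)
    y = torch.rand(8, 32 * 32)
    return loss(x, y)  # mean over the batch of ||x - y||_2 / ||y||_2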
# A simple feedforward neural network
class DenseNet(torch.nn.Module):
def __init__(self, layers, nonlinearity, out_nonlinearity=None, normalize=False):
super(DenseNet, self).__init__()
self.n_layers = len(layers) - 1
assert self.n_layers >= 1
self.layers = nn.ModuleList()
for j in range(self.n_layers):
self.layers.append(nn.Linear(layers[j], layers[j+1]))
if j != self.n_layers - 1:
if normalize:
self.layers.append(nn.BatchNorm1d(layers[j+1]))
self.layers.append(nonlinearity())
if out_nonlinearity is not None:
self.layers.append(out_nonlinearity())
def forward(self, x):
for _, l in enumerate(self.layers):
x = l(x)
return x
class DenseNet_sin(torch.nn.Module):
def __init__(self, layers, nonlinearity, out_nonlinearity=None, normalize=False):
super(DenseNet_sin, self).__init__()
self.n_layers = len(layers) - 1
assert self.n_layers >= 1
self.layers = nn.ModuleList()
for j in range(self.n_layers):
self.layers.append(nn.Linear(layers[j], layers[j+1]))
def forward(self, x):
for j, l in enumerate(self.layers):
x = l(x)
if j != self.n_layers - 1:
x = torch.sin(x)
return x
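# Illustrative sketch (hypothetical demo): a small DenseNet mapping 2-d inputs to a
# scalar, with ReLU applied between hidden layers.
def _demo_densenet():
    net = DenseNet([2, 64, 64, 1], nn.ReLU)
    x = torch.randn(10, 2)
    return net(x).shape  # torch.Size([10, 1])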
# generate graphs on square domain
class SquareMeshGenerator(object):
def __init__(self, real_space, mesh_size):
super(SquareMeshGenerator, self).__init__()
self.d = len(real_space)
self.s = mesh_size[0]
assert len(mesh_size) == self.d
if self.d == 1:
self.n = mesh_size[0]
self.grid = np.linspace(real_space[0][0], real_space[0][1], self.n).reshape((self.n, 1))
else:
self.n = 1
grids = []
for j in range(self.d):
grids.append(np.linspace(real_space[j][0], real_space[j][1], mesh_size[j]))
self.n *= mesh_size[j]
self.grid = np.vstack([xx.ravel() for xx in np.meshgrid(*grids)]).T
def ball_connectivity(self, r):
pwd = sklearn.metrics.pairwise_distances(self.grid)
self.edge_index = np.vstack(np.where(pwd <= r))
self.n_edges = self.edge_index.shape[1]
return torch.tensor(self.edge_index, dtype=torch.long)
def gaussian_connectivity(self, sigma):
pwd = sklearn.metrics.pairwise_distances(self.grid)
rbf = np.exp(-pwd**2/sigma**2)
sample = np.random.binomial(1,rbf)
self.edge_index = np.vstack(np.where(sample))
self.n_edges = self.edge_index.shape[1]
return torch.tensor(self.edge_index, dtype=torch.long)
def get_grid(self):
return torch.tensor(self.grid, dtype=torch.float)
def attributes(self, f=None, theta=None):
if f is None:
if theta is None:
edge_attr = self.grid[self.edge_index.T].reshape((self.n_edges,-1))
else:
edge_attr = np.zeros((self.n_edges, 2*self.d+2))
edge_attr[:,0:2*self.d] = self.grid[self.edge_index.T].reshape((self.n_edges,-1))
edge_attr[:, 2 * self.d] = theta[self.edge_index[0]]
edge_attr[:, 2 * self.d +1] = theta[self.edge_index[1]]
else:
xy = self.grid[self.edge_index.T].reshape((self.n_edges,-1))
if theta is None:
edge_attr = f(xy[:,0:self.d], xy[:,self.d:])
else:
edge_attr = f(xy[:,0:self.d], xy[:,self.d:], theta[self.edge_index[0]], theta[self.edge_index[1]])
return torch.tensor(edge_attr, dtype=torch.float)
def get_boundary(self):
s = self.s
n = self.n
boundary1 = np.array(range(0, s))
boundary2 = np.array(range(n - s, n))
boundary3 = np.array(range(s, n, s))
boundary4 = np.array(range(2 * s - 1, n, s))
self.boundary = np.concatenate([boundary1, boundary2, boundary3, boundary4])
def boundary_connectivity2d(self, stride=1):
boundary = self.boundary[::stride]
boundary_size = len(boundary)
vertice1 = np.array(range(self.n))
vertice1 = np.repeat(vertice1, boundary_size)
vertice2 = np.tile(boundary, self.n)
self.edge_index_boundary = np.stack([vertice2, vertice1], axis=0)
self.n_edges_boundary = self.edge_index_boundary.shape[1]
return torch.tensor(self.edge_index_boundary, dtype=torch.long)
def attributes_boundary(self, f=None, theta=None):
# if self.edge_index_boundary == None:
# self.boundary_connectivity2d()
if f is None:
if theta is None:
edge_attr_boundary = self.grid[self.edge_index_boundary.T].reshape((self.n_edges_boundary,-1))
else:
edge_attr_boundary = np.zeros((self.n_edges_boundary, 2*self.d+2))
edge_attr_boundary[:,0:2*self.d] = self.grid[self.edge_index_boundary.T].reshape((self.n_edges_boundary,-1))
edge_attr_boundary[:, 2 * self.d] = theta[self.edge_index_boundary[0]]
edge_attr_boundary[:, 2 * self.d +1] = theta[self.edge_index_boundary[1]]
else:
xy = self.grid[self.edge_index_boundary.T].reshape((self.n_edges_boundary,-1))
if theta is None:
edge_attr_boundary = f(xy[:,0:self.d], xy[:,self.d:])
else:
edge_attr_boundary = f(xy[:,0:self.d], xy[:,self.d:], theta[self.edge_index_boundary[0]], theta[self.edge_index_boundary[1]])
return torch.tensor(edge_attr_boundary, dtype=torch.float)
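# Illustrative sketch (hypothetical demo): a 16x16 grid on [0,1]^2, connected within
# a ball of radius 0.1; edge attributes are the coordinates of the two endpoints.
def _demo_square_mesh():
    mesh = SquareMeshGenerator([[0.0, 1.0], [0.0, 1.0]], [16, 16])
    edge_index = mesh.ball_connectivity(0.1)  # 2 x n_edges
    edge_attr = mesh.attributes()             # n_edges x 4
    return mesh.get_grid(), edge_index, edge_attr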
# generate graphs with sampling
class RandomMeshGenerator(object):
def __init__(self, real_space, mesh_size, sample_size, attr_features=1):
super(RandomMeshGenerator, self).__init__()
self.d = len(real_space)
self.m = sample_size
self.attr_features = attr_features
assert len(mesh_size) == self.d
if self.d == 1:
self.n = mesh_size[0]
self.grid = np.linspace(real_space[0][0], real_space[0][1], self.n).reshape((self.n, 1))
else:
self.n = 1
grids = []
for j in range(self.d):
grids.append(np.linspace(real_space[j][0], real_space[j][1], mesh_size[j]))
self.n *= mesh_size[j]
self.grid = np.vstack([xx.ravel() for xx in np.meshgrid(*grids)]).T
if self.m > self.n:
self.m = self.n
self.idx = np.array(range(self.n))
self.grid_sample = self.grid
def sample(self):
perm = torch.randperm(self.n)
self.idx = perm[:self.m]
self.grid_sample = self.grid[self.idx]
return self.idx
def get_grid(self):
return torch.tensor(self.grid_sample, dtype=torch.float)
def ball_connectivity(self, r, is_forward=False):
pwd = sklearn.metrics.pairwise_distances(self.grid_sample)
self.edge_index = np.vstack(np.where(pwd <= r))
self.n_edges = self.edge_index.shape[1]
if is_forward:
print(self.edge_index.shape)
self.edge_index = self.edge_index[:, self.edge_index[0] >= self.edge_index[1]]
print(self.edge_index.shape)
self.n_edges = self.edge_index.shape[1]
return torch.tensor(self.edge_index, dtype=torch.long)
def torus1d_connectivity(self, r):
grid = self.grid_sample
pwd0 = sklearn.metrics.pairwise_distances(grid, grid)
grid1 = grid
grid1[:,0] = grid[:,0]+1
pwd1 = sklearn.metrics.pairwise_distances(grid, grid1)
PWD = np.stack([pwd0,pwd1], axis=2)
pwd = np.min(PWD, axis=2)
self.edge_index = np.vstack(np.where(pwd <= r))
self.n_edges = self.edge_index.shape[1]
return torch.tensor(self.edge_index, dtype=torch.long)
def gaussian_connectivity(self, sigma):
pwd = sklearn.metrics.pairwise_distances(self.grid_sample)
rbf = np.exp(-pwd**2/sigma**2)
sample = np.random.binomial(1,rbf)
self.edge_index = np.vstack(np.where(sample))
self.n_edges = self.edge_index.shape[1]
return torch.tensor(self.edge_index, dtype=torch.long)
def attributes(self, f=None, theta=None):
if f is None:
if theta is None:
edge_attr = self.grid[self.edge_index.T].reshape((self.n_edges, -1))
else:
theta = theta[self.idx]
edge_attr = np.zeros((self.n_edges, 2 * self.d + 2*self.attr_features))
edge_attr[:, 0:2 * self.d] = self.grid_sample[self.edge_index.T].reshape((self.n_edges, -1))
edge_attr[:, 2 * self.d : 2 * self.d + self.attr_features] = theta[self.edge_index[0]].view(-1, self.attr_features)
edge_attr[:, 2 * self.d + self.attr_features: 2 * self.d + 2*self.attr_features] = theta[self.edge_index[1]].view(-1, self.attr_features)
else:
xy = self.grid_sample[self.edge_index.T].reshape((self.n_edges, -1))
if theta is None:
edge_attr = f(xy[:, 0:self.d], xy[:, self.d:])
else:
theta = theta[self.idx]
edge_attr = f(xy[:, 0:self.d], xy[:, self.d:], theta[self.edge_index[0]], theta[self.edge_index[1]])
return torch.tensor(edge_attr, dtype=torch.float)
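# Illustrative sketch (hypothetical demo): subsample 256 of the 1024 grid nodes and
# connect the sampled nodes within radius 0.08.
def _demo_random_mesh():
    mesh = RandomMeshGenerator([[0.0, 1.0], [0.0, 1.0]], [32, 32], sample_size=256)
    idx = mesh.sample()                        # indices of the sampled nodes
    edge_index = mesh.ball_connectivity(0.08)  # 2 x n_edges over the sample
    return mesh.get_grid(), idx, edge_index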
# generate two-level graph
class RandomTwoMeshGenerator(object):
def __init__(self, real_space, mesh_size, sample_size, induced_point):
super(RandomTwoMeshGenerator, self).__init__()
self.d = len(real_space)
self.m = sample_size
self.m_i = induced_point
assert len(mesh_size) == self.d
if self.d == 1:
self.n = mesh_size[0]
self.grid = np.linspace(real_space[0][0], real_space[0][1], self.n).reshape((self.n, 1))
else:
self.n = 1
grids = []
for j in range(self.d):
grids.append(np.linspace(real_space[j][0], real_space[j][1], mesh_size[j]))
self.n *= mesh_size[j]
self.grid = np.vstack([xx.ravel() for xx in np.meshgrid(*grids)]).T
if self.m > self.n:
self.m = self.n
self.idx = np.array(range(self.n))
self.idx_i = self.idx
self.idx_both = self.idx
self.grid_sample = self.grid
self.grid_sample_i = self.grid
self.grid_sample_both = self.grid
def sample(self):
perm = torch.randperm(self.n)
self.idx = perm[:self.m]
self.idx_i = perm[self.m: self.m+self.m_i]
self.idx_both = perm[: self.m+self.m_i]
self.grid_sample = self.grid[self.idx]
self.grid_sample_i = self.grid[self.idx_i]
self.grid_sample_both = self.grid[self.idx_both]
return self.idx, self.idx_i, self.idx_both
def get_grid(self):
return torch.tensor(self.grid_sample, dtype=torch.float), \
torch.tensor(self.grid_sample_i, dtype=torch.float), \
torch.tensor(self.grid_sample_both, dtype=torch.float)
def ball_connectivity(self, r11, r12, r22):
pwd = sklearn.metrics.pairwise_distances(self.grid_sample)
pwd12 = sklearn.metrics.pairwise_distances(self.grid_sample, self.grid_sample_i)
pwd22 = sklearn.metrics.pairwise_distances(self.grid_sample_i)
self.edge_index = np.vstack(np.where(pwd <= r11))
self.edge_index_12 = np.vstack(np.where(pwd12 <= r12))
self.edge_index_12[1,:] = self.edge_index_12[1,:] + self.m
self.edge_index_21 = self.edge_index_12[[1,0],:]
self.edge_index_22 = np.vstack(np.where(pwd22 <= r22)) + self.m
self.n_edges = self.edge_index.shape[1]
self.n_edges_12 = self.edge_index_12.shape[1]
self.n_edges_22 = self.edge_index_22.shape[1]
return torch.tensor(self.edge_index, dtype=torch.long), \
torch.tensor(self.edge_index_12, dtype=torch.long), \
torch.tensor(self.edge_index_21, dtype=torch.long), \
torch.tensor(self.edge_index_22, dtype=torch.long)
def attributes(self, theta=None):
if theta is None:
edge_attr = self.grid_sample_both[self.edge_index.T].reshape((self.n_edges, -1))
edge_attr_12 = self.grid_sample_both[self.edge_index_12.T].reshape((self.n_edges_12, -1))
edge_attr_21 = self.grid_sample_both[self.edge_index_21.T].reshape((self.n_edges_12, -1))
edge_attr_22 = self.grid_sample_both[self.edge_index_22.T].reshape((self.n_edges_22, -1))
else:
theta = theta[self.idx_both]
edge_attr = np.zeros((self.n_edges, 3 * self.d))
edge_attr[:, 0:2 * self.d] = self.grid_sample_both[self.edge_index.T].reshape((self.n_edges, -1))
edge_attr[:, 2 * self.d] = theta[self.edge_index[0]]
edge_attr[:, 2 * self.d + 1] = theta[self.edge_index[1]]
edge_attr_12 = np.zeros((self.n_edges_12, 3 * self.d))
edge_attr_12[:, 0:2 * self.d] = self.grid_sample_both[self.edge_index_12.T].reshape((self.n_edges_12, -1))
edge_attr_12[:, 2 * self.d] = theta[self.edge_index_12[0]]
edge_attr_12[:, 2 * self.d + 1] = theta[self.edge_index_12[1]]
edge_attr_21 = np.zeros((self.n_edges_12, 3 * self.d))
edge_attr_21[:, 0:2 * self.d] = self.grid_sample_both[self.edge_index_21.T].reshape((self.n_edges_12, -1))
edge_attr_21[:, 2 * self.d] = theta[self.edge_index_21[0]]
edge_attr_21[:, 2 * self.d + 1] = theta[self.edge_index_21[1]]
edge_attr_22 = np.zeros((self.n_edges_22, 3 * self.d))
edge_attr_22[:, 0:2 * self.d] = self.grid_sample_both[self.edge_index_22.T].reshape((self.n_edges_22, -1))
edge_attr_22[:, 2 * self.d] = theta[self.edge_index_22[0]]
edge_attr_22[:, 2 * self.d + 1] = theta[self.edge_index_22[1]]
return torch.tensor(edge_attr, dtype=torch.float), \
torch.tensor(edge_attr_12, dtype=torch.float), \
torch.tensor(edge_attr_21, dtype=torch.float), \
torch.tensor(edge_attr_22, dtype=torch.float)
# generate multi-level graph
class RandomMultiMeshGenerator(object):
def __init__(self, real_space, mesh_size, level, sample_sizes):
super(RandomMultiMeshGenerator, self).__init__()
self.d = len(real_space)
self.m = sample_sizes
self.level = level
assert len(sample_sizes) == level
assert len(mesh_size) == self.d
if self.d == 1:
self.n = mesh_size[0]
self.grid = np.linspace(real_space[0][0], real_space[0][1], self.n).reshape((self.n, 1))
else:
self.n = 1
grids = []
for j in range(self.d):
grids.append(np.linspace(real_space[j][0], real_space[j][1], mesh_size[j]))
self.n *= mesh_size[j]
self.grid = np.vstack([xx.ravel() for xx in np.meshgrid(*grids)]).T
self.idx = []
self.idx_all = None
self.grid_sample = []
self.grid_sample_all = None
self.edge_index = []
self.edge_index_down = []
self.edge_index_up = []
self.edge_attr = []
self.edge_attr_down = []
self.edge_attr_up = []
self.n_edges_inner = []
self.n_edges_inter = []
def sample(self):
self.idx = []
self.grid_sample = []
perm = torch.randperm(self.n)
index = 0
for l in range(self.level):
self.idx.append(perm[index: index+self.m[l]])
self.grid_sample.append(self.grid[self.idx[l]])
index = index+self.m[l]
self.idx_all = perm[:index]
self.grid_sample_all = self.grid[self.idx_all]
return self.idx, self.idx_all
def get_grid(self):
grid_out = []
for grid in self.grid_sample:
grid_out.append(torch.tensor(grid, dtype=torch.float))
return grid_out, torch.tensor(self.grid_sample_all, dtype=torch.float)
def ball_connectivity(self, radius_inner, radius_inter):
assert len(radius_inner) == self.level
assert len(radius_inter) == self.level - 1
self.edge_index = []
self.edge_index_down = []
self.edge_index_up = []
self.n_edges_inner = []
self.n_edges_inter = []
edge_index_out = []
edge_index_down_out = []
edge_index_up_out = []
index = 0
for l in range(self.level):
pwd = sklearn.metrics.pairwise_distances(self.grid_sample[l])
edge_index = np.vstack(np.where(pwd <= radius_inner[l])) + index
self.edge_index.append(edge_index)
edge_index_out.append(torch.tensor(edge_index, dtype=torch.long))
self.n_edges_inner.append(edge_index.shape[1])
index = index + self.grid_sample[l].shape[0]
index = 0
for l in range(self.level-1):
pwd = sklearn.metrics.pairwise_distances(self.grid_sample[l], self.grid_sample[l+1])
edge_index = np.vstack(np.where(pwd <= radius_inter[l])) + index
edge_index[1, :] = edge_index[1, :] + self.grid_sample[l].shape[0]
self.edge_index_down.append(edge_index)
edge_index_down_out.append(torch.tensor(edge_index, dtype=torch.long))
self.edge_index_up.append(edge_index[[1,0],:])
edge_index_up_out.append(torch.tensor(edge_index[[1,0],:], dtype=torch.long))
self.n_edges_inter.append(edge_index.shape[1])
index = index + self.grid_sample[l].shape[0]
edge_index_out = torch.cat(edge_index_out, dim=1)
edge_index_down_out = torch.cat(edge_index_down_out, dim=1)
edge_index_up_out = torch.cat(edge_index_up_out, dim=1)
return edge_index_out, edge_index_down_out, edge_index_up_out
def get_edge_index_range(self):
        # In order to use the graph network's data structure, the edge index
        # must be stored as a tensor rather than a list, so we concatenate the
        # per-level edge index lists and record the index range of each level.
edge_index_range = torch.zeros((self.level,2), dtype=torch.long)
edge_index_down_range = torch.zeros((self.level-1,2), dtype=torch.long)
edge_index_up_range = torch.zeros((self.level-1,2), dtype=torch.long)
n_edge_index = 0
for l in range(self.level):
edge_index_range[l, 0] = n_edge_index
n_edge_index = n_edge_index + self.edge_index[l].shape[1]
edge_index_range[l, 1] = n_edge_index
n_edge_index = 0
for l in range(self.level-1):
edge_index_down_range[l, 0] = n_edge_index
edge_index_up_range[l, 0] = n_edge_index
n_edge_index = n_edge_index + self.edge_index_down[l].shape[1]
edge_index_down_range[l, 1] = n_edge_index
edge_index_up_range[l, 1] = n_edge_index
return edge_index_range, edge_index_down_range, edge_index_up_range
def attributes(self, theta=None):
self.edge_attr = []
self.edge_attr_down = []
self.edge_attr_up = []
if theta is None:
for l in range(self.level):
edge_attr = self.grid_sample_all[self.edge_index[l].T].reshape((self.n_edges_inner[l], 2*self.d))
self.edge_attr.append(torch.tensor(edge_attr))
for l in range(self.level - 1):
edge_attr_down = self.grid_sample_all[self.edge_index_down[l].T].reshape((self.n_edges_inter[l], 2*self.d))
edge_attr_up = self.grid_sample_all[self.edge_index_up[l].T].reshape((self.n_edges_inter[l], 2*self.d))
self.edge_attr_down.append(torch.tensor(edge_attr_down))
self.edge_attr_up.append(torch.tensor(edge_attr_up))
else:
theta = theta[self.idx_all]
for l in range(self.level):
edge_attr = np.zeros((self.n_edges_inner[l], 2 * self.d + 2))
edge_attr[:, 0:2 * self.d] = self.grid_sample_all[self.edge_index[l].T].reshape(
(self.n_edges_inner[l], 2 * self.d))
edge_attr[:, 2 * self.d] = theta[self.edge_index[l][0]]
edge_attr[:, 2 * self.d + 1] = theta[self.edge_index[l][1]]
self.edge_attr.append(torch.tensor(edge_attr, dtype=torch.float))
for l in range(self.level - 1):
edge_attr_down = np.zeros((self.n_edges_inter[l], 2 * self.d + 2))
edge_attr_up = np.zeros((self.n_edges_inter[l], 2 * self.d + 2))
edge_attr_down[:, 0:2 * self.d] = self.grid_sample_all[self.edge_index_down[l].T].reshape(
(self.n_edges_inter[l], 2 * self.d))
edge_attr_down[:, 2 * self.d] = theta[self.edge_index_down[l][0]]
edge_attr_down[:, 2 * self.d + 1] = theta[self.edge_index_down[l][1]]
self.edge_attr_down.append(torch.tensor(edge_attr_down, dtype=torch.float))
edge_attr_up[:, 0:2 * self.d] = self.grid_sample_all[self.edge_index_up[l].T].reshape(
(self.n_edges_inter[l], 2 * self.d))
edge_attr_up[:, 2 * self.d] = theta[self.edge_index_up[l][0]]
edge_attr_up[:, 2 * self.d + 1] = theta[self.edge_index_up[l][1]]
self.edge_attr_up.append(torch.tensor(edge_attr_up, dtype=torch.float))
edge_attr_out = torch.cat(self.edge_attr, dim=0)
edge_attr_down_out = torch.cat(self.edge_attr_down, dim=0)
edge_attr_up_out = torch.cat(self.edge_attr_up, dim=0)
return edge_attr_out, edge_attr_down_out, edge_attr_up_out
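# Illustrative sketch (hypothetical demo): a three-level random mesh; the ranges
# returned by get_edge_index_range() slice the concatenated edge index tensors
# back into their per-level blocks.
def _demo_multi_mesh():
    mesh = RandomMultiMeshGenerator([[0.0, 1.0], [0.0, 1.0]], [32, 32],
                                    level=3, sample_sizes=[256, 64, 16])
    mesh.sample()
    edge_index, edge_down, edge_up = mesh.ball_connectivity([0.08, 0.16, 0.32],
                                                            [0.08, 0.16])
    ranges = mesh.get_edge_index_range()
    edge_attr, edge_attr_down, edge_attr_up = mesh.attributes()
    return edge_index, ranges, edge_attr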
# generate graph, with split and assemble
class RandomGridSplitter(object):
def __init__(self, grid, resolution, d=2, m=200, l=1, radius=0.25):
super(RandomGridSplitter, self).__init__()
self.grid = grid
self.resolution = resolution
self.n = resolution**d
self.d = d
self.m = m
self.l = l
self.radius = radius
assert self.n % self.m == 0
self.num = self.n // self.m # number of sub-grid
def get_data(self, theta, edge_features=1):
data = []
for i in range(self.l):
perm = torch.randperm(self.n)
perm = perm.reshape(self.num, self.m)
for j in range(self.num):
idx = perm[j,:].reshape(-1,)
grid_sample = self.grid.reshape(self.n,-1)[idx]
theta_sample = theta.reshape(self.n,-1)[idx]
X = torch.cat([grid_sample,theta_sample],dim=1)
pwd = sklearn.metrics.pairwise_distances(grid_sample)
edge_index = np.vstack(np.where(pwd <= self.radius))
n_edges = edge_index.shape[1]
edge_index = torch.tensor(edge_index, dtype=torch.long)
if edge_features == 0:
edge_attr = grid_sample[edge_index.T].reshape(n_edges, -1)
else:
edge_attr = np.zeros((n_edges, 2*self.d+2))
a = theta_sample[:,0]
edge_attr[:, :2*self.d] = grid_sample[edge_index.T].reshape(n_edges, -1)
edge_attr[:, 2*self.d] = a[edge_index[0]]
edge_attr[:, 2*self.d+1] = a[edge_index[1]]
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
data.append(Data(x=X, edge_index=edge_index, edge_attr=edge_attr, split_idx=idx))
print('test', len(data), X.shape, edge_index.shape, edge_attr.shape)
return data
def assemble(self, pred, split_idx, batch_size2, sigma=1, cuda=False):
assert len(pred) == len(split_idx)
assert len(pred) == self.num * self.l // batch_size2
out = torch.zeros(self.n, )
if cuda:
out = out.cuda()
for i in range(len(pred)):
pred_i = pred[i].reshape(batch_size2, self.m)
split_idx_i = split_idx[i].reshape(batch_size2, self.m)
for j in range(batch_size2):
pred_ij = pred_i[j,:].reshape(-1,)
idx = split_idx_i[j,:].reshape(-1,)
out[idx] = out[idx] + pred_ij
out = out / self.l
# out = gaussian_filter(out, sigma=sigma, mode='constant', cval=0)
# out = torch.tensor(out, dtype=torch.float)
return out.reshape(-1,)
# generate multi-level graph, with split and assemble
class RandomMultiMeshSplitter(object):
def __init__(self, real_space, mesh_size, level, sample_sizes):
super(RandomMultiMeshSplitter, self).__init__()
self.d = len(real_space)
self.ms = sample_sizes
self.m = sample_sizes[0]
self.level = level
assert len(sample_sizes) == level
assert len(mesh_size) == self.d
if self.d == 1:
self.n = mesh_size[0]
self.grid = np.linspace(real_space[0][0], real_space[0][1], self.n).reshape((self.n, 1))
else:
self.n = 1
grids = []
for j in range(self.d):
grids.append(np.linspace(real_space[j][0], real_space[j][1], mesh_size[j]))
self.n *= mesh_size[j]
self.grid = np.vstack([xx.ravel() for xx in np.meshgrid(*grids)]).T
self.splits = self.n // self.m # number of sub-grid
if self.splits * self.m < self.n:
self.splits = self.splits + 1
print('n:',self.n,' m:',self.m, ' number of splits:', self.splits )
self.perm = None
self.idx = []
self.idx_all = None
self.grid_sample = []
self.grid_sample_all = None
self.edge_index = []
self.edge_index_down = []
self.edge_index_up = []
self.edge_attr = []
self.edge_attr_down = []
self.edge_attr_up = []
self.n_edges_inner = []
self.n_edges_inter = []
def sample(self, new_sample=True, index0=0):
self.idx = []
self.grid_sample = []
if (new_sample) or (self.perm is None):
self.perm = torch.randperm(self.n)
index = index0
for l in range(self.level):
index = index % self.n
index_end = (index+self.ms[l]) % self.n
if index < index_end:
idx = self.perm[index: index_end]
else:
idx = torch.cat((self.perm[index: ],self.perm[: index_end]), dim=0)
self.idx.append(idx)
self.grid_sample.append(self.grid[idx])
index = index_end
if index0 < index_end:
idx_all = self.perm[index0: index_end]
else:
idx_all = torch.cat((self.perm[index0:], self.perm[: index_end]), dim=0)
self.idx_all = idx_all
self.grid_sample_all = self.grid[self.idx_all]
return self.idx, self.idx_all
def get_grid(self):
grid_out = []
for grid in self.grid_sample:
grid_out.append(torch.tensor(grid, dtype=torch.float))
return grid_out, torch.tensor(self.grid_sample_all, dtype=torch.float)
def ball_connectivity(self, radius_inner, radius_inter):
assert len(radius_inner) == self.level
assert len(radius_inter) == self.level - 1
self.edge_index = []
self.edge_index_down = []
self.edge_index_up = []
self.n_edges_inner = []
self.n_edges_inter = []
edge_index_out = []
edge_index_down_out = []
edge_index_up_out = []
index = 0
for l in range(self.level):
pwd = sklearn.metrics.pairwise_distances(self.grid_sample[l])
edge_index = np.vstack(np.where(pwd <= radius_inner[l])) + index
self.edge_index.append(edge_index)
edge_index_out.append(torch.tensor(edge_index, dtype=torch.long))
self.n_edges_inner.append(edge_index.shape[1])
index = index + self.grid_sample[l].shape[0]
index = 0
for l in range(self.level-1):
pwd = sklearn.metrics.pairwise_distances(self.grid_sample[l], self.grid_sample[l+1])
edge_index = np.vstack(np.where(pwd <= radius_inter[l])) + index
edge_index[1, :] = edge_index[1, :] + self.grid_sample[l].shape[0]
self.edge_index_down.append(edge_index)
edge_index_down_out.append(torch.tensor(edge_index, dtype=torch.long))
self.edge_index_up.append(edge_index[[1,0],:])
edge_index_up_out.append(torch.tensor(edge_index[[1,0],:], dtype=torch.long))
self.n_edges_inter.append(edge_index.shape[1])
index = index + self.grid_sample[l].shape[0]
edge_index_out = torch.cat(edge_index_out, dim=1)
edge_index_down_out = torch.cat(edge_index_down_out, dim=1)
edge_index_up_out = torch.cat(edge_index_up_out, dim=1)
return edge_index_out, edge_index_down_out, edge_index_up_out
def get_edge_index_range(self):
        # In order to use the graph network's data structure, the edge index
        # must be stored as a tensor rather than a list, so we concatenate the
        # per-level edge index lists and record the index range of each level.
edge_index_range = torch.zeros((self.level,2), dtype=torch.long)
edge_index_down_range = torch.zeros((self.level-1,2), dtype=torch.long)
edge_index_up_range = torch.zeros((self.level-1,2), dtype=torch.long)
n_edge_index = 0
for l in range(self.level):
edge_index_range[l, 0] = n_edge_index
n_edge_index = n_edge_index + self.edge_index[l].shape[1]
edge_index_range[l, 1] = n_edge_index
n_edge_index = 0
for l in range(self.level-1):
edge_index_down_range[l, 0] = n_edge_index
edge_index_up_range[l, 0] = n_edge_index
n_edge_index = n_edge_index + self.edge_index_down[l].shape[1]
edge_index_down_range[l, 1] = n_edge_index
edge_index_up_range[l, 1] = n_edge_index
return edge_index_range, edge_index_down_range, edge_index_up_range
def attributes(self, theta=None):
self.edge_attr = []
self.edge_attr_down = []
self.edge_attr_up = []
if theta is None:
for l in range(self.level):
edge_attr = self.grid_sample_all[self.edge_index[l].T].reshape((self.n_edges_inner[l], 2*self.d))
self.edge_attr.append(torch.tensor(edge_attr))
for l in range(self.level - 1):
edge_attr_down = self.grid_sample_all[self.edge_index_down[l].T].reshape((self.n_edges_inter[l], 2*self.d))
edge_attr_up = self.grid_sample_all[self.edge_index_up[l].T].reshape((self.n_edges_inter[l], 2*self.d))
self.edge_attr_down.append(torch.tensor(edge_attr_down))
self.edge_attr_up.append(torch.tensor(edge_attr_up))
else:
theta = theta[self.idx_all]
for l in range(self.level):
edge_attr = np.zeros((self.n_edges_inner[l], 2 * self.d + 2))
edge_attr[:, 0:2 * self.d] = self.grid_sample_all[self.edge_index[l].T].reshape(
(self.n_edges_inner[l], 2 * self.d))
edge_attr[:, 2 * self.d] = theta[self.edge_index[l][0]]
edge_attr[:, 2 * self.d + 1] = theta[self.edge_index[l][1]]
self.edge_attr.append(torch.tensor(edge_attr, dtype=torch.float))
for l in range(self.level - 1):
edge_attr_down = np.zeros((self.n_edges_inter[l], 2 * self.d + 2))
edge_attr_up = np.zeros((self.n_edges_inter[l], 2 * self.d + 2))
edge_attr_down[:, 0:2 * self.d] = self.grid_sample_all[self.edge_index_down[l].T].reshape(
(self.n_edges_inter[l], 2 * self.d))
edge_attr_down[:, 2 * self.d] = theta[self.edge_index_down[l][0]]
edge_attr_down[:, 2 * self.d + 1] = theta[self.edge_index_down[l][1]]
self.edge_attr_down.append(torch.tensor(edge_attr_down, dtype=torch.float))
edge_attr_up[:, 0:2 * self.d] = self.grid_sample_all[self.edge_index_up[l].T].reshape(
(self.n_edges_inter[l], 2 * self.d))
edge_attr_up[:, 2 * self.d] = theta[self.edge_index_up[l][0]]
edge_attr_up[:, 2 * self.d + 1] = theta[self.edge_index_up[l][1]]
self.edge_attr_up.append(torch.tensor(edge_attr_up, dtype=torch.float))
edge_attr_out = torch.cat(self.edge_attr, dim=0)
edge_attr_down_out = torch.cat(self.edge_attr_down, dim=0)
edge_attr_up_out = torch.cat(self.edge_attr_up, dim=0)
return edge_attr_out, edge_attr_down_out, edge_attr_up_out
def splitter(self, radius_inner, radius_inter, theta_a, theta_all):
        # Given a test mesh, generate a list of Data objects (one per split)
data = []
index = 0
for i in range(self.splits):
if i==0:
idx, idx_all = self.sample(new_sample=True, index0=index)
else:
idx, idx_all = self.sample(new_sample=False, index0=index)
index = (index + self.m) % self.n
grid, grid_all = self.get_grid()
edge_index, edge_index_down, edge_index_up = self.ball_connectivity(radius_inner, radius_inter)
edge_index_range, edge_index_down_range, edge_index_up_range = self.get_edge_index_range()
edge_attr, edge_attr_down, edge_attr_up = self.attributes(theta=theta_a)
x = torch.cat([grid_all, theta_all[idx_all,:] ], dim=1)
data.append(Data(x=x,
edge_index_mid=edge_index, edge_index_down=edge_index_down, edge_index_up=edge_index_up,
edge_index_range=edge_index_range, edge_index_down_range=edge_index_down_range, edge_index_up_range=edge_index_up_range,
edge_attr_mid=edge_attr, edge_attr_down=edge_attr_down, edge_attr_up=edge_attr_up,
sample_idx=idx[0]))
return data
def assembler(self, out_list, sample_idx_list, is_cuda=False):
assert len(out_list) == self.splits
if is_cuda:
pred = torch.zeros(self.n, ).cuda()
else:
pred = torch.zeros(self.n, )
for i in range(self.splits):
pred[sample_idx_list[i]] = out_list[i].reshape(-1)
return pred
# generate graph, with split and assemble with downsample
class DownsampleGridSplitter(object):
def __init__(self, grid, resolution, r, m=100, radius=0.15, edge_features=1):
super(DownsampleGridSplitter, self).__init__()
        # instead of randomly sampling sub-grids, here we downsample sub-grids
self.grid = grid.reshape(resolution, resolution,2)
# self.theta = theta.reshape(resolution, resolution,-1)
# self.y = y.reshape(resolution, resolution,1)
self.resolution = resolution
if resolution%2==1:
self.s = int(((resolution - 1)/r) + 1)
else:
self.s = int(resolution/r)
self.r = r
self.n = resolution**2
self.m = m
self.radius = radius
self.edge_features = edge_features
self.index = torch.tensor(range(self.n), dtype=torch.long).reshape(self.resolution, self.resolution)
def ball_connectivity(self, grid):
pwd = sklearn.metrics.pairwise_distances(grid)
edge_index = np.vstack(np.where(pwd <= self.radius))
n_edges = edge_index.shape[1]
return torch.tensor(edge_index, dtype=torch.long), n_edges
def get_data(self, theta):
theta_d = theta.shape[1]
theta = theta.reshape(self.resolution, self.resolution, theta_d)
data = []
for x in range(self.r):
for y in range(self.r):
grid_sub = self.grid[x::self.r, y::self.r,:].reshape(-1,2)
theta_sub = theta[x::self.r, y::self.r,:].reshape(-1,theta_d)
perm = torch.randperm(self.n)
m = self.m - grid_sub.shape[0]
idx = perm[:m]
grid_sample = self.grid.reshape(self.n,-1)[idx]
theta_sample = theta.reshape(self.n,-1)[idx]
grid_split = torch.cat([grid_sub, grid_sample],dim=0)
theta_split = torch.cat([theta_sub, theta_sample],dim=0)
X = torch.cat([grid_split,theta_split],dim=1)
edge_index, n_edges = self.ball_connectivity(grid_split)
edge_attr = np.zeros((n_edges, 4+self.edge_features*2))
a = theta_split[:, :self.edge_features]
edge_attr[:, :4] = grid_split[edge_index.T].reshape(n_edges, -1)
edge_attr[:, 4:4 + self.edge_features] = a[edge_index[0]]
edge_attr[:, 4 + self.edge_features: 4 + self.edge_features * 2] = a[edge_index[1]]
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
split_idx = torch.tensor([x,y],dtype=torch.long).reshape(1,2)
data.append(Data(x=X, edge_index=edge_index, edge_attr=edge_attr, split_idx=split_idx))
print('test', len(data), X.shape, edge_index.shape, edge_attr.shape)
return data
def sample(self, theta, Y):
theta_d = theta.shape[1]
theta = theta.reshape(self.resolution, self.resolution, theta_d)
Y = Y.reshape(self.resolution, self.resolution)
x = torch.randint(0,self.r,(1,))
y = torch.randint(0,self.r,(1,))
grid_sub = self.grid[x::self.r, y::self.r, :].reshape(-1, 2)
theta_sub = theta[x::self.r, y::self.r, :].reshape(-1, theta_d)
Y_sub = Y[x::self.r, y::self.r].reshape(-1,)
index_sub = self.index[x::self.r, y::self.r].reshape(-1,)
n_sub = Y_sub.shape[0]
if self.m >= n_sub:
m = self.m - n_sub
perm = torch.randperm(self.n)
idx = perm[:m]
grid_sample = self.grid.reshape(self.n, -1)[idx]
theta_sample = theta.reshape(self.n, -1)[idx]
Y_sample = Y.reshape(self.n, )[idx]
grid_split = torch.cat([grid_sub, grid_sample], dim=0)
theta_split = torch.cat([theta_sub, theta_sample], dim=0)
Y_split = torch.cat([Y_sub, Y_sample], dim=0).reshape(-1,)
index_split = torch.cat([index_sub, idx], dim=0).reshape(-1,)
X = torch.cat([grid_split, theta_split], dim=1)
else:
grid_split = grid_sub
theta_split = theta_sub
Y_split = Y_sub.reshape(-1,)
index_split = index_sub.reshape(-1,)
X = torch.cat([grid_split, theta_split], dim=1)
edge_index, n_edges = self.ball_connectivity(grid_split)
edge_attr = np.zeros((n_edges, 4+self.edge_features*2))
a = theta_split[:, :self.edge_features]
edge_attr[:, :4] = grid_split[edge_index.T].reshape(n_edges, -1)
edge_attr[:, 4:4+self.edge_features] = a[edge_index[0]]
edge_attr[:, 4+self.edge_features: 4+self.edge_features*2] = a[edge_index[1]]
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
split_idx = torch.tensor([x, y], dtype=torch.long).reshape(1, 2)
data = Data(x=X, y=Y_split, edge_index=edge_index, edge_attr=edge_attr, split_idx=split_idx, sample_idx=index_split)
print('train', X.shape, Y_split.shape, edge_index.shape, edge_attr.shape, index_split.shape)
return data
def assemble(self, pred, split_idx, batch_size2, sigma=1):
assert len(pred) == len(split_idx)
assert len(pred) == self.r**2 // batch_size2
out = torch.zeros((self.resolution,self.resolution))
for i in range(len(pred)):
pred_i = pred[i].reshape(batch_size2, self.m)
split_idx_i = split_idx[i]
for j in range(batch_size2):
pred_ij = pred_i[j,:]
x, y = split_idx_i[j]
if self.resolution%2==1:
if x==0:
nx = self.s
else:
nx = self.s-1
if y==0:
ny = self.s
else:
ny = self.s-1
else:
nx = self.s
ny = self.s
# pred_ij = pred_i[idx : idx + nx * ny]
out[x::self.r, y::self.r] = pred_ij[:nx * ny].reshape(nx,ny)
out = gaussian_filter(out, sigma=sigma, mode='constant', cval=0)
out = torch.tensor(out, dtype=torch.float)
return out.reshape(-1,)
# generate graph on Torus, with split and assemble
class TorusGridSplitter(object):
def __init__(self, grid, resolution, r, m=100, radius=0.15, T=None, edge_features=1, ):
super(TorusGridSplitter, self).__init__()
self.grid = grid.reshape(resolution, resolution,2)
# self.theta = theta.reshape(resolution, resolution,-1)
# self.y = y.reshape(resolution, resolution,1)
self.resolution = resolution
if resolution%2==1:
self.s = int(((resolution - 1)/r) + 1)
else:
self.s = int(resolution/r)
self.r = r
self.n = resolution**2
self.m = m
self.T = T
self.radius = radius
self.edge_features = edge_features
self.index = torch.tensor(range(self.n), dtype=torch.long).reshape(self.resolution, self.resolution)
def pairwise_difference(self,grid1, grid2):
n = grid1.shape[0]
x1 = grid1[:,0]
y1 = grid1[:,1]
x2 = grid2[:,0]
y2 = grid2[:,1]
X1 = np.tile(x1.reshape(n, 1), [1, n])
X2 = np.tile(x2.reshape(1, n), [n, 1])
X_diff = X1 - X2
Y1 = np.tile(y1.reshape(n, 1), [1, n])
Y2 = np.tile(y2.reshape(1, n), [n, 1])
Y_diff = Y1 - Y2
return X_diff, Y_diff
def torus_connectivity(self, grid):
pwd0 = sklearn.metrics.pairwise_distances(grid, grid)
X_diff0, Y_diff0 = self.pairwise_difference(grid, grid)
grid1 = grid
grid1[:,0] = grid[:,0]+1
pwd1 = sklearn.metrics.pairwise_distances(grid, grid1)
X_diff1, Y_diff1 = self.pairwise_difference(grid, grid1)
grid2 = grid
grid2[:, 1] = grid[:, 1] + 1
pwd2 = sklearn.metrics.pairwise_distances(grid, grid2)
X_diff2, Y_diff2 = self.pairwise_difference(grid, grid2)
grid3 = grid
grid3[:, :] = grid[:, :] + 1
pwd3 = sklearn.metrics.pairwise_distances(grid, grid3)
X_diff3, Y_diff3 = self.pairwise_difference(grid, grid3)
grid4 = grid
grid4[:, 0] = grid[:, 0] + 1
grid4[:, 1] = grid[:, 1] - 1
pwd4 = sklearn.metrics.pairwise_distances(grid, grid4)
X_diff4, Y_diff4 = self.pairwise_difference(grid, grid4)
PWD = np.stack([pwd0,pwd1,pwd2,pwd3,pwd4], axis=2)
X_DIFF = np.stack([X_diff0,X_diff1,X_diff2,X_diff3,X_diff4], axis=2)
Y_DIFF = np.stack([Y_diff0, Y_diff1, Y_diff2, Y_diff3, Y_diff4], axis=2)
pwd = np.min(PWD, axis=2)
pwd_index = np.argmin(PWD, axis=2)
edge_index = np.vstack(np.where(pwd <= self.radius))
pwd_index = pwd_index[np.where(pwd <= self.radius)]
PWD_index = (np.where(pwd <= self.radius)[0], np.where(pwd <= self.radius)[1], pwd_index)
distance = PWD[PWD_index]
X_difference = X_DIFF[PWD_index]
Y_difference = Y_DIFF[PWD_index]
n_edges = edge_index.shape[1]
return torch.tensor(edge_index, dtype=torch.long), n_edges, distance, X_difference, Y_difference
def get_data(self, theta, params=None):
theta_d = theta.shape[1]
theta = theta.reshape(self.resolution, self.resolution, theta_d)
data = []
for x in range(self.r):
for y in range(self.r):
grid_sub = self.grid[x::self.r, y::self.r,:].reshape(-1,2)
theta_sub = theta[x::self.r, y::self.r,:].reshape(-1,theta_d)
perm = torch.randperm(self.n)
m = self.m - grid_sub.shape[0]
idx = perm[:m]
grid_sample = self.grid.reshape(self.n,-1)[idx]
theta_sample = theta.reshape(self.n,-1)[idx]
grid_split = torch.cat([grid_sub, grid_sample],dim=0)
theta_split = torch.cat([theta_sub, theta_sample],dim=0)
X = torch.cat([grid_split,theta_split],dim=1)
edge_index, n_edges, distance, X_difference, Y_difference = self.torus_connectivity(grid_split)
edge_attr = np.zeros((n_edges, 3+self.edge_features*2))
a = theta_split[:, :self.edge_features]
edge_attr[:, 0] = X_difference.reshape(n_edges, )
edge_attr[:, 1] = Y_difference.reshape(n_edges, )
edge_attr[:, 2] = distance.reshape(n_edges, )
edge_attr[:, 3:3 + self.edge_features] = a[edge_index[0]]
edge_attr[:, 3 + self.edge_features: 4 + self.edge_features * 2] = a[edge_index[1]]
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
split_idx = torch.tensor([x,y],dtype=torch.long).reshape(1,2)
if params==None:
data.append(Data(x=X, edge_index=edge_index, edge_attr=edge_attr, split_idx=split_idx))
else:
data.append(Data(x=X, edge_index=edge_index, edge_attr=edge_attr, split_idx=split_idx, params=params))
print('test', len(data), X.shape, edge_index.shape, edge_attr.shape)
return data
def sample(self, theta, Y):
theta_d = theta.shape[1]
theta = theta.reshape(self.resolution, self.resolution, theta_d)
Y = Y.reshape(self.resolution, self.resolution)
x = torch.randint(0,self.r,(1,))
y = torch.randint(0,self.r,(1,))
grid_sub = self.grid[x::self.r, y::self.r, :].reshape(-1, 2)
theta_sub = theta[x::self.r, y::self.r, :].reshape(-1, theta_d)
Y_sub = Y[x::self.r, y::self.r].reshape(-1,)
index_sub = self.index[x::self.r, y::self.r].reshape(-1,)
n_sub = Y_sub.shape[0]
if self.m >= n_sub:
m = self.m - n_sub
perm = torch.randperm(self.n)
idx = perm[:m]
grid_sample = self.grid.reshape(self.n, -1)[idx]
theta_sample = theta.reshape(self.n, -1)[idx]
Y_sample = Y.reshape(self.n, )[idx]
grid_split = torch.cat([grid_sub, grid_sample], dim=0)
theta_split = torch.cat([theta_sub, theta_sample], dim=0)
Y_split = torch.cat([Y_sub, Y_sample], dim=0).reshape(-1,)
index_split = torch.cat([index_sub, idx], dim=0).reshape(-1,)
X = torch.cat([grid_split, theta_split], dim=1)
else:
grid_split = grid_sub
theta_split = theta_sub
Y_split = Y_sub.reshape(-1,)
index_split = index_sub.reshape(-1,)
X = torch.cat([grid_split, theta_split], dim=1)
edge_index, n_edges, distance, X_difference, Y_difference = self.torus_connectivity(grid_split)
edge_attr = np.zeros((n_edges, 3+self.edge_features*2))
a = theta_split[:, :self.edge_features]
edge_attr[:, 0] = X_difference.reshape(n_edges, )
edge_attr[:, 1] = Y_difference.reshape(n_edges, )
edge_attr[:, 2] = distance.reshape(n_edges, )
edge_attr[:, 3:3+self.edge_features] = a[edge_index[0]]
edge_attr[:, 3+self.edge_features: 4+self.edge_features*2] = a[edge_index[1]]
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
split_idx = torch.tensor([x, y], dtype=torch.long).reshape(1, 2)
data = Data(x=X, y=Y_split, edge_index=edge_index, edge_attr=edge_attr, split_idx=split_idx, sample_idx=index_split)
print('train', X.shape, Y_split.shape, edge_index.shape, edge_attr.shape, index_split.shape)
return data
def sampleT(self, theta, Y, params=None):
theta_d = theta.shape[1]
theta = theta.reshape(self.resolution, self.resolution, theta_d)
Y = Y.reshape(self.T, self.resolution, self.resolution)
x = torch.randint(0, self.r, (1,))
y = torch.randint(0, self.r, (1,))
grid_sub = self.grid[x::self.r, y::self.r, :].reshape(-1, 2)
theta_sub = theta[x::self.r, y::self.r, :].reshape(-1, theta_d)
Y_sub = Y[:,x::self.r, y::self.r].reshape(self.T,-1)
index_sub = self.index[x::self.r, y::self.r].reshape(-1, )
n_sub = Y_sub.shape[1]
if self.m >= n_sub:
m = self.m - n_sub
perm = torch.randperm(self.n)
idx = perm[:m]
grid_sample = self.grid.reshape(self.n, -1)[idx]
theta_sample = theta.reshape(self.n, -1)[idx]
Y_sample = Y.reshape(self.T, self.n)[:,idx]
grid_split = torch.cat([grid_sub, grid_sample], dim=0)
theta_split = torch.cat([theta_sub, theta_sample], dim=0)
Y_split = torch.cat([Y_sub, Y_sample], dim=1).reshape(self.T,-1)
index_split = torch.cat([index_sub, idx], dim=0).reshape(-1, )
X = torch.cat([grid_split, theta_split], dim=1)
else:
grid_split = grid_sub
theta_split = theta_sub
Y_split = Y_sub.reshape(self.T, -1)
index_split = index_sub.reshape(-1, )
X = torch.cat([grid_split, theta_split], dim=1)
edge_index, n_edges, distance, X_difference, Y_difference = self.torus_connectivity(grid_split)
edge_attr = np.zeros((n_edges, 3 + self.edge_features * 2))
a = theta_split[:, :self.edge_features]
edge_attr[:, 0] = X_difference.reshape(n_edges, )
edge_attr[:, 1] = Y_difference.reshape(n_edges, )
edge_attr[:, 2] = distance.reshape(n_edges, )
edge_attr[:, 3:3 + self.edge_features] = a[edge_index[0]]
edge_attr[:, 3 + self.edge_features: 4 + self.edge_features * 2] = a[edge_index[1]]
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
split_idx = torch.tensor([x, y], dtype=torch.long).reshape(1, 2)
if params==None:
data = Data(x=X, y=Y_split, edge_index=edge_index, edge_attr=edge_attr, split_idx=split_idx,
sample_idx=index_split)
else:
data = Data(x=X, y=Y_split, edge_index=edge_index, edge_attr=edge_attr, split_idx=split_idx,
sample_idx=index_split, params=params)
print('train', X.shape, Y_split.shape, edge_index.shape, edge_attr.shape, index_split.shape)
return data
def assemble(self, pred, split_idx, batch_size2, sigma=1):
assert len(pred) == len(split_idx)
assert len(pred) == self.r**2 // batch_size2
out = torch.zeros((self.resolution,self.resolution))
for i in range(len(pred)):
pred_i = pred[i].reshape(batch_size2, self.m)
split_idx_i = split_idx[i]
for j in range(batch_size2):
pred_ij = pred_i[j,:]
x, y = split_idx_i[j]
if self.resolution%2==1:
if x==0:
nx = self.s
else:
nx = self.s-1
if y==0:
ny = self.s
else:
ny = self.s-1
else:
nx = self.s
ny = self.s
# pred_ij = pred_i[idx : idx + nx * ny]
out[x::self.r, y::self.r] = pred_ij[:nx * ny].reshape(nx,ny)
out = gaussian_filter(out, sigma=sigma, mode='wrap')
out = torch.tensor(out, dtype=torch.float)
return out.reshape(-1,)
def assembleT(self, pred, split_idx, batch_size2, sigma=1):
# pred is a list (batches) of list (time seq)
assert len(pred) == len(split_idx)
assert len(pred[0]) == self.T
assert len(pred) == self.r**2 // batch_size2
out = torch.zeros((self.T, self.resolution,self.resolution))
for t in range(self.T):
for i in range(len(pred)):
pred_i = pred[i][t].reshape(batch_size2, self.m)
split_idx_i = split_idx[i]
for j in range(batch_size2):
pred_ij = pred_i[j,:]
x, y = split_idx_i[j]
if self.resolution%2==1:
if x==0:
nx = self.s
else:
nx = self.s-1
if y==0:
ny = self.s
else:
ny = self.s-1
else:
nx = self.s
ny = self.s
# pred_ij = pred_i[idx : idx + nx * ny]
out[t, x::self.r, y::self.r] = pred_ij[:nx * ny].reshape(nx,ny)
out = gaussian_filter(out, sigma=sigma, mode='wrap')
out = torch.tensor(out, dtype=torch.float)
return out.reshape(self.T,self.n)
def downsample(data, grid_size, l):
data = data.reshape(-1, grid_size, grid_size)
data = data[:, ::l, ::l]
data = data.reshape(-1, (grid_size // l) ** 2)
return data
def simple_grid(n_x, n_y):
xs = np.linspace(0.0, 1.0, n_x)
ys = np.linspace(0.0, 1.0, n_y)
# xs = np.array(range(n_x))
# ys = np.array(range(n_y))
grid = np.vstack([xx.ravel() for xx in np.meshgrid(xs, ys)]).T
edge_index = []
edge_attr = []
for y in range(n_y):
for x in range(n_x):
i = y * n_x + x
if (x != n_x - 1):
edge_index.append((i, i + 1))
edge_attr.append((1, 0, 0))
edge_index.append((i + 1, i))
edge_attr.append((-1, 0, 0))
if (y != n_y - 1):
edge_index.append((i, i + n_x))
edge_attr.append((0, 1, 0))
edge_index.append((i + n_x, i))
edge_attr.append((0, -1, 0))
X = torch.tensor(grid, dtype=torch.float)
# Exact = torch.tensor(Exact, dtype=torch.float).view(-1)
edge_index = torch.tensor(edge_index, dtype=torch.long).transpose(0, 1)
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
return X, edge_index, edge_attr
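# Usage sketch (illustrative shapes; assumes torch_geometric's Data, as used by the samplers above):
#   X, edge_index, edge_attr = simple_grid(16, 16)
#   data = Data(x=X, edge_index=edge_index, edge_attr=edge_attr)
# X has shape (n_x*n_y, 2) with coordinates in [0, 1], edge_index has shape (2, n_edges), and
# edge_attr has shape (n_edges, 3) holding signed axis-direction offsets for each directed edge.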
def grid_edge(n_x, n_y, a=None):
    if a is not None:
a = a.reshape(n_x, n_y)
xs = np.linspace(0.0, 1.0, n_x)
ys = np.linspace(0.0, 1.0, n_y)
# xs = np.array(range(n_x))
# ys = np.array(range(n_y))
grid = np.vstack([xx.ravel() for xx in np.meshgrid(xs, ys)]).T
edge_index = []
edge_attr = []
for y in range(n_y):
for x in range(n_x):
i = y * n_x + x
if (x != n_x - 1):
d = 1 / n_x
edge_index.append((i, i + 1))
edge_index.append((i + 1, i ))
                if a is not None:
a1 = a[x, y]
a2 = a[x + 1, y]
edge_attr.append((x / n_x, y / n_y, a1, a2))
edge_attr.append((y/n_y, x/n_x, a2, a1))
if (y != n_y - 1):
d = 1 / n_y
edge_index.append((i, i + n_x))
edge_index.append((i + n_x, i))
                if a is not None:
a1 = a[x, y]
a2 = a[x, y+1]
edge_attr.append((x/n_x, y/n_y, a1, a2))
edge_attr.append((y/n_y, x/n_x, a2, a1))
X = torch.tensor(grid, dtype=torch.float)
# Exact = torch.tensor(Exact, dtype=torch.float).view(-1)
edge_index = torch.tensor(edge_index, dtype=torch.long).transpose(0, 1)
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
return X, edge_index, edge_attr
def grid_edge1d(n_x, a=None):
    if a is not None:
a = a.reshape(n_x)
xs = np.linspace(0.0, 1.0, n_x)
# xs = np.array(range(n_x))
# ys = np.array(range(n_y))
edge_index = []
edge_attr = []
for x in range(n_x):
i = x
i1 = (x+1)%n_x
edge_index.append((i, i1))
edge_index.append((i1, i ))
i2 = (x + 2) % n_x
edge_index.append((i, i2))
edge_index.append((i2, i ))
        if a is not None:
            a1 = a[x]
            a2 = a[(x + 1) % n_x]
edge_attr.append((x / n_x, a1, a2))
edge_attr.append((x / n_x, a2, a1))
X = torch.tensor(xs, dtype=torch.float)
# Exact = torch.tensor(Exact, dtype=torch.float).view(-1)
edge_index = torch.tensor(edge_index, dtype=torch.long).transpose(0, 1)
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
return X, edge_index, edge_attr
def grid_edge_aug(n_x, n_y, a):
a = a.reshape(n_x, n_y)
xs = np.linspace(0.0, 1.0, n_x)
ys = np.linspace(0.0, 1.0, n_y)
# xs = np.array(range(n_x))
# ys = np.array(range(n_y))
grid = np.vstack([xx.ravel() for xx in np.meshgrid(xs, ys)]).T
edge_index = []
edge_attr = []
for y in range(n_y):
for x in range(n_x):
i = y * n_x + x
if (x != n_x - 1):
d = 1 / n_x
a1 = a[x, y]
a2 = a[x + 1, y]
edge_index.append((i, i + 1))
edge_attr.append((d, a1, a2, 1 / np.sqrt(np.abs(a1 * a2)),
np.exp(-(d) ** 2), np.exp(-(d / 0.1) ** 2), np.exp(-(d / 0.01) ** 2)))
edge_index.append((i + 1, i))
edge_attr.append((d, a2, a1, 1 / np.sqrt(np.abs(a1 * a2)),
np.exp(-(d) ** 2), np.exp(-(d / 0.1) ** 2), np.exp(-(d / 0.01) ** 2)))
if (y != n_y - 1):
d = 1 / n_y
a1 = a[x, y]
a2 = a[x, y+1]
edge_index.append((i, i + n_x))
edge_attr.append((d, a1, a2, 1 / np.sqrt(np.abs(a1 * a2)),
np.exp(-(d) ** 2), np.exp(-(d / 0.1) ** 2), np.exp(-(d / 0.01) ** 2)))
edge_index.append((i + n_x, i))
edge_attr.append((d, a2, a1, 1 / np.sqrt(np.abs(a1 * a2)),
np.exp(-(d) ** 2), np.exp(-(d / 0.1) ** 2), np.exp(-(d / 0.01) ** 2)))
X = torch.tensor(grid, dtype=torch.float)
# Exact = torch.tensor(Exact, dtype=torch.float).view(-1)
edge_index = torch.tensor(edge_index, dtype=torch.long).transpose(0, 1)
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
return X, edge_index, edge_attr
def grid_edge_aug_full(n_x, n_y, r, a):
n = n_x * n_y
xs = np.linspace(0.0, 1.0, n_x)
ys = np.linspace(0.0, 1.0, n_y)
grid = np.vstack([xx.ravel() for xx in np.meshgrid(xs, ys)]).T
edge_index = []
edge_attr = []
for i1 in range(n):
x1 = grid[i1]
for i2 in range(n):
x2 = grid[i2]
d = np.linalg.norm(x1-x2)
if(d<=r):
a1 = a[i1]
a2 = a[i2]
edge_index.append((i1, i2))
edge_attr.append((d, a1, a2, 1 / np.sqrt(np.abs(a1 * a2)),
np.exp(-(d) ** 2), np.exp(-(d / 0.1) ** 2), np.exp(-(d / 0.01) ** 2)))
edge_index.append((i2, i1))
edge_attr.append((d, a2, a1, 1 / np.sqrt(np.abs(a1 * a2)),
np.exp(-(d) ** 2), np.exp(-(d / 0.1) ** 2), np.exp(-(d / 0.01) ** 2)))
X = torch.tensor(grid, dtype=torch.float)
# Exact = torch.tensor(Exact, dtype=torch.float).view(-1)
edge_index = torch.tensor(edge_index, dtype=torch.long).transpose(0, 1)
edge_attr = torch.tensor(edge_attr, dtype=torch.float)
return X, edge_index, edge_attr
def multi_grid(depth, n_x, n_y, grid, params):
edge_index_global = []
edge_attr_global = []
X_global = []
num_nodes = 0
# build connected graph
for l in range(depth):
h_x_l = n_x // (2 ** l)
h_y_l = n_y // (2 ** l)
n_l = h_x_l * h_y_l
a = downsample(params, n_x, (2 ** l))
        if grid == 'grid':
            X, edge_index_inner, edge_attr_inner = simple_grid(h_y_l, h_x_l)
        elif grid == 'grid_edge':
            X, edge_index_inner, edge_attr_inner = grid_edge(h_y_l, h_x_l, a)
        elif grid == 'grid_edge_aug':
            X, edge_index_inner, edge_attr_inner = grid_edge_aug(h_y_l, h_x_l, a)
# update index
edge_index_inner = edge_index_inner + num_nodes
edge_index_global.append(edge_index_inner)
edge_attr_global.append(edge_attr_inner)
# construct X
# if (is_high):
# X = torch.cat([torch.zeros(n_l, l * 2), X, torch.zeros(n_l, (depth - 1 - l) * 2)], dim=1)
# else:
# X_l = torch.tensor(l, dtype=torch.float).repeat(n_l, 1)
# X = torch.cat([X, X_l], dim=1)
X_global.append(X)
# construct edges
index1 = torch.tensor(range(n_l), dtype=torch.long)
index1 = index1 + num_nodes
num_nodes += n_l
# #construct inter-graph edge
if l != depth-1:
index2 = np.array(range(n_l//4)).reshape(h_x_l//2, h_y_l//2) # torch.repeat is different from numpy
index2 = index2.repeat(2, axis = 0).repeat(2, axis = 1)
            index2 = torch.tensor(index2, dtype=torch.long).reshape(-1)
            index2 = index2 + num_nodes
edge_index_inter1 = torch.cat([index1,index2], dim=-1).reshape(2,-1)
edge_index_inter2 = torch.cat([index2,index1], dim=-1).reshape(2,-1)
edge_index_inter = torch.cat([edge_index_inter1, edge_index_inter2], dim=1)
edge_attr_inter1 = torch.tensor((0, 0, 1), dtype=torch.float).repeat(n_l, 1)
edge_attr_inter2 = torch.tensor((0, 0,-1), dtype=torch.float).repeat(n_l, 1)
edge_attr_inter = torch.cat([edge_attr_inter1, edge_attr_inter2], dim=0)
edge_index_global.append(edge_index_inter)
edge_attr_global.append(edge_attr_inter)
X = torch.cat(X_global, dim=0)
edge_index = torch.cat(edge_index_global, dim=1)
edge_attr = torch.cat(edge_attr_global, dim=0)
mask_index = torch.tensor(range(n_x * n_y), dtype=torch.long)
# print('create multi_grid with size:', X.shape, edge_index.shape, edge_attr.shape, mask_index.shape)
return (X, edge_index, edge_attr, mask_index, num_nodes)
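# multi_grid structure (sketch, inferred from the code): level l is the (n_x // 2**l) x (n_y // 2**l)
# down-sampled grid with its own intra-level edges; consecutive levels are linked by inter-level edges
# that connect each fine node to its coarse parent (edge_attr (0, 0, 1)) and back (edge_attr (0, 0, -1)).
# mask_index indexes the finest-level (level-0) nodes within the stacked node set.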
def multi_pole_grid1d(theta, theta_d, s, N, is_periodic=False):
grid_list = []
theta_list = []
edge_index_list = []
edge_index_list_cuda = []
level = int(np.log2(s) - 1)
print(level)
for l in range(1, level+1):
r_l = 2 ** (l - 1)
s_l = s // r_l
n_l = s_l
print('level',s_l,r_l,n_l)
xs = np.linspace(0.0, 1.0, s_l)
grid_l = xs
grid_l = torch.tensor(grid_l, dtype=torch.float)
print(grid_l.shape)
grid_list.append(grid_l)
theta_l = theta[:,:,:theta_d].reshape(N, s, theta_d)
theta_l = theta_l[:, ::r_l, :]
theta_l = theta_l.reshape(N, n_l, theta_d)
theta_l = torch.tensor(theta_l, dtype=torch.float)
print(theta_l.shape)
theta_list.append(theta_l)
# for the finest level, we construct the nearest neighbors (NN)
if l==1:
edge_index_nn = []
for x_i in range(s_l):
for x in (-1,1):
x_j = x_i + x
if is_periodic:
x_j = x_j % s_l
# if (xj, yj) is a valid node
if (x_j in range(s_l)):
edge_index_nn.append([x_i,x_j])
edge_index_nn = torch.tensor(edge_index_nn, dtype=torch.long)
edge_index_nn = edge_index_nn.transpose(0,1)
edge_index_list.append(edge_index_nn)
edge_index_list_cuda.append(edge_index_nn.cuda())
print('edge', edge_index_nn.shape)
# we then compute the interactive neighbors -- their parents are NN but they are not NearestNeighbor
edge_index_inter = []
for x_i in range(s_l):
for x in range(-3,4):
x_j = x_i + x
# if (xj, yj) is a valid node
if is_periodic:
x_j = x_j % s_l
if (x_j in range(s_l)):
# if (xi, yi), (xj, yj) not NearestNeighbor
if abs(x)>=2:
# if their parents are NN
if abs(x_i//2 - x_j//2)%(s_l//2) <=1:
edge_index_inter.append([x_i,x_j])
edge_index_inter = torch.tensor(edge_index_inter, dtype=torch.long)
edge_index_inter = edge_index_inter.transpose(0,1)
edge_index_list.append(edge_index_inter)
edge_index_list_cuda.append(edge_index_inter.cuda())
print('edge_inter', edge_index_inter.shape)
print(len(grid_list),len(edge_index_list),len(theta_list))
return grid_list, theta_list, edge_index_list, edge_index_list_cuda
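# multi_pole_grid1d builds a 1D multipole-style hierarchy (sketch, inferred from the code): each level
# coarsens the grid by a factor of 2; the finest level gets nearest-neighbour edges, and every level gets
# "interaction" edges between nodes that are not nearest neighbours themselves but whose parents at the
# next coarser level are. Per level it returns grid coordinates, sub-sampled theta, and edge lists
# (both CPU and CUDA copies).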
def get_edge_attr(grid, theta, edge_index):
n_edges = edge_index.shape[1]
edge_attr = np.zeros((n_edges, 4))
edge_attr[:, 0:2] = grid[edge_index.transpose(0,1)].reshape((n_edges, -1))
edge_attr[:, 2] = theta[edge_index[0]]
edge_attr[:, 3] = theta[edge_index[1]]
return torch.tensor(edge_attr, dtype=torch.float)
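# get_edge_attr assumes a 1D grid (one coordinate per node): the resulting columns are
# [x_i, x_j, theta_i, theta_j] for each directed edge (i, j).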
|
backend/src/baserow/config/asgi.py | ashishdhngr/baserow | 839 | 12793518 | import django
from channels.routing import ProtocolTypeRouter
from baserow.ws.routers import websocket_router
from django.core.asgi import get_asgi_application
django.setup()
django_asgi_app = get_asgi_application()
application = ProtocolTypeRouter(
{"http": django_asgi_app, "websocket": websocket_router}
)
|
threedod/benchmark_scripts/utils/box_utils.py | Levintsky/ARKitScenes | 237 | 12793620 | # TODO: Explain 8 corners logic at the top and use it consistently
# Add comments of explanation
import numpy as np
import scipy.spatial
from .rotation import rotate_points_along_z
def get_size(box):
"""
Args:
box: 8x3
Returns:
size: [dx, dy, dz]
"""
distance = scipy.spatial.distance.cdist(box[0:1, :], box[1:5, :])
l = distance[0, 2]
w = distance[0, 0]
h = distance[0, 3]
return [l, w, h]
def get_heading_angle(box):
"""
Args:
box: (8, 3)
Returns:
heading_angle: float
"""
a = box[0, 0] - box[1, 0]
b = box[0, 1] - box[1, 1]
heading_angle = np.arctan2(a, b)
return heading_angle
def compute_box_3d(size, center, rotmat):
"""Compute corners of a single box from rotation matrix
Args:
size: list of float [dx, dy, dz]
center: np.array [x, y, z]
rotmat: np.array (3, 3)
Returns:
corners: (8, 3)
"""
l, h, w = [i / 2 for i in size]
center = np.reshape(center, (-1, 3))
center = center.reshape(3)
x_corners = [l, l, -l, -l, l, l, -l, -l]
y_corners = [h, -h, -h, h, h, -h, -h, h]
z_corners = [w, w, w, w, -w, -w, -w, -w]
corners_3d = np.dot(
np.transpose(rotmat), np.vstack([x_corners, y_corners, z_corners])
)
corners_3d[0, :] += center[0]
corners_3d[1, :] += center[1]
corners_3d[2, :] += center[2]
return np.transpose(corners_3d)
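# Illustrative example (hypothetical values): an axis-aligned box with extents 2 x 1 x 0.5 centred at the origin
#   corners = compute_box_3d([2.0, 1.0, 0.5], np.zeros(3), np.eye(3))
# returns an (8, 3) array of corner coordinates in world space.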
def corners_to_boxes(corners3d):
"""
7 -------- 4
/| /|
6 -------- 5 .
| | | |
. 3 -------- 0
|/ |/
2 -------- 1
Args:
corners: (N, 8, 3), vertex order shown in figure above
Returns:
boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading]
with (x, y, z) is the box center
(dx, dy, dz) as the box size
and heading as the clockwise rotation angle
"""
boxes3d = np.zeros((corners3d.shape[0], 7))
for i in range(corners3d.shape[0]):
boxes3d[i, :3] = np.mean(corners3d[i, :, :], axis=0)
boxes3d[i, 3:6] = get_size(corners3d[i, :, :])
boxes3d[i, 6] = get_heading_angle(corners3d[i, :, :])
return boxes3d
def boxes_to_corners_3d(boxes3d):
"""
7 -------- 4
/| /|
6 -------- 5 .
| | | |
. 3 -------- 0
|/ |/
2 -------- 1
Args:
boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading],
(x, y, z) is the box center
Returns:
corners: (N, 8, 3)
"""
template = np.array([[1, 1, -1],
[1, -1, -1],
[-1, -1, -1],
[-1, 1, -1],
[1, 1, 1],
[1, -1, 1],
[-1, -1, 1],
[-1, 1, 1]]
) / 2.
# corners3d: of shape (N, 3, 8)
corners3d = np.tile(boxes3d[:, None, 3:6], (1, 8, 1)) * template[None, :, :]
corners3d = rotate_points_along_z(corners3d.reshape(-1, 8, 3), boxes3d[:, 6]).reshape(
-1, 8, 3
)
corners3d += boxes3d[:, None, 0:3]
return corners3d
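# Note: corners_to_boxes and boxes_to_corners_3d are (approximate) inverses; as a sanity check,
# corners_to_boxes(boxes_to_corners_3d(boxes)) should recover the original centres and sizes
# (the heading is only recovered up to the conventions of get_heading_angle).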
def points_in_boxes(points, boxes):
"""
Args:
        points: np.array (n, 3+d)
boxes: np.array (m, 8, 3)
Returns:
mask: np.array (n, m) of type bool
"""
if len(boxes) == 0:
        return np.zeros([points.shape[0], 1], dtype=bool)
points = points[:, :3] # get xyz
# u = p6 - p5
u = boxes[:, 6, :] - boxes[:, 5, :] # (m, 3)
# v = p6 - p7
v = boxes[:, 6, :] - boxes[:, 7, :] # (m, 3)
# w = p6 - p2
w = boxes[:, 6, :] - boxes[:, 2, :] # (m, 3)
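    # A point p lies inside a box iff its projection onto each of the three edge directions u, v, w
    # falls between the projections of the two opposite faces: u.p5 <= u.p <= u.p6, v.p7 <= v.p <= v.p6,
    # and w.p2 <= w.p <= w.p6. The dot products below implement exactly that test, vectorised over all
    # points and boxes.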
# ux, vx, wx
ux = np.matmul(points, u.T) # (n, m)
vx = np.matmul(points, v.T)
wx = np.matmul(points, w.T)
# up6, up5, vp6, vp7, wp6, wp2
up6 = np.sum(u * boxes[:, 6, :], axis=1)
up5 = np.sum(u * boxes[:, 5, :], axis=1)
vp6 = np.sum(v * boxes[:, 6, :], axis=1)
vp7 = np.sum(v * boxes[:, 7, :], axis=1)
wp6 = np.sum(w * boxes[:, 6, :], axis=1)
wp2 = np.sum(w * boxes[:, 2, :], axis=1)
    mask_u = np.logical_and(ux <= up6, ux >= up5)  # (n, m)
    mask_v = np.logical_and(vx <= vp6, vx >= vp7)  # (n, m)
    mask_w = np.logical_and(wx <= wp6, wx >= wp2)  # (n, m)
    mask = mask_u & mask_v & mask_w  # (n, m)
return mask
def poly_area(x,y):
""" Ref: http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates """
return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))
def polygon_clip(subjectPolygon, clipPolygon):
""" Clip a polygon with another polygon.
Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python
Args:
subjectPolygon: a list of (x,y) 2d points, any polygon.
clipPolygon: a list of (x,y) 2d points, has to be *convex*
Note:
**points have to be counter-clockwise ordered**
Return:
a list of (x,y) vertex point for the intersection polygon.
"""
def inside(p):
return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - cp1[0])
def computeIntersection():
dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]]
dp = [s[0] - e[0], s[1] - e[1]]
n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
n2 = s[0] * e[1] - s[1] * e[0]
n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3]
outputList = subjectPolygon
cp1 = clipPolygon[-1]
for clipVertex in clipPolygon:
cp2 = clipVertex
inputList = outputList
outputList = []
s = inputList[-1]
for subjectVertex in inputList:
e = subjectVertex
if inside(e):
if not inside(s):
outputList.append(computeIntersection())
outputList.append(e)
elif inside(s):
outputList.append(computeIntersection())
s = e
cp1 = cp2
if len(outputList) == 0:
return None
return (outputList)
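# Illustrative usage (hypothetical coordinates): clipping the unit square against a shifted square
#   inter = polygon_clip([(0, 0), (1, 0), (1, 1), (0, 1)],
#                        [(0.5, 0.5), (1.5, 0.5), (1.5, 1.5), (0.5, 1.5)])
# returns the vertices of the overlapping region, or None when the polygons do not intersect.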
def convex_hull_intersection(p1, p2):
""" Compute area of two convex hull's intersection area.
p1,p2 are a list of (x,y) tuples of hull vertices.
return a list of (x,y) for the intersection and its volume
"""
inter_p = polygon_clip(p1,p2)
if inter_p is not None:
hull_inter = scipy.spatial.ConvexHull(inter_p)
return inter_p, hull_inter.volume
else:
return None, 0.0
def box3d_vol(corners):
''' corners: (8,3) no assumption on axis direction '''
a = np.sqrt(np.sum((corners[0,:] - corners[1,:])**2))
b = np.sqrt(np.sum((corners[1,:] - corners[2,:])**2))
c = np.sqrt(np.sum((corners[0,:] - corners[4,:])**2))
return a*b*c
def box3d_iou(corners1, corners2):
''' Compute 3D bounding box IoU.
Input:
corners1: numpy array (8,3), assume up direction is negative Y
corners2: numpy array (8,3), assume up direction is negative Y
Output:
iou: 3D bounding box IoU
iou_2d: bird's eye view 2D bounding box IoU
'''
# corner points are in counter clockwise order
rect1 = [(corners1[i,0], corners1[i,1]) for i in range(3,-1,-1)]
rect2 = [(corners2[i,0], corners2[i,1]) for i in range(3,-1,-1)]
area1 = poly_area(np.array(rect1)[:,0], np.array(rect1)[:,1])
area2 = poly_area(np.array(rect2)[:,0], np.array(rect2)[:,1])
inter, inter_area = convex_hull_intersection(rect1, rect2)
iou_2d = inter_area/(area1+area2-inter_area)
ymax = min(corners1[:,2].max(), corners2[:,2].max())
ymin = max(corners1[:,2].min(), corners2[:,2].min())
inter_vol = inter_area * max(0.0, ymax-ymin)
vol1 = box3d_vol(corners1)
vol2 = box3d_vol(corners2)
iou = inter_vol / (vol1 + vol2 - inter_vol)
return iou |
src/fal/cli/fal_runner.py | emekdahl/fal | 360 | 12793626 | <gh_stars>100-1000
import argparse
from pathlib import Path
from typing import Any, Dict, List
import os
from dbt.config.profile import DEFAULT_PROFILES_DIR
from fal.run_scripts import raise_for_run_results_failures, run_scripts
from fal.fal_script import FalScript
from faldbt.project import DbtModel, FalDbt, FalGeneralException
def create_fal_dbt(args: argparse.Namespace):
real_project_dir = os.path.realpath(os.path.normpath(args.project_dir))
real_profiles_dir = None
env_profiles_dir = os.getenv("DBT_PROFILES_DIR")
if args.profiles_dir is not None:
real_profiles_dir = os.path.realpath(os.path.normpath(args.profiles_dir))
elif env_profiles_dir:
real_profiles_dir = os.path.realpath(os.path.normpath(env_profiles_dir))
else:
real_profiles_dir = DEFAULT_PROFILES_DIR
if hasattr(args, "state") and args.state is not None:
real_state = Path(os.path.realpath(os.path.normpath(args.state)))
else:
real_state = None
return FalDbt(
real_project_dir,
real_profiles_dir,
args.select,
args.exclude,
args.selector,
args.keyword,
args.threads,
real_state,
args.target,
)
def fal_run(args: argparse.Namespace):
"Runs the fal run command in a subprocess"
selector_flags = args.select or args.exclude or args.selector
if args.all and selector_flags:
raise FalGeneralException(
"Cannot pass --all flag alongside selection flags (--select/--models, --exclude, --selector)"
)
faldbt = create_fal_dbt(args)
models = _get_filtered_models(faldbt, args.all, selector_flags, args.before)
scripts = _select_scripts(args, models, faldbt)
if args.before:
if not _scripts_flag(args):
# run globals when no --script is passed
_run_global_scripts(faldbt, args.before)
results = run_scripts(scripts, faldbt)
raise_for_run_results_failures(scripts, results)
else:
results = run_scripts(scripts, faldbt)
raise_for_run_results_failures(scripts, results)
if not _scripts_flag(args):
# run globals when no --script is passed
_run_global_scripts(faldbt, args.before)
def _scripts_flag(args: argparse.Namespace) -> bool:
return bool(args.scripts)
def _select_scripts(
args: argparse.Namespace, models: List[DbtModel], faldbt: FalDbt
) -> List[FalScript]:
scripts = []
scripts_flag = _scripts_flag(args)
for model in models:
model_scripts = model.get_scripts(args.keyword, bool(args.before))
for path in model_scripts:
if not scripts_flag:
# run all scripts when no --script is passed
scripts.append(FalScript(faldbt, model, path))
elif path in args.scripts:
# if --script selector is there only run selected scripts
scripts.append(FalScript(faldbt, model, path))
return scripts
def _run_global_scripts(faldbt: FalDbt, is_before: bool):
global_scripts = list(
map(
lambda path: FalScript(faldbt, None, path),
faldbt._global_script_paths["before" if is_before else "after"],
)
)
results = run_scripts(global_scripts, faldbt)
raise_for_run_results_failures(global_scripts, results)
def _get_models_with_keyword(faldbt: FalDbt) -> List[DbtModel]:
return list(
filter(lambda model: faldbt.keyword in model.meta, faldbt.list_models())
)
def _get_filtered_models(faldbt: FalDbt, all, selected, before) -> List[DbtModel]:
selected_ids = _models_ids(faldbt._compile_task._flattened_nodes)
filtered_models: List[DbtModel] = []
if (
not all
and not selected
and not before
and faldbt._run_results.nativeRunResult is None
):
from faldbt.parse import FalParseError
raise FalParseError(
"Cannot define models to run without selection flags or dbt run_results artifact or --before flag"
)
models = _get_models_with_keyword(faldbt)
for node in models:
if selected:
if node.unique_id in selected_ids:
filtered_models.append(node)
elif before:
if node.get_scripts(faldbt.keyword, before) != []:
filtered_models.append(node)
elif all:
filtered_models.append(node)
elif node.status != "skipped":
filtered_models.append(node)
return filtered_models
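# Selection precedence in _get_filtered_models (as implemented above): explicit selection flags win,
# then --before (models that define a before script), then --all, and otherwise only models whose
# status in the dbt run results is not "skipped" are picked up.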
def _models_ids(models):
return list(map(lambda r: r.unique_id, models))
|
lightbus/utilities/io.py | gcollard/lightbus | 178 | 12793641 | import logging
logger = logging.getLogger(__name__)
def make_file_safe_api_name(api_name):
"""Make an api name safe for use in a file name"""
return "".join([c for c in api_name if c.isalpha() or c.isdigit() or c in (".", "_", "-")])
|
python/mxnet/gluon/probability/transformation/domain_map.py | pioy/incubator-mxnet | 211 | 12793693 | <filename>python/mxnet/gluon/probability/transformation/domain_map.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Classes for registering and storing bijection/transformations from
unconstrained space to a given domain.
"""
from numbers import Number
from .transformation import (
ExpTransform, AffineTransform, SigmoidTransform, ComposeTransform)
from ..distributions.constraint import (Constraint, Positive, GreaterThan, GreaterThanEq,
LessThan, Interval, HalfOpenInterval)
__all__ = ['domain_map', 'biject_to', 'transform_to']
class domain_map():
"""
Abstract Class for registering and storing mappings from domain
to bijections/transformations
"""
def __init__(self):
# constraint -> constraint -> transformation
self._storage = {}
super(domain_map, self).__init__()
def register(self, constraint, factory=None):
"""Register a bijection/transformation from unconstrained space to the domain
specified by `constraint`.
Parameters
----------
constraint : Type or Object
A class of constraint or an object of constraint
factory : callable
A function that outputs a `transformation` given a `constraint`,
by default None.
"""
# Decorator mode
if factory is None:
return lambda factory: self.register(constraint, factory)
if isinstance(constraint, Constraint):
constraint = type(constraint)
if not isinstance(constraint, type) or not issubclass(constraint, Constraint):
raise TypeError('Expected constraint to be either a Constraint subclass or instance, '
'but got {}'.format(constraint))
self._storage[constraint] = factory
return factory
def __call__(self, constraint):
try:
factory = self._storage[type(constraint)]
except KeyError:
raise NotImplementedError(
'Cannot transform {} constraints'.format(type(constraint).__name__))
return factory(constraint)
biject_to = domain_map()
transform_to = domain_map()
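# Usage sketch (illustrative): calling a registry with a constraint instance returns the transformation
# that maps the unconstrained real line onto that constraint's support, e.g.
#   biject_to(Positive())  # -> ExpTransform()
# Interval-type constraints similarly map through a sigmoid followed by an affine rescaling, as
# registered below.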
@biject_to.register(Positive)
@transform_to.register(Positive)
def _transform_to_positive(constraint):
# Although `constraint` is not used in this factory function,
# we decide to keep it for the purpose of consistency.
# pylint: disable=unused-argument
return ExpTransform()
@biject_to.register(GreaterThan)
@biject_to.register(GreaterThanEq)
@transform_to.register(GreaterThan)
@transform_to.register(GreaterThanEq)
def _transform_to_greater_than(constraint):
return ComposeTransform([ExpTransform(),
AffineTransform(constraint._lower_bound, 1)])
@biject_to.register(LessThan)
@transform_to.register(LessThan)
def _transform_to_less_than(constraint):
return ComposeTransform([ExpTransform(),
AffineTransform(constraint._upper_bound, -1)])
@biject_to.register(Interval)
@biject_to.register(HalfOpenInterval)
@transform_to.register(Interval)
@transform_to.register(HalfOpenInterval)
def _transform_to_interval(constraint):
# Handle the special case of the unit interval.
lower_is_0 = isinstance(constraint._lower_bound,
Number) and constraint._lower_bound == 0
upper_is_1 = isinstance(constraint._upper_bound,
Number) and constraint._upper_bound == 1
if lower_is_0 and upper_is_1:
return SigmoidTransform()
loc = constraint._lower_bound
scale = constraint._upper_bound - constraint._lower_bound
return ComposeTransform([SigmoidTransform(),
AffineTransform(loc, scale)])
|
pytools/lib/readqc_report.py | virtualparadox/bbmap | 134 | 12793700 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Readqc report: record stat key-value in readqc-stats.txt
### JGI_Analysis_Utility_Illumina::illumina_read_level_report
Created: Jul 24 2013
sulsj (<EMAIL>)
"""
import os
import sys
## custom libs in "../lib/"
srcDir = os.path.dirname(__file__)
sys.path.append(os.path.join(srcDir, 'tools')) ## ./tools
sys.path.append(os.path.join(srcDir, '../lib')) ## rqc-pipeline/lib
sys.path.append(os.path.join(srcDir, '../tools')) ## rqc-pipeline/tools
from readqc_constants import RQCReadQcConfig, ReadqcStats
from rqc_constants import RQCExitCodes
from os_utility import run_sh_command
from common import append_rqc_stats, append_rqc_file
statsFile = RQCReadQcConfig.CFG["stats_file"]
filesFile = RQCReadQcConfig.CFG["files_file"]
"""
Title : read_megablast_hits
Function : This function generates tophit list of megablast against different databases.
Usage : read_megablast_hits(db_name, log)
Args : blast db name or full path
Returns : SUCCESS
FAILURE
Comments :
"""
def read_megablast_hits(db, log):
currentDir = RQCReadQcConfig.CFG["output_path"]
megablastDir = "megablast"
megablastPath = os.path.join(currentDir, megablastDir)
statsFile = RQCReadQcConfig.CFG["stats_file"]
filesFile = RQCReadQcConfig.CFG["files_file"]
##
## Process blast output files
##
matchings = 0
hitCount = 0
parsedFile = os.path.join(megablastPath, "megablast.*.%s*.parsed" % (db))
matchings, _, exitCode = run_sh_command("grep -v '^#' %s 2>/dev/null | wc -l " % (parsedFile), True, log)
if exitCode == 0: ## if parsed file found.
t = matchings.split()
if len(t) == 1 and t[0].isdigit():
hitCount = int(t[0])
append_rqc_stats(statsFile, ReadqcStats.ILLUMINA_READ_MATCHING_HITS + " " + db, hitCount, log)
##
## add .parsed file
##
parsedFileFound, _, exitCode = run_sh_command("ls %s" % (parsedFile), True, log)
if parsedFileFound:
parsedFileFound = parsedFileFound.strip()
append_rqc_file(filesFile, ReadqcStats.ILLUMINA_READ_PARSED_FILE + " " + db, os.path.join(megablastPath, parsedFileFound), log)
else:
log.error("- Failed to add megablast parsed file of %s." % (db))
return RQCExitCodes.JGI_FAILURE
##
## wc the top hits
##
topHit = 0
tophitFile = os.path.join(megablastPath, "megablast.*.%s*.parsed.tophit" % (db))
tophits, _, exitCode = run_sh_command("grep -v '^#' %s 2>/dev/null | wc -l " % (tophitFile), True, log)
t = tophits.split()
if len(t) == 1 and t[0].isdigit():
topHit = int(t[0])
append_rqc_stats(statsFile, ReadqcStats.ILLUMINA_READ_TOP_HITS + " " + db, topHit, log)
##
## wc the taxonomic species
##
spe = 0
taxlistFile = os.path.join(megablastPath, "megablast.*.%s*.parsed.taxlist" % (db))
species, _, exitCode = run_sh_command("grep -v '^#' %s 2>/dev/null | wc -l " % (taxlistFile), True, log)
t = species.split()
if len(t) == 1 and t[0].isdigit():
spe = int(t[0])
append_rqc_stats(statsFile, ReadqcStats.ILLUMINA_READ_TAX_SPECIES + " " + db, spe, log)
##
## wc the top 100 hit
##
top100hits = 0
top100hitFile = os.path.join(megablastPath, "megablast.*.%s*.parsed.top100hit" % (db))
species, _, exitCode = run_sh_command("grep -v '^#' %s 2>/dev/null | wc -l " % (top100hitFile), True, log)
t = species.split()
if len(t) == 1 and t[0].isdigit():
top100hits = int(t[0])
append_rqc_stats(statsFile, ReadqcStats.ILLUMINA_READ_TOP_100HITS + " " + db, top100hits, log)
##
## Find and add taxlist file
##
taxListFound, _, exitCode = run_sh_command("ls %s" % (taxlistFile), True, log)
taxListFound = taxListFound.strip()
if taxListFound:
append_rqc_file(filesFile, ReadqcStats.ILLUMINA_READ_TAXLIST_FILE + " " + db, os.path.join(megablastPath, taxListFound), log)
else:
log.error("- Failed to add megablast taxlist file of %s." % (db))
return RQCExitCodes.JGI_FAILURE
##
## Find and add tophit file
##
tophitFound, _, exitCode = run_sh_command("ls %s" % (tophitFile), True, log)
tophitFound = tophitFound.strip()
if tophitFound:
append_rqc_file(filesFile, ReadqcStats.ILLUMINA_READ_TOPHIT_FILE + " " + db, os.path.join(megablastPath, tophitFound), log)
else:
log.error("- Failed to add megablast tophit file of %s." % (db))
return RQCExitCodes.JGI_FAILURE
##
## Find and add top100hit file
##
top100hitFound, _, exitCode = run_sh_command("ls %s" % (top100hitFile), True, log)
top100hitFound = top100hitFound.strip()
if top100hitFound:
append_rqc_file(filesFile, ReadqcStats.ILLUMINA_READ_TOP100HIT_FILE + " " + db, os.path.join(megablastPath, top100hitFound), log)
else:
log.error("- Failed to add megablast top100hit file of %s." % (db))
return RQCExitCodes.JGI_FAILURE
else:
log.info("- No blast hits for %s." % (db))
return RQCExitCodes.JGI_SUCCESS
"""
Title : read_level_qual_stats
Function : Generate qual scores and plots of read level 20mer sampling
Usage : read_level_mer_sampling($analysis, $summary_file_dir)
Args : 1) A reference to an JGI_Analysis object
2) current working folder wkdir/uniqueness
Returns : JGI_SUCCESS: Illumina read level report could be successfully generated.
JGI_FAILURE: Illumina read level report could not be generated.
Comments : This function is intended to be called at the very end of the illumina read level data processing script.
"""
def read_level_mer_sampling(dataToRecordDict, dataFile, log):
retCode = RQCExitCodes.JGI_FAILURE
## Old data
#nSeq nStartUniMer fracStartUniMer nRandUniMer fracRandUniMer
## 0 1 2 3 4
##25000 2500 0.1 9704 0.3882
## New data
#count first rand first_cnt rand_cnt
# 0 1 2 3 4
#25000 66.400 76.088 16600 19022
#50000 52.148 59.480 13037 14870
#75000 46.592 53.444 11648 13361
#100000 43.072 49.184 10768 12296 ...
if os.path.isfile(dataFile):
with open(dataFile, "r") as merFH:
lines = merFH.readlines()
## last line
t = lines[-1].split('\t')
# breaks 2016-09-07
#assert len(t) == 5
totalMers = int(t[0])
## new by bbcountunique
uniqStartMerPer = float("%.2f" % (float(t[1])))
uniqRandtMerPer = float("%.2f" % (float(t[2])))
dataToRecordDict[ReadqcStats.ILLUMINA_READ_20MER_SAMPLE_SIZE] = totalMers
dataToRecordDict[ReadqcStats.ILLUMINA_READ_20MER_PERCENTAGE_STARTING_MERS] = uniqStartMerPer
dataToRecordDict[ReadqcStats.ILLUMINA_READ_20MER_PERCENTAGE_RANDOM_MERS] = uniqRandtMerPer
retCode = RQCExitCodes.JGI_SUCCESS
else:
log.error("- qhist file not found: %s" % (dataFile))
return retCode
"""
Title : base_level_qual_stats
Function : Generate qual scores and plots of read level QC
Usage : base_level_qual_stats($analysis, $)
Args : 1) A reference to an JGI_Analysis object
2) current working folder wkdir/qual
Returns : JGI_SUCCESS: Illumina read level report could be successfully generated.
JGI_FAILURE: Illumina read level report could not be generated.
Comments : This function is intended to be called at the very end of the illumina base level data processing script.
"""
def base_level_qual_stats(dataToRecordDict, reformatObqhistFile, log):
cummlatPer = 0
cummlatBase = 0
statsPerc = {30:0, 25:0, 20:0, 15:0, 10:0, 5:0}
statsBase = {30:0, 25:0, 20:0, 15:0, 10:0, 5:0}
Q30_seen = 0
Q25_seen = 0
Q20_seen = 0
Q15_seen = 0
Q10_seen = 0
Q5_seen = 0
## New format
##Median 38
##Mean 37.061
##STDev 4.631
##Mean_30 37.823
##STDev_30 1.699
##Quality bases fraction
#0 159 0.00008
#1 0 0.00000
#2 12175 0.00593
#3 0 0.00000
#4 0 0.00000
#5 0 0.00000
#6 0 0.00000
allLines = open(reformatObqhistFile).readlines()
for l in allLines[::-1]:
l = l.strip()
##
## obqhist file format example
##
# #Median 36
# #Mean 33.298
# #STDev 5.890
# #Mean_30 35.303
# #STDev_30 1.517
# #Quality bases fraction
# 0 77098 0.00043
# 1 0 0.00000
# 2 0 0.00000
# 3 0 0.00000
# 4 0 0.00000
# 5 0 0.00000
# 6 0 0.00000
if len(l) > 0:
if l.startswith("#"):
if l.startswith("#Mean_30"):
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q30_SCORE_MEAN] = l.split('\t')[1]
elif l.startswith("#STDev_30"):
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q30_SCORE_STD] = l.split('\t')[1]
elif l.startswith("#Mean"):
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_OVERALL_BASES_Q_SCORE_MEAN] = l.split('\t')[1]
elif l.startswith("#STDev"):
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_OVERALL_BASES_Q_SCORE_STD] = l.split('\t')[1]
continue
qavg = None
nbase = None
percent = None
t = l.split()
try:
qavg = int(t[0])
nbase = int(t[1])
percent = float(t[2])
except IndexError:
log.warn("parse error in base_level_qual_stats: %s %s %s %s" % (l, qavg, nbase, percent))
continue
log.debug("base_level_qual_stats(): qavg and nbase and percent: %s %s %s" % (qavg, nbase, percent))
cummlatPer += percent * 100.0
cummlatPer = float("%.f" % (cummlatPer))
if cummlatPer > 100:
cummlatPer = 100.0 ## RQC-621
cummlatBase += nbase
if qavg == 30:
Q30_seen = 1
statsPerc[30] = cummlatPer
statsBase[30] = cummlatBase
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q30] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C30] = cummlatBase
elif qavg == 25:
Q25_seen = 1
statsPerc[25] = cummlatPer
statsBase[25] = cummlatBase
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q25] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C25] = cummlatBase
elif qavg == 20:
Q20_seen = 1
statsPerc[20] = cummlatPer
statsBase[20] = cummlatBase
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q20] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C20] = cummlatBase
elif qavg == 15:
Q15_seen = 1
statsPerc[15] = cummlatPer
statsBase[15] = cummlatBase
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q15] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C15] = cummlatBase
elif qavg == 10:
Q10_seen = 1
statsPerc[10] = cummlatPer
statsBase[10] = cummlatBase
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q10] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C10] = cummlatBase
elif qavg == 5:
Q5_seen = 1
statsPerc[5] = cummlatPer
statsBase[5] = cummlatBase
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q5] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C5] = cummlatBase
## Double check that no value is missing.
if Q25_seen == 0 and Q30_seen != 0:
Q25_seen = 1
statsPerc[25] = statsPerc[30]
statsBase[25] = statsBase[30]
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q25] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C25] = cummlatBase
if Q20_seen == 0 and Q25_seen != 0:
Q20_seen = 1
statsPerc[20] = statsPerc[25]
statsBase[20] = statsBase[25]
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q20] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C20] = cummlatBase
if Q15_seen == 0 and Q20_seen != 0:
Q15_seen = 1
statsPerc[15] = statsPerc[20]
statsBase[15] = statsBase[20]
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q15] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C15] = cummlatBase
if Q10_seen == 0 and Q15_seen != 0:
Q10_seen = 1
statsPerc[10] = statsPerc[15]
statsBase[10] = statsBase[15]
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q10] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C10] = cummlatBase
if Q5_seen == 0 and Q10_seen != 0:
Q5_seen = 1
statsPerc[5] = statsPerc[10]
statsBase[5] = statsBase[10]
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q5] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C5] = cummlatBase
if Q30_seen == 0:
log.error("Q30 is 0. Base quality values are ZERO.")
log.debug("Q and C values: %s" % (dataToRecordDict))
return RQCExitCodes.JGI_SUCCESS
"""
Title : q20_score
Function : this method returns Q20 using a qrpt file as input
Usage : JGI_QC_Utility::qc20_score($qrpt)
Args : $_[0] : qrpt file.
Returns : a number of Q20 score
Comments :
"""
# def q20_score(qrpt, log):
# log.debug("qrpt file %s" % (qrpt))
#
# q20 = None
# num = 0
#
# if os.path.isfile(qrpt):
# with open(qrpt, "r") as qrptFH:
# for l in qrptFH:
# num += 1
#
# if num == 1:
# continue
#
# ##############
# ## Old format
# ## READ1.qrpt
# ## column count min max sum mean Q1 med Q3 IQR lW rW A_Count C_Count G_Count T_Count N_Count Max_count
# ## 1 378701 2 34 12447306 32.87 31 34 34 3 27 34 108573 83917 81999 104127 85 378701
# ## 2 378701 2 34 12515957 33.05 33 34 34 1 32 34 112178 83555 84449 98519 0 378701
# ## 3 378701 2 34 12519460 33.06 33 34 34 1 32 34 104668 72341 80992 120700 0 378701
# ## 4 378701 2 37 13807944 36.46 37 37 37 0 37 37 96935 95322 83958 102440 46 378701
# ## 5 378701 2 37 13790443 36.42 37 37 37 0 37 37 114586 68297 78020 117740 58 378701
# ##
# ## or
# ##
# ## READ2.qrpt
# ## column count min max sum mean Q1 med Q3 IQR lW rW A_Count C_Count G_Count T_Count N_Count Max_count
# ## 1 378701 2 34 8875097 23.44 25 26 28 3 21 32 106904 84046 81795 105956 0 378701
# ## 2 378701 2 34 6543224 17.28 15 16 26 11 2 34 107573 77148 97953 88998 7029 378701
# ## 3 378701 2 34 7131741 18.83 16 16 26 10 2 34 96452 83003 107891 91355 0 378701
# ## 4 378701 2 37 9686653 25.58 19 32 33 14 2 37 97835 78304 87944 114618 0 378701
# ## 5 378701 2 37 10208226 26.96 25 33 35 10 10 37 98021 90611 89040 101029 0 378701
#
# pos = None
# mean = None
# t = l.split("\t")
# assert len(t) > 6
# pos = int(t[0])
# mean = float(t[5])
#
# if mean and pos:
# if mean < 20:
# return pos - 1
# else:
# q20 = pos
#
# else:
# log.error("- qhist file not found: %s" % (qrpt))
# return None
#
#
# return q20
def q20_score_new(bqHist, readNum, log):
log.debug("q20_score_new(): bqHist file = %s" % (bqHist))
q20 = None
if os.path.isfile(bqHist):
with open(bqHist, "r") as qrptFH:
for l in qrptFH:
if l.startswith('#'):
continue
## New data
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
##BaseNum count_1 min_1 max_1 mean_1 Q1_1 med_1 Q3_1 LW_1 RW_1 count_2 min_2 max_2 mean_2 Q1_2 med_2 Q3_2 LW_2 RW_2
# 0 6900 0 36 33.48 33 34 34 29 36 6900 0 36 33.48 33 34 34 29 36
pos = None
mean = None
t = l.split("\t")
pos = int(t[0]) + 1
if readNum == 1:
mean = float(t[4])
else:
mean = float(t[13])
if mean and pos:
if mean < 20:
return pos - 1
else:
q20 = pos
else:
log.error("- bqHist file not found: %s" % (bqHist))
return None
return q20
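# In other words, q20_score_new walks the per-cycle mean quality (column 4 for read 1, column 13 for
# read 2 of the bqHist file) and reports the last 1-based cycle position before the mean first drops
# below Q20 (or the final cycle if it never does); it returns None if the histogram file is missing.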
"""
Title : read_level_qual_stats
Function : Generate qual scores and plots of read level QC
Usage : read_level_qual_stats($analysis, $)
Args : 1) A reference to an JGI_Analysis object
2) current working folder wkdir/qual
Returns : JGI_SUCCESS: Illumina read level report could be successfully generated.
JGI_FAILURE: Illumina read level report could not be generated.
Comments : This function is intended to be called at the very end of the illumina read level data processing script.
"""
def read_level_qual_stats(dataToRecordDict, qhistTxtFullPath, log):
retCode = RQCExitCodes.JGI_FAILURE
cummlatPer = 0.0
Q30_seen = 0
Q25_seen = 0
Q20_seen = 0
Q15_seen = 0
Q10_seen = 0
Q5_seen = 0
if os.path.isfile(qhistTxtFullPath):
stats = {30:0, 25:0, 20:0, 15:0, 10:0, 5:0}
allLines = open(qhistTxtFullPath).readlines()
for l in allLines[::-1]:
if not l:
break
if l.startswith('#'):
continue
t = l.split()
assert len(t) == 3
qavg = int(t[0])
percent = float(t[2]) * 100.0 ## 20140826 Changed for bbtools
cummlatPer = cummlatPer + percent
cummlatPer = float("%.2f" % cummlatPer)
if qavg <= 30 and qavg > 25 and Q30_seen == 0:
Q30_seen = 1
stats[30] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q30] = cummlatPer
elif qavg <= 25 and qavg > 20 and Q25_seen == 0:
Q25_seen = 1
stats[25] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q25] = cummlatPer
elif qavg <= 20 and qavg > 15 and Q20_seen == 0:
Q20_seen = 1
stats[20] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q20] = cummlatPer
elif qavg <= 15 and qavg > 10 and Q15_seen == 0:
Q15_seen = 1
stats[15] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q15] = cummlatPer
elif qavg <= 10 and qavg > 5 and Q10_seen == 0:
Q10_seen = 1
stats[10] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q10] = cummlatPer
elif qavg <= 5 and Q5_seen == 0:
Q5_seen = 1
stats[5] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q5] = cummlatPer
### Double check that no value is missing.
if Q25_seen == 0 and Q30_seen != 0:
Q25_seen = 1
stats[25] = stats[30]
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q25] = cummlatPer
if Q20_seen == 0 and Q25_seen != 0:
Q20_seen = 1
stats[20] = stats[25]
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q20] = cummlatPer
if Q15_seen == 0 and Q20_seen != 0:
Q15_seen = 1
stats[15] = stats[20]
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q15] = cummlatPer
if Q10_seen == 0 and Q15_seen != 0:
Q10_seen = 1
stats[10] = stats[15]
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q10] = cummlatPer
if Q5_seen == 0 and Q10_seen != 0:
Q5_seen = 1
stats[5] = stats[10]
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q5] = cummlatPer
if Q30_seen == 0:
            log.error("Q30 is 0. Read quality values are ZERO.")
log.debug("Q30 %s, Q25 %s, Q20 %s, Q15 %s, Q10 %s, Q5 %s" % \
(stats[30], stats[25], stats[20], stats[15], stats[10], stats[5]))
retCode = RQCExitCodes.JGI_SUCCESS
else:
log.error("- qhist file not found: %s" % (qhistTxtFullPath))
return retCode
"""
Title : read_gc_mean
Function : This function generates average GC content % and its standard deviation and put them into database.
Usage : read_gc_mean($analysis)
Args : 1) A reference to an JGI_Analysis object
Returns : JGI_SUCCESS:
JGI_FAILURE:
Comments :
"""
def read_gc_mean(histFile, log):
mean = 0.0
stdev = 0.0
retCode = RQCExitCodes.JGI_FAILURE
if os.path.isfile(histFile):
with open(histFile, "r") as histFH:
line = histFH.readline() ## we only need the first line
# Ex) #Found 1086 total values totalling 420.3971. <0.387106 +/- 0.112691>
if len(line) == 0 or not line.startswith("#Found"):
                log.error("- GC content hist text file does not contain the right results: %s, %s" % (histFile, line))
retCode = RQCExitCodes.JGI_FAILURE
else:
toks = line.split()
assert len(toks) == 9
mean = float(toks[6][1:]) * 100.0
stdev = float(toks[8][:-1]) * 100.0
log.debug("mean, stdev = %.2f, %.2f" % (mean, stdev))
retCode = RQCExitCodes.JGI_SUCCESS
else:
log.error("- gc hist file not found: %s" % (histFile))
return retCode, mean, stdev
if __name__ == "__main__":
exit(0)
## EOF
|
bookwyrm/tests/templatetags/test_notification_page_tags.py | mouse-reeve/fedireads | 270 | 12793706 | <gh_stars>100-1000
""" style fixes and lookups for templates """
from unittest.mock import patch
from django.test import TestCase
from bookwyrm import models
from bookwyrm.templatetags import notification_page_tags
@patch("bookwyrm.activitystreams.add_status_task.delay")
@patch("bookwyrm.activitystreams.remove_status_task.delay")
class NotificationPageTags(TestCase):
"""lotta different things here"""
def setUp(self):
"""create some filler objects"""
with patch("bookwyrm.suggested_users.rerank_suggestions_task.delay"), patch(
"bookwyrm.activitystreams.populate_stream_task.delay"
), patch("bookwyrm.lists_stream.populate_lists_task.delay"):
self.user = models.User.objects.create_user(
"<EMAIL>",
"<EMAIL>",
"mouseword",
local=True,
localname="mouse",
)
def test_related_status(self, *_):
"""gets the subclass model for a notification status"""
with patch("bookwyrm.models.activitypub_mixin.broadcast_task.apply_async"):
status = models.Status.objects.create(content="hi", user=self.user)
notification = models.Notification.objects.create(
user=self.user, notification_type="MENTION", related_status=status
)
result = notification_page_tags.related_status(notification)
self.assertIsInstance(result, models.Status)
|
a06_Seq2seqWithAttention/a1_seq2seq.py | sunshinenum/text_classification | 7,723 | 12793729 | # -*- coding: utf-8 -*-
import tensorflow as tf
# [Used at test/inference time] Returns a loop function which, given the previous output, takes the argmax
# to get the predicted token index and then looks up that token's embedding.
def extract_argmax_and_embed(embedding, output_projection=None):
"""
Get a loop_function that extracts the previous symbol and embeds it. Used by decoder.
:param embedding: embedding tensor for symbol
:param output_projection: None or a pair (W, B). If provided, each fed previous output will
first be multiplied by W and added B.
:return: A loop function
"""
def loop_function(prev, _):
if output_projection is not None:
prev = tf.matmul(prev, output_projection[0]) + output_projection[1]
        prev_symbol = tf.argmax(prev, 1)  # index of the predicted token
        emb_prev = tf.gather(embedding, prev_symbol)  # embedding of that index
return emb_prev
return loop_function
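# Usage sketch (hypothetical variable names): at inference time one would build the loop function from the
# decoder's embedding matrix and output projection and pass it to rnn_decoder_with_attention below, e.g.
#   loop_fn = extract_argmax_and_embed(embedding, (W_proj, b_proj))
# During training, loop_function is left as None so the ground-truth inputs are fed instead.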
# RNN decoder part.
# During training, feed the ground-truth decoder inputs; at test time, feed the output at step t back in as the input at step t+1.
def rnn_decoder_with_attention(decoder_inputs, initial_state, cell, loop_function,attention_states,scope=None):#3D Tensor [batch_size x attn_length x attn_size]
"""RNN decoder for the sequence-to-sequence model.
Args:
decoder_inputs: A list of 2D Tensors [batch_size x input_size].it is decoder input.
initial_state: 2D Tensor with shape [batch_size x cell.state_size].it is the encoded vector of input sentences, which represent 'thought vector'
cell: core_rnn_cell.RNNCell defining the cell function and size.
loop_function: If not None, this function will be applied to the i-th output
in order to generate the i+1-st input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/abs/1506.03099.
Signature -- loop_function(prev, i) = next
* prev is a 2D Tensor of shape [batch_size x output_size],
* i is an integer, the step number (when advanced control is needed),
* next is a 2D Tensor of shape [batch_size x input_size].
attention_states: 3D Tensor [batch_size x attn_length x attn_size].it is represent input X.
scope: VariableScope for the created subgraph; defaults to "rnn_decoder".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing generated outputs.
state: The state of each cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
(Note that in some cases, like basic RNN cell or GRU cell, outputs and
states can be the same. They are different for LSTM cells though.)
"""
with tf.variable_scope(scope or "rnn_decoder"):
print("rnn_decoder_with_attention started...")
state = initial_state #[batch_size x cell.state_size].
_, hidden_size = state.get_shape().as_list() #200
attention_states_original=attention_states
batch_size,sequence_length,_=attention_states.get_shape().as_list()
outputs = []
prev = None
#################################################
        for i, inp in enumerate(decoder_inputs):  # loop over decoder inputs, e.g. sentence_length tensors of [batch_size x input_size]
            # During training, use the ground-truth input; at test time, use the output at step t as the input at step t+1
            if loop_function is not None and prev is not None:  # at test time: if loop_function and the previous output exist, feed the previous output back in
with tf.variable_scope("loop_function", reuse=True):
inp = loop_function(prev, i)
if i > 0:
tf.get_variable_scope().reuse_variables()
##ATTENTION#################################################################################################################################################
# 1.get logits of attention for each encoder input. attention_states:[batch_size x attn_length x attn_size]; query=state:[batch_size x cell.state_size]
query=state
W_a = tf.get_variable("W_a", shape=[hidden_size, hidden_size],initializer=tf.random_normal_initializer(stddev=0.1))
query=tf.matmul(query, W_a) #[batch_size,hidden_size]
query=tf.expand_dims(query,axis=1) #[batch_size, 1, hidden_size]
U_a = tf.get_variable("U_a", shape=[hidden_size, hidden_size],initializer=tf.random_normal_initializer(stddev=0.1))
U_aa = tf.get_variable("U_aa", shape=[ hidden_size])
attention_states=tf.reshape(attention_states,shape=(-1,hidden_size)) #[batch_size*sentence_length,hidden_size]
attention_states=tf.matmul(attention_states, U_a) #[batch_size*sentence_length,hidden_size]
#print("batch_size",batch_size," ;sequence_length:",sequence_length," ;hidden_size:",hidden_size) #print("attention_states:", attention_states) #(?, 200)
attention_states=tf.reshape(attention_states,shape=(-1,sequence_length,hidden_size)) # TODO [batch_size,sentence_length,hidden_size]
#query_expanded: [batch_size,1, hidden_size]
#attention_states_reshaped: [batch_size,sentence_length,hidden_size]
attention_logits=tf.nn.tanh(query+attention_states+U_aa) #[batch_size,sentence_length,hidden_size]. additive style
# 2.get possibility of attention
attention_logits=tf.reshape(attention_logits,shape=(-1,hidden_size)) #batch_size*sequence_length [batch_size*sentence_length,hidden_size]
V_a = tf.get_variable("V_a", shape=[hidden_size,1],initializer=tf.random_normal_initializer(stddev=0.1)) #[hidden_size,1]
            attention_logits=tf.matmul(attention_logits,V_a) # final shape [batch_size*sentence_length,1] <----- [batch_size*sentence_length,hidden_size] x [hidden_size,1]
attention_logits=tf.reshape(attention_logits,shape=(-1,sequence_length)) #attention_logits:[batch_size,sequence_length]
##########################################################################################################################################################
#attention_logits=tf.reduce_sum(attention_logits,2) #[batch_size x attn_length]
attention_logits_max=tf.reduce_max(attention_logits,axis=1,keep_dims=True) #[batch_size x 1]
# possibility distribution for each encoder input.it means how much attention or focus for each encoder input
p_attention=tf.nn.softmax(attention_logits-attention_logits_max)#[batch_size x attn_length]
# 3.get weighted sum of hidden state for each encoder input as attention state
p_attention=tf.expand_dims(p_attention,axis=2) #[batch_size x attn_length x 1]
# attention_states:[batch_size x attn_length x attn_size]; p_attention:[batch_size x attn_length];
attention_final=tf.multiply(attention_states_original,p_attention) #[batch_size x attn_length x attn_size]
context_vector=tf.reduce_sum(attention_final,axis=1) #[batch_size x attn_size]
############################################################################################################################################################
#inp:[batch_size x input_size].it is decoder input; attention_final:[batch_size x attn_size]
            output, state = cell(inp, state,context_vector) # advance the RNN one step using the attention context vector
            outputs.append(output) # append the step output to the result list
if loop_function is not None:
prev = output
print("rnn_decoder_with_attention ended...")
return outputs, state |
kitsune/users/migrations/0025_auto_20200926_0638.py | The-smooth-operator/kitsune | 929 | 12793730 | # Generated by Django 2.2.14 on 2020-09-26 06:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0024_auto_20200914_0433'),
]
operations = [
migrations.AlterModelOptions(
name='profile',
options={'permissions': (('view_karma_points', 'Can view karma points'), ('deactivate_users', 'Can deactivate users'))},
),
]
|
examples/example_GenericSignal.py | ishine/acoular | 294 | 12793742 | # -*- coding: utf-8 -*-
"""
"""
from pylab import *
from acoular import *
# files
datafile = 'example_data.h5'
t1 = MaskedTimeSamples(name=datafile)
t1.start = 0 # first sample, default
t1.stop = 16000 # last valid sample = 15999
invalid = [1,7] # list of invalid channels (unwanted microphones etc.)
t1.invalid_channels = invalid
t2 = ChannelMixer(source=t1)
sig = GenericSignalGenerator(source=t2)
plot(sig.signal())
show() |
openspeech/datasets/librispeech/preprocess/subword.py | CanYouImagine/openspeech | 207 | 12793746 | # MIT License
#
# Copyright (c) 2021 <NAME> and <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sentencepiece as spm
import shutil
from typing import Tuple
from openspeech.datasets.librispeech.preprocess.preprocess import collect_transcripts
SENTENCEPIECE_MODEL_NAME = "sp"
def _prepare_tokenizer(train_transcripts, vocab_size):
""" Prepare sentencepice tokenizer """
input_file = 'spm_input.txt'
model_type = 'unigram'
with open(input_file, 'w') as f:
for transcript in train_transcripts:
f.write(f"{transcript.split('|')[-1]}\n")
spm.SentencePieceTrainer.Train(f"--input={input_file} "
f"--model_prefix={SENTENCEPIECE_MODEL_NAME} "
f"--vocab_size={vocab_size} "
f"--model_type={model_type} "
f"--pad_id=0 "
f"--bos_id=1 "
f"--eos_id=2 "
f"--unk_id=3 "
f"--user_defined_symbols=<blank>")
def generate_manifest_files(dataset_path: str, manifest_file_path: str, vocab_path: str, vocab_size: int) -> None:
"""
Generate manifest files.
Format: {audio_path}\t{transcript}\t{numerical_label}
    Args:
        dataset_path (str): root directory of the dataset to collect transcripts from
        manifest_file_path (str): path of the manifest file to write
        vocab_path (str): directory in which the sentencepiece model and vocab are stored
        vocab_size (int): size of subword vocab
Returns:
None
"""
transcripts_collection = collect_transcripts(dataset_path)
_prepare_tokenizer(transcripts_collection[0], vocab_size)
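    # _prepare_tokenizer writes "<model_prefix>.model" / "<model_prefix>.vocab" (here sp.model / sp.vocab)
    # into the current working directory; the copies below move them into vocab_path for later use.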
shutil.copy(f"{SENTENCEPIECE_MODEL_NAME}.model", os.path.join(vocab_path, f"{SENTENCEPIECE_MODEL_NAME}.model"))
shutil.copy(f"{SENTENCEPIECE_MODEL_NAME}.vocab", os.path.join(vocab_path, f"{SENTENCEPIECE_MODEL_NAME}.vocab"))
sp = spm.SentencePieceProcessor()
sp.Load(os.path.join(vocab_path, f"{SENTENCEPIECE_MODEL_NAME}.model"))
with open(manifest_file_path, 'w') as f:
for idx, part in enumerate(['train-960', 'dev-clean', 'dev-other', 'test-clean', 'test-other']):
for transcript in transcripts_collection[idx]:
audio_path, transcript = transcript.split('|')
text = " ".join(sp.EncodeAsPieces(transcript))
label = " ".join([str(item) for item in sp.EncodeAsIds(transcript)])
f.write(f"{audio_path}\t{text}\t{label}\n")
|
seurat/generation/maya/seurat_rig.py | Asteur/vrhelper | 819 | 12793751 | <reponame>Asteur/vrhelper
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates a JSON manifest and a Maya camera rig for Seurat.
Example usage:
CreateRig(headbox_min=[-0.5, -0.5, -0.5],
headbox_max=[0.5, 0.5, 0.5],
num_view_groups=16, # Should be a power of two.
image_size=1024,
near_clip=0.1,
far_clip=100.0,
depth_type='EYE_Z',
depth_channel_name='A',
color_file_path_pattern='%s_color.%04d.exr',
depth_file_path_pattern='%s_depth.%04d.exr',
json_file_path='./manifest.json')
"""
import json
import math
import operator
def ProjectPoint(matrix, point):
"""Projects a 3D point using a 4x4 matrix.
Args:
matrix: A 4x4 matrix represented as a list of 16 floats.
point: A 3D point represented as a list of 3 floats.
Returns:
The projected point, represented as a list of 3 floats.
"""
result_hom = [0.0, 0.0, 0.0, 0.0]
for row in xrange(4):
for col in xrange(3):
result_hom[row] += matrix[4 * row + col] * point[col]
# point.w = 1.0 implicitly
result_hom[row] += matrix[4 * row + 3]
w = result_hom[3]
return map(operator.div, result_hom[0:3], [w, w, w])
def WorldFromEyeMatrixFromFace(face_name):
"""Creates world-from-eye matrix for the given face of a cube map.
Args:
face_name: Name of the face. Must be one of 'front', 'back', 'left',
'right', 'bottom', 'top'.
Returns:
The world-from-eye matrix for the given face as a list in row-major order.
Raises:
ValueError: face_name is not the name of a cube map face.
"""
# pylint: disable=bad-whitespace
# pylint: disable=bad-continuation
  if face_name == 'front':
return [ 1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0] # pyformat: disable
  elif face_name == 'back':
return [-1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, -1.0, 0.0,
0.0, 0.0, 0.0, 1.0] # pyformat: disable
  elif face_name == 'left':
return [ 0.0, 0.0, 1.0, 0.0,
0.0, 1.0, 0.0, 0.0,
-1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0] # pyformat: disable
  elif face_name == 'right':
return [ 0.0, 0.0, -1.0, 0.0,
0.0, 1.0, 0.0, 0.0,
1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0] # pyformat: disable
  elif face_name == 'bottom':
return [ 1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0] # pyformat: disable
  elif face_name == 'top':
return [ 1.0, 0.0, 0.0, 0.0,
0.0, 0.0, -1.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0] # pyformat: disable
else:
raise ValueError('Invalid face_name')
def CubeFaceProjectionMatrix(near, far):
"""Creates a cube-face 90 degree FOV projection matrix.
The created matrix is an OpenGL-style projection matrix.
Args:
near: Eye-space Z position of the near clipping plane.
far: Eye-space Z position of the far clipping plane.
Returns:
The clip-from-eye matrix as a list in row-major order.
Raises:
ValueError: Invalid clip planes. near <= 0.0 or far <= near.
"""
if near <= 0.0:
raise ValueError('near must be positive.')
if far <= near:
raise ValueError('far must be greater than near.')
left = -near
right = near
bottom = -near
top = near
a = (2.0 * near) / (right - left)
b = (2.0 * near) / (top - bottom)
c = (right + left) / (right - left)
d = (top + bottom) / (top - bottom)
e = (near + far) / (near - far)
f = (2.0 * near * far) / (near - far)
# pylint: disable=bad-whitespace
return [a, 0.0, c, 0.0,
0.0, b, d, 0.0,
0.0, 0.0, e, f,
0.0, 0.0, -1.0, 0.0] # pyformat: disable
def RadicalInverse(a, base):
"""Computes the radical inverse of |a| in base |base|.
Args:
a: The integer number for which the radical inverse is computed.
base: The radical inverse is computed in this base (integer).
Returns:
The radical inverse as a float in the range [0.0, 1.0).
"""
reversed_digits = 0
base_n = 1
# Compute the reversed digits, base b.
while a > 0:
next_a = a / base
digit = a - next_a * base
reversed_digits = reversed_digits * base + digit
base_n *= base
a = next_a
# Only when done are the reversed digits divided by b^n.
return min(reversed_digits / float(base_n), 1.0)
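# Worked example (added for clarity): RadicalInverse(5, 2) reverses the base-2 digits of
# 5 (binary 101 -> 101) and divides by 2**3, giving 5/8 = 0.625.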
def PointInBox(box_min, box_max, sample):
"""Computes a sample point inside a box with arbitrary number of dimensions.
Args:
box_min: A list of floats representing the lower bounds of the box.
box_max: A list of floats representing the upper bounds of the box.
sample: A list of floats in the range [0.0, 1.0] representing the
relative sample position in the box.
Returns:
A list of floats, representing the absolute position of the sample in
the box.
"""
delta = map(operator.sub, box_max, box_min)
offset = map(operator.mul, delta, sample)
position = map(operator.add, box_min, offset)
return position
def Distance(point_a, point_b):
"""Computes the euclidean distance between two points.
  The points can have an arbitrary number of dimensions.
Args:
point_a: A list of numbers representing the first point.
point_b: A list of numbers representing the second point.
Returns:
The euclidean distance as a float.
"""
delta = map(operator.sub, point_a, point_b)
delta_sqr = map(operator.mul, delta, delta)
distance_sqr = 0.0
for element in delta_sqr:
distance_sqr += element
return math.sqrt(distance_sqr)
def RotateCamera(camera_name, face_name):
"""Rotates a Maya camera node to look at a given cube map face.
Args:
camera_name: Name of the Maya camera's transform node.
face_name: Name of the cube map face.
Raises:
ValueError: face is not a valid cube map face name.
"""
# Disable the undefined-variable lint error, because the Maya package is not
# defined in the environment where the linter runs.
#
# pylint: disable=undefined-variable
  if face_name == 'front':
pass
  elif face_name == 'back':
maya.cmds.setAttr(camera_name + '.rotateY', 180)
  elif face_name == 'left':
maya.cmds.setAttr(camera_name + '.rotateY', 90)
  elif face_name == 'right':
maya.cmds.setAttr(camera_name + '.rotateY', -90)
  elif face_name == 'bottom':
maya.cmds.setAttr(camera_name + '.rotateX', -90)
  elif face_name == 'top':
maya.cmds.setAttr(camera_name + '.rotateX', 90)
else:
raise ValueError('Invalid face_name')
def GenerateCameraPositions(headbox_min, headbox_max, num_cameras):
"""Generates camera positions in a headbox.
  Camera positions are computed as a 3D Hammersley point set. The points are
transformed such that their bounding box is exactly equal to the headbox. The
points are then sorted according to distance to the headbox center. Finally,
the point that is closest to the headbox center is replaced by the headbox
center itself to include a view from the reference camera.
Args:
headbox_min: The lower bounds of the headbox as a list of 3 floats.
headbox_max: The upper bounds of the headbox as a list of 3 floats.
num_cameras: The number of cameras to generate. Should be a power of two.
Returns:
A list of 3D points (each a list of 3 floats), representing the positions
of the generated cameras.
Raises:
ValueError: num_cameras is not positive.
"""
if num_cameras <= 0:
raise ValueError('num_cameras must be positive')
if num_cameras == 1:
# Use the headbox center if a single camera position is requested.
return [PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5])]
samples = []
max_sample = [0.0, 0.0, 0.0]
for i in xrange(num_cameras):
# Use a 3D Hammersley point set for the samples.
sample = [
i / float(num_cameras),
RadicalInverse(i, 2),
RadicalInverse(i, 3)
]
for dim in xrange(3):
max_sample[dim] = max(max_sample[dim], sample[dim])
samples.append(sample)
headbox_center = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5])
camera_positions = []
for sample in samples:
# Normalize the samples so that their bounding box is the unit cube.
for dim in xrange(3):
sample[dim] /= max_sample[dim]
position = PointInBox(headbox_min, headbox_max, sample)
camera_positions.append(position)
sorted_positions = sorted(
camera_positions, key=lambda point: Distance(point, headbox_center))
# Replace the point closest to the headbox center by the headbox center
# itself.
sorted_positions[0] = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5])
return sorted_positions
def CreateCameras(camera_positions, near_clip, far_clip):
"""Creates and animates the Maya cameras for the rig.
Six cameras, one for each cube face, are generated. Each camera is configured
with a square viewport and the given near and far clipping planes. This method
also adjusts the Maya timeline to exactly contain the frames for the rig
animation. Each of the six cameras will get one keyframe per camera position.
Args:
camera_positions: A list of 3D points (each a list of 3 floats) representing
the positions of the cameras.
near_clip: Eye-space Z position of the near clipping planes.
far_clip: Eye-space Z position of the far clipping planes.
"""
# Disable the undefined-variable lint error, because the Maya package is not
# defined in the environment where the linter runs.
#
# pylint: disable=undefined-variable
start_time = 0
end_time = len(camera_positions) - 1
maya.cmds.playbackOptions(
animationStartTime=start_time,
animationEndTime=end_time,
minTime=start_time,
maxTime=end_time)
for face in ['front', 'back', 'left', 'right', 'bottom', 'top']:
# Create a cube face camera and rotate it.
camera_name = maya.cmds.camera(
name='seurat_' + face,
focalLength=12.7,
horizontalFilmAperture=1,
verticalFilmAperture=1,
nearClipPlane=near_clip,
farClipPlane=far_clip)[0]
RotateCamera(camera_name, face)
# Set translation keyframes for all positions on this camera.
for view_group_index, position in enumerate(camera_positions):
maya.cmds.setKeyframe(
camera_name, at='translateX', t=view_group_index, v=position[0])
maya.cmds.setKeyframe(
camera_name, at='translateY', t=view_group_index, v=position[1])
maya.cmds.setKeyframe(
camera_name, at='translateZ', t=view_group_index, v=position[2])
def CreateViewGroups(headbox_center, camera_positions, image_size, near_clip,
far_clip, depth_type, depth_channel_name,
color_file_path_pattern, depth_file_path_pattern):
"""Creates and returns the view groups for the JSON output.
Args:
headbox_center: Center of the headbox as a list of 3 floats.
camera_positions: Positions of the cameras as a list of 3D points (each a
list of 3 floats).
image_size: Size of the output images in pixels.
near_clip: Eye-space Z position of the near clipping planes.
far_clip: Eye-space Z position of the far clipping planes.
depth_type: A string representing the depth encoding. Valid values are:
'WINDOW_Z' (window-space Z coordinate in the range [0.0, 1.0]),
'EYE_Z' (negated eye-space Z coordinate in the range [0.0, inf);
Arnold's encoding),
'RAY_DEPTH' (distance to eye).
depth_channel_name: Name of the depth channel in the output file.
Commonly used values are 'R' (VRay) and 'A' (Arnold).
color_file_path_pattern: File name pattern for color images. Must
contain a placeholder for a string (face name) and an integer (view
group number). Example: '%s.%04d.exr' for file names
'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'.
depth_file_path_pattern: File name pattern for depth images. Must
contain a placeholder for a string (face name) and an integer (view
group number). Example: '%s.%04d.exr' for file names
'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'.
Returns:
A dictionary representing the view groups.
"""
view_groups = []
for view_group_index, absolute_position in enumerate(camera_positions):
views = []
for face in ['front', 'back', 'left', 'right', 'bottom', 'top']:
# Camera position relative to headbox center.
position = map(operator.sub, absolute_position, headbox_center)
clip_from_eye_matrix = CubeFaceProjectionMatrix(near_clip, far_clip)
world_from_eye_matrix = WorldFromEyeMatrixFromFace(face)
# Set translation component of world-from-eye matrix.
for i in xrange(3):
world_from_eye_matrix[4 * i + 3] = position[i]
# Create camera object
camera = {
'image_width': image_size,
'image_height': image_size,
'clip_from_eye_matrix': clip_from_eye_matrix,
'world_from_eye_matrix': world_from_eye_matrix,
'depth_type': depth_type
}
# Create view object and add it to the view groups
color_image_path = (color_file_path_pattern % (face, view_group_index))
depth_image_path = (depth_file_path_pattern % (face, view_group_index))
view = {
'projective_camera': camera,
'depth_image_file': {
'color': {
'path': color_image_path,
'channel_0': 'R',
'channel_1': 'G',
'channel_2': 'B',
'channel_alpha': 'A'
},
'depth': {
'path': depth_image_path,
'channel_0': depth_channel_name
}
}
}
views.append(view)
view_group = {'views': views}
view_groups.append(view_group)
# Return the view_groups as a Python list.
return view_groups
def CreateRig(headbox_min,
headbox_max,
num_view_groups,
image_size,
near_clip,
far_clip,
depth_type,
depth_channel_name,
color_file_path_pattern,
depth_file_path_pattern,
json_file_path,
json_only=False):
"""Creates a Maya camera rig and JSON manifest for Seurat.
Args:
headbox_min: List of three floats representing the lower bounds of the
headbox in world-space.
headbox_max: List of three floats representing the upper bounds of the
headbox in world-space.
num_view_groups: Number of view groups (camera positions) to generate.
Must be a power of two.
image_size: Resolution of the output images in pixels.
near_clip: Eye-space Z position of the near clipping planes.
far_clip: Eye-space Z position of the far clipping planes.
depth_type: A string representing the depth encoding. Valid values are:
'WINDOW_Z' (window-space Z coordinate in the range [0.0, 1.0]),
'EYE_Z' (negated eye-space Z coordinate in the range [0.0, inf);
Arnold's encoding),
'RAY_DEPTH' (distance to eye).
depth_channel_name: Name of the depth channel in the output file.
Commonly used values are 'R' (VRay) and 'A' (Arnold).
color_file_path_pattern: File name pattern for color images. Must
contain a placeholder for a string (face name) and an integer (view
group number). Example: '%s.%04d.exr' for file names
'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'.
depth_file_path_pattern: File name pattern for depth images. Must
contain a placeholder for a string (face name) and an integer (view
group number). Example: '%s.%04d.exr' for file names
'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'.
json_file_path: Path to the output JSON manifest file.
json_only: A boolean value. If true, the Maya camera generation step is
bypassed.
"""
# Compute the positions of the cameras.
camera_positions = GenerateCameraPositions(headbox_min, headbox_max,
num_view_groups)
# Generate the six Maya cameras and keyframe their positions.
if not json_only:
CreateCameras(camera_positions, near_clip, far_clip)
# Compute the headbox center.
headbox_center = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5])
# Generate the JSON manifest and write it to the file.
view_groups = CreateViewGroups(headbox_center, camera_positions, image_size,
near_clip, far_clip, depth_type,
depth_channel_name, color_file_path_pattern,
depth_file_path_pattern)
json_string = json.dumps({'view_groups': view_groups}, indent=2)
with open(json_file_path, 'w') as json_file:
json_file.write(json_string)
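# Minimal example added for illustration (not part of the original module). With
# json_only=True the Maya-dependent steps are skipped, so this runs in a plain Python 2
# interpreter and only writes ./manifest.json; the file name patterns are placeholders.
if __name__ == '__main__':
  CreateRig(headbox_min=[-0.5, -0.5, -0.5],
            headbox_max=[0.5, 0.5, 0.5],
            num_view_groups=16,
            image_size=1024,
            near_clip=0.1,
            far_clip=100.0,
            depth_type='EYE_Z',
            depth_channel_name='A',
            color_file_path_pattern='%s_color.%04d.exr',
            depth_file_path_pattern='%s_depth.%04d.exr',
            json_file_path='./manifest.json',
            json_only=True)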
|
tools/agile-machine-learning-api/codes/trainer/utils/metric_utils.py | ruchirjain86/professional-services | 2,116 | 12793759 | <gh_stars>1000+
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for creating metrics in tensorflow for model training."""
import tensorflow as tf
def mean_acc(labels, predictions, num_classes):
"""Mean per class accuracy metrics
Arguments:
labels: tf.Tensor objects, True values of the dependent variable
predictions: tf.Tensor objects, Predictions from the model
Returns:
The mean per class accuracy
"""
return {'mean_class_acc': tf.metrics.mean_per_class_accuracy(
labels,
predictions['class_ids'], num_classes)
}
def my_auc(labels, predictions):
"""Custom AUC metric using interpolation.
Arguments:
labels: tf.Tensor objects, True values of dependent variable
predictions: tf.Tensor objects, Predictions from the model
Returns:
The AUC metric for the model
"""
return {'auc_ci': tf.metrics.auc(
labels,
predictions['class_ids'],
summation_method='careful_interpolation')
}
def rmse(labels, predictions):
"""Root mean squared error metric for regression tasks.
Arguments:
labels: tf.Tensor objects, True values of dependent variable
predictions: tf.Tensor objects, Predictions from the model
Returns:
Root mean squared error for regression model
"""
return {'root_mean_square_error': tf.metrics.root_mean_squared_error(
labels,
predictions['predictions'])
}
def mar(labels, predictions):
"""Mean absolute error for regression model.
Arguments:
labels: tf.Tensor objects, True values of dependent variable
predictions: tf.Tensor objects, Predictions from the model
Returns:
Mean absolute error for the regression model
"""
return {'mean_absolute_error': tf.metrics.mean_absolute_error(
labels,
predictions['predictions'])
}
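# Wiring sketch (an assumption added for illustration, not part of the original module):
# these functions are meant to be attached to a tf.estimator.Estimator via
# tf.estimator.add_metrics. `estimator` and the class count below are hypothetical;
# mean_acc is wrapped so it matches the (labels, predictions) signature add_metrics expects.
#
#     def mean_acc_metric(labels, predictions):
#         return mean_acc(labels, predictions, num_classes=10)
#
#     estimator = tf.estimator.add_metrics(estimator, mean_acc_metric)
#     estimator = tf.estimator.add_metrics(estimator, my_auc)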
|
recipes/Python/389203_Series_generator_using_multiple_generators_/recipe-389203.py | tdiprima/code | 2,023 | 12793788 | <gh_stars>1000+
# Simple series generator with
# multiple generators & decorators.
# Author : <NAME>
def myfunc(**kwds):
def func(f):
cond = kwds['condition']
proc = kwds['process']
num = kwds['number']
x = 0
for item in f():
if cond and cond(item):
if proc: item = proc(item)
yield item
x += 1
if x==num:
break
return func
def series(condition=None, process=None, number=10):
@myfunc(condition=condition,process=process,number=number)
def wrapper():
x = 1
while 1:
yield x
x += 1
return wrapper
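# Example usage added for illustration: take the first five even numbers from the
# infinite series, square each one, and collect the results.
if __name__ == '__main__':
    evens_squared = series(condition=lambda x: x % 2 == 0,
                           process=lambda x: x * x,
                           number=5)
    print(list(evens_squared))  # [4, 16, 36, 64, 100]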
|
rpcpy/openapi.py | william-wambua/rpc.py | 152 | 12793804 | <reponame>william-wambua/rpc.py<gh_stars>100-1000
import functools
import inspect
import typing
import warnings
__all__ = [
"BaseModel",
"create_model",
"validate_arguments",
"set_type_model",
"is_typed_dict_type",
"parse_typed_dict",
"TEMPLATE",
]
Callable = typing.TypeVar("Callable", bound=typing.Callable)
try:
from pydantic import BaseModel, ValidationError, create_model
from pydantic import validate_arguments as pydantic_validate_arguments
# visit this issue
# https://github.com/samuelcolvin/pydantic/issues/1205
def validate_arguments(function: Callable) -> Callable:
function = pydantic_validate_arguments(function)
@functools.wraps(function)
def change_exception(*args, **kwargs):
try:
return function(*args, **kwargs)
except ValidationError as exception:
type_error = TypeError(
"Failed to pass pydantic's type verification, please output"
" `.more_info` of this exception to view detailed information."
)
type_error.more_info = exception
raise type_error
return change_exception # type: ignore
except ImportError:
def create_model(*args, **kwargs): # type: ignore
raise NotImplementedError("Need install `pydantic` from pypi.")
def validate_arguments(function: Callable) -> Callable:
return function
BaseModel = type("BaseModel", (), {}) # type: ignore
def set_type_model(func: Callable) -> Callable:
"""
try generate request body model from type hint and default value
"""
sig = inspect.signature(func)
field_definitions: typing.Dict[str, typing.Any] = {}
for name, parameter in sig.parameters.items():
if parameter.annotation == parameter.empty:
# raise ValueError(
# f"You must specify the type for the parameter {func.__name__}:{name}."
# )
return func # Maybe the type hint should be mandatory? I'm not sure.
if parameter.default == parameter.empty:
field_definitions[name] = (parameter.annotation, ...)
else:
field_definitions[name] = (parameter.annotation, parameter.default)
if field_definitions:
try:
body_model: typing.Type[BaseModel] = create_model(
func.__name__, **field_definitions
)
setattr(func, "__body_model__", body_model)
except NotImplementedError:
message = (
"If you wanna using type hint "
"to create OpenAPI docs or convert type, "
"please install `pydantic` from pypi."
)
warnings.warn(message, ImportWarning)
return func
def is_typed_dict_type(type_) -> bool:
return issubclass(type_, dict) and getattr(type_, "__annotations__", False)
def parse_typed_dict(typed_dict) -> typing.Type[BaseModel]:
"""
parse `TypedDict` to generate `pydantic.BaseModel`
"""
annotations = {}
for name, field in typed_dict.__annotations__.items():
if is_typed_dict_type(field):
annotations[name] = (parse_typed_dict(field), ...)
else:
default_value = getattr(typed_dict, name, ...)
annotations[name] = (field, default_value)
return create_model(typed_dict.__name__, **annotations) # type: ignore
TEMPLATE = """<!DOCTYPE html>
<html>
<head>
<link type="text/css" rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/swagger-ui.css">
<title>OpenAPI Docs</title>
</head>
<body>
<div id="swagger-ui"></div>
<script src="https://cdn.jsdelivr.net/npm/[email protected]/swagger-ui-bundle.js"></script>
<script>
const ui = SwaggerUIBundle({
url: './get-openapi-docs',
dom_id: '#swagger-ui',
presets: [
SwaggerUIBundle.presets.apis,
SwaggerUIBundle.SwaggerUIStandalonePreset
],
layout: "BaseLayout",
deepLinking: true,
showExtensions: true,
showCommonExtensions: true
})
</script>
</body>
</html>
"""
|
deskwork_detector.py | sgarbirodrigo/ml-sound-classifier | 118 | 12793806 | from realtime_predictor import *
emoji = {'Writing': '\U0001F4DD ', 'Scissors': '\u2701 ',
'Computer_keyboard': '\u2328 '}
def on_predicted_deskwork(ensembled_pred):
result = np.argmax(ensembled_pred)
label = conf.labels[result]
if label in ['Writing', 'Scissors', 'Computer_keyboard']:
p = ensembled_pred[result]
level = int(p*10) + 1
print(emoji[label] * level, label, p)
if __name__ == '__main__':
model = get_model(args.model_pb_graph)
# file mode
if args.input_file != '':
process_file(model, args.input_file, on_predicted_deskwork)
my_exit(model)
# device list display mode
if args.input < 0:
print_pyaudio_devices()
my_exit(model)
# normal: realtime mode
FORMAT = pyaudio.paInt16
CHANNELS = 1
audio = pyaudio.PyAudio()
stream = audio.open(
format=FORMAT,
channels=CHANNELS,
rate=conf.sampling_rate,
input=True,
input_device_index=args.input,
frames_per_buffer=conf.rt_chunk_samples,
start=False,
stream_callback=callback # uncomment for non_blocking
)
# main loop
stream.start_stream()
while stream.is_active():
main_process(model, on_predicted_deskwork)
time.sleep(0.001)
stream.stop_stream()
stream.close()
# finish
audio.terminate()
my_exit(model)
|
benchmarks/v3-app-note/run_benchmarks_pll_empirical.py | ayresdl/beagle-lib | 110 | 12793820 | <filename>benchmarks/v3-app-note/run_benchmarks_pll_empirical.py
#!/usr/bin/env python2.7
# <NAME>
import sys
import argparse
import subprocess
import re
from math import log, exp
# def gen_log_site_list(min, max, samples):
# log_range=(log(max) - log(min))
# samples_list = []
# for i in range(0, samples):
# samples_list.append(int(round(exp(log(min) + log_range/(samples-1)*i))))
# return samples_list
def main():
parser = argparse.ArgumentParser(description='generate synthetictest benchmarks')
parser.add_argument('synthetictest_path', help='path to synthetictest')
args = parser.parse_args()
file_list = ['59', '128', '354', '404']
rates = 4
precision_list = ['double']
states_list = [4]
# site_samples = 40
# sites_min = 100
# sites_max = 1000000
# sites_list = gen_log_site_list(sites_min, sites_max, site_samples)
rsrc_list = ['cpu', 'cpu-threaded', 'pll', 'pll-repeats', 'gpu', 'dual-gpu']
reps = 10
seed_list = range(1,11)
extra_args = ['--randomtree', '--stdrand', '--fulltiming', '--newparameters', '--reroot', '--newtree']
throughput_re = re.compile('tree throughput total: (.*) M partials/second')
debug_file = open('debug.txt', 'w')
header = 'iteration, precision, states, file, seed, resource, throughput'
print header
iteration = 0
for file in file_list:
for rsrc in rsrc_list:
for precision in precision_list:
for states in states_list:
for seed in seed_list:
out_string = str(iteration)
out_string += ', ' + str(precision)
out_string += ', ' + str(states)
out_string += ', ' + str(file)
out_string += ', ' + str(seed)
synthetictest_cmd = [args.synthetictest_path]
synthetictest_cmd.extend(['--alignmentdna', file + '.phy'])
synthetictest_cmd.extend(['--tree', file + '.tree'])
synthetictest_cmd.extend(['--states', str(states)])
synthetictest_cmd.extend(['--reps', str(reps), '--rates', str(rates)])
synthetictest_cmd.extend(['--seed', str(seed)])
throughput_re_index = 0
if rsrc == 'cpu':
synthetictest_cmd.extend(['--rsrc', '0', '--postorder'])
elif rsrc == 'cpu-threaded':
synthetictest_cmd.extend(['--rsrc', '0', '--enablethreads', '--postorder'])
elif rsrc == 'pll':
synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--postorder'])
elif rsrc == 'pll-repeats':
synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--pllrepeats', '--postorder'])
elif rsrc == 'gpu':
synthetictest_cmd.extend(['--rsrc', '1'])
elif rsrc == 'dual-gpu':
synthetictest_cmd.extend(['--rsrc', '1,2','--multirsrc'])
elif rsrc == 'quadruple-gpu':
synthetictest_cmd.extend(['--rsrc', '1,2,3,4','--multirsrc'])
synthetictest_cmd.extend(extra_args)
if precision == 'double':
synthetictest_cmd.extend(['--doubleprecision'])
try:
synthetictest_out = subprocess.check_output(synthetictest_cmd)
out_string += ', ' + rsrc
throughput = throughput_re.findall(synthetictest_out)
if throughput:
out_string += ', ' + throughput[throughput_re_index]
print out_string
except subprocess.CalledProcessError:
debug_file.write('ERROR')
debug_file.write('===============================================================\n')
debug_file.write(out_string + '\n')
debug_file.write(' '.join(synthetictest_cmd) + '\n')
debug_file.write(synthetictest_out)
iteration += 1
return 0
if __name__ == '__main__':
sys.exit(main())
|
app/dashapp1/callbacks.py | rseed42/dash_on_flask | 258 | 12793847 | from datetime import datetime as dt
from dash.dependencies import Input
from dash.dependencies import Output
from dash.dependencies import State
from flask_login import current_user
import pandas_datareader as pdr
def register_callbacks(dashapp):
@dashapp.callback(
Output('my-graph', 'figure'),
Input('my-dropdown', 'value'),
State('user-store', 'data'))
def update_graph(selected_dropdown_value, data):
df = pdr.get_data_yahoo(selected_dropdown_value, start=dt(2017, 1, 1), end=dt.now())
return {
'data': [{
'x': df.index,
'y': df.Close
}],
'layout': {'margin': {'l': 40, 'r': 0, 't': 20, 'b': 30}}
}
@dashapp.callback(
Output('user-store', 'data'),
Input('my-dropdown', 'value'),
State('user-store', 'data'))
def cur_user(args, data):
if current_user.is_authenticated:
return current_user.username
@dashapp.callback(Output('username', 'children'), Input('user-store', 'data'))
def username(data):
if data is None:
return ''
else:
return f'Hello {data}'
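# Wiring sketch (an assumption added for illustration, not original code): register_callbacks
# is expected to be called from the Flask application factory once the Dash app exists,
# for example:
#
#     dashapp = dash.Dash(__name__, server=flask_app, url_base_pathname='/dashboard/')
#     register_callbacks(dashapp)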
|
model-ms/benchmark/make_fixture_models.py | ncoop57/deep_parking | 126 | 12793849 | <reponame>ncoop57/deep_parking<filename>model-ms/benchmark/make_fixture_models.py
import torchvision
from fastai.vision import ImageDataBunch, cnn_learner, unet_learner, SegmentationItemList, imagenet_stats
data = ImageDataBunch.from_csv('fixtures/classification').normalize(imagenet_stats)
learner = cnn_learner(data, torchvision.models.resnet34)
learner.export()
data = (SegmentationItemList.from_folder('fixtures/segmentation/images')
.split_none()
.label_from_func(lambda x: f'fixtures/segmentation/masks/{x.stem}.jpg', classes=[0, 1, 2])
.databunch()
.normalize(imagenet_stats))
learner = unet_learner(data, torchvision.models.resnet50)
learner.export('../export.pkl')
|
src/autogen/eigs.py | ldXiao/polyfem | 228 | 12793858 | from sympy import *
from sympy.matrices import *
import os
import re
import argparse
# local
import pretty_print
def sqr(a):
return a * a
def trunc_acos(x):
tmp = Piecewise((0.0, x >= 1.0), (pi, x <= -1.0), (acos(x), True))
return tmp.subs(x, x)
def eigs_2d(mat):
a = mat[0, 0] + mat[1, 1]
delta = (mat[0, 0] - mat[1, 1])**2 + 4 * mat[0, 1]**2
tmp1 = Piecewise(
(a / 2, delta < 1e-10),
((a - sqrt(delta)) / 2.0, True)
)
tmp2 = Piecewise(
(a / 2, delta < 1e-10),
((a + sqrt(delta)) / 2.0, True)
)
return tmp1.subs(delta, delta), tmp2.subs(delta, delta)
def eigs_3d(mat):
b = mat[0] + mat[4] + mat[8]
t = sqr(mat[1]) + sqr(mat[2]) + sqr(mat[5])
p = 0.5 * (sqr(mat[0] - mat[4]) + sqr(mat[0] - mat[8]) + sqr(mat[4] - mat[8]))
p += 3.0 * t
q = 18.0 * (mat[0] * mat[4] * mat[8] + 3.0 * mat[1] * mat[2] * mat[5])
q += 2.0 * (mat[0] * sqr(mat[0]) + mat[4] * sqr(mat[4]) + mat[8] * sqr(mat[8]))
q += 9.0 * b * t
q -= 3.0 * (mat[0] + mat[4]) * (mat[0] + mat[8]) * (mat[4] + mat[8])
q -= 27.0 * (mat[0] * sqr(mat[5]) + mat[4] * sqr(mat[2]) + mat[8] * sqr(mat[1]))
delta = trunc_acos(0.5 * q / sqrt(p * sqr(p)))
p = 2.0 * sqrt(p)
tmp1 = Piecewise(
(b / 3.0, p < 1e-10),
((b + p * cos(delta / 3.0)) / 3.0, True)
)
tmp2 = Piecewise(
(b / 3.0, p < 1e-10),
((b + p * cos((delta + 2.0 * pi) / 3.0)) / 3.0, True)
)
tmp3 = Piecewise(
(b / 3.0, p < 1e-10),
((b + p * cos((delta - 2.0 * pi) / 3.0)) / 3.0, True)
)
return tmp1.subs(p, p), tmp2.subs(p, p), tmp3.subs(p, p)
def parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("output", type=str, help="path to the output folder")
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
dims = [2, 3]
cpp = "#include <polyfem/auto_eigs.hpp>\n\n\n"
hpp = "#pragma once\n\n#include <Eigen/Dense>\n\n"
cpp = cpp + "namespace polyfem {\nnamespace autogen " + "{\n"
hpp = hpp + "namespace polyfem {\nnamespace autogen " + "{\n"
hpp = hpp + "template<typename T>\nT int_pow(T val, int exp) { T res = exp <=0 ? T(0.): val; for(int i = 1; i < exp; ++i) res = res*val; return res; }\n\n"
lambdaa = Symbol('lambda', real=True)
for dim in dims:
print("processing " + str(dim))
M = zeros(dim, dim)
for i in range(0, dim):
for j in range(0, dim):
if i <= j:
M[i, j] = Symbol('m[' + str(i) + ',' + str(j) + ']', real=True)
else:
M[i, j] = Symbol('m[' + str(j) + ',' + str(i) + ']', real=True)
if dim == 2:
lambdas = eigs_2d(M)
else:
lambdas = eigs_3d(M)
# lambdas = simplify(lambdas)
c99 = pretty_print.C99_print(lambdas)
c99 = re.sub(r"m\[(\d{1}),(\d{1})\]", r'm(\1,\2)', c99)
c99 = re.sub(r"result_(\d{1})", r'res(\1)', c99)
c99 = c99.replace("0.0", "T(0)")
c99 = c99.replace(" M_PI", " T(M_PI)")
signature = "template<typename T>\nvoid eigs_" + str(dim) + "d(const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, 0, 3, 3> &m, "
signature += "Eigen::Matrix<T, Eigen::Dynamic, 1, 0, 3, 1> &res)"
hpp = hpp + signature + " {\nres.resize(" + str(dim) + ");\n" + c99 + "\n}\n\n"
cpp = cpp + "\n"
hpp = hpp + "\n"
cpp = cpp + "\n}}\n"
hpp = hpp + "\n}}\n"
path = os.path.abspath(args.output)
print("saving...")
with open(os.path.join(path, "auto_eigs.cpp"), "w") as file:
file.write(cpp)
with open(os.path.join(path, "auto_eigs.hpp"), "w") as file:
file.write(hpp)
print("done!")
|
PopStats/model.py | haoruilee/DeepSets | 213 | 12793860 | <filename>PopStats/model.py
import torch
import torch.nn as nn
import torch.nn.functional as F
# from loglinear import LogLinear
class DeepSet(nn.Module):
def __init__(self, in_features, set_features=50):
super(DeepSet, self).__init__()
self.in_features = in_features
self.out_features = set_features
self.feature_extractor = nn.Sequential(
nn.Linear(in_features, 50),
nn.ELU(inplace=True),
nn.Linear(50, 100),
nn.ELU(inplace=True),
nn.Linear(100, set_features)
)
self.regressor = nn.Sequential(
nn.Linear(set_features, 30),
nn.ELU(inplace=True),
nn.Linear(30, 30),
nn.ELU(inplace=True),
nn.Linear(30, 10),
nn.ELU(inplace=True),
nn.Linear(10, 1),
)
self.add_module('0', self.feature_extractor)
self.add_module('1', self.regressor)
def reset_parameters(self):
for module in self.children():
reset_op = getattr(module, "reset_parameters", None)
if callable(reset_op):
reset_op()
def forward(self, input):
x = input
x = self.feature_extractor(x)
x = x.sum(dim=1)
x = self.regressor(x)
return x
def __repr__(self):
return self.__class__.__name__ + '(' \
            + 'Feature Extractor=' + str(self.feature_extractor) \
+ '\n Set Feature' + str(self.regressor) + ')'
class DeepSet1(nn.Module):
def __init__(self, in_features, set_features=512):
super(DeepSet1, self).__init__()
self.in_features = in_features
self.out_features = set_features
self.feature_extractor = nn.Sequential(
nn.Linear(in_features, 512),
nn.ELU(inplace=True),
nn.Linear(512, 512),
nn.ELU(inplace=True),
nn.Linear(512, set_features)
)
self.regressor = nn.Sequential(
nn.Linear(set_features, 512),
nn.ELU(inplace=True),
nn.Linear(512, 512),
nn.ELU(inplace=True),
nn.Linear(512, 512),
nn.ELU(inplace=True),
nn.Linear(512, 1),
)
self.add_module('0', self.feature_extractor)
self.add_module('1', self.regressor)
def reset_parameters(self):
for module in self.children():
reset_op = getattr(module, "reset_parameters", None)
if callable(reset_op):
reset_op()
def forward(self, input):
x = input
x = self.feature_extractor(x)
x = x.sum(dim=1)
x = self.regressor(x)
return x
def __repr__(self):
return self.__class__.__name__ + '(' \
            + 'Feature Extractor=' + str(self.feature_extractor) \
+ '\n Set Feature' + str(self.regressor) + ')'
class DeepSet2(nn.Module):
def __init__(self, in_features, set_features=256):
super(DeepSet2, self).__init__()
self.in_features = in_features
self.out_features = set_features
self.feature_extractor = nn.Sequential(
nn.Linear(in_features, 256),
nn.ELU(inplace=True),
nn.Linear(256, 256),
nn.ELU(inplace=True),
nn.Linear(256, set_features)
)
self.log_feature_extractor = nn.Sequential(
nn.Linear(in_features, 256),
nn.ReLU(inplace=True),
nn.Linear(256, 256),
nn.ReLU(inplace=True),
nn.Linear(256, set_features),
nn.ReLU(inplace=True)
)
self.regressor = nn.Sequential(
nn.Linear(set_features*2, 512),
nn.ELU(inplace=True),
nn.Linear(512, 512),
nn.ELU(inplace=True),
nn.Linear(512, 512),
nn.ELU(inplace=True),
nn.Linear(512, 1),
)
self.add_module('0', self.feature_extractor)
self.add_module('1', self.regressor)
def reset_parameters(self):
for module in self.children():
reset_op = getattr(module, "reset_parameters", None)
if callable(reset_op):
reset_op()
def forward(self, input):
x = input
x1 = self.feature_extractor(x)
x2 = self.log_feature_extractor(x) + 0.001
x2 = x2.log()
x = torch.cat((x1, x2), 2)
x = x.sum(dim=1)
x = self.regressor(x)
return x
def __repr__(self):
return self.__class__.__name__ + '(' \
            + 'Feature Extractor=' + str(self.feature_extractor) \
+ '\n Set Feature' + str(self.regressor) + ')'
class DeepSet3(nn.Module):
def __init__(self, in_features, set_features=50):
super(DeepSet3, self).__init__()
self.in_features = in_features
self.out_features = set_features
self.feature_extractor = nn.Sequential(
nn.Linear(in_features, 50),
nn.ELU(inplace=True),
nn.Linear(50, 50),
nn.ELU(inplace=True),
nn.Linear(50, set_features)
)
self.log_feature_extractor = nn.Sequential(
nn.Linear(in_features, 50),
nn.ReLU(inplace=True),
nn.Linear(50, 50),
nn.ReLU(inplace=True),
nn.Linear(50, set_features),
nn.ReLU(inplace=True)
)
self.l1 = nn.Linear(set_features*2, 30)
self.l2 = LogLinear(set_features*2, 30)
self.lp = nn.ReLU()
self.regressor = nn.Sequential(
#nn.Linear(set_features*2, 512),
nn.ELU(inplace=True),
nn.Linear(60, 30),
nn.ELU(inplace=True),
nn.Linear(30, 10),
nn.ELU(inplace=True),
nn.Linear(10, 1),
)
self.add_module('0', self.feature_extractor)
self.add_module('1', self.regressor)
def reset_parameters(self):
for module in self.children():
reset_op = getattr(module, "reset_parameters", None)
if callable(reset_op):
reset_op()
def forward(self, input):
x = input
x1 = self.feature_extractor(x)
x2 = self.log_feature_extractor(x) + 0.001
x2 = x2.log()
x = torch.cat((x1, x2), 2)
x = x.sum(dim=1)
x1 = self.l1(x)
x2 = self.lp(x) + 0.001
x2 = self.l2(x2)
x = torch.cat((x1, x2), 1)
x = self.regressor(x)
return x
def __repr__(self):
return self.__class__.__name__ + '(' \
            + 'Feature Extractor=' + str(self.feature_extractor) \
+ '\n Set Feature' + str(self.regressor) + ')'
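if __name__ == "__main__":
    # Smoke test added for illustration: a forward pass of the plain DeepSet on a random
    # batch of 8 sets, each holding 100 elements with 5 features; the output shape is (8, 1).
    # Note that DeepSet3 additionally needs the LogLinear layer from the commented-out
    # import above before it can be instantiated.
    x = torch.randn(8, 100, 5)
    model = DeepSet(in_features=5)
    print(model(x).shape)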
|
test/hummingbot/core/data_type/test_trade_fee.py | pecuniafinance/hummingbot | 542 | 12793864 | from decimal import Decimal
from unittest import TestCase
from hummingbot.core.data_type.common import TradeType, PositionAction
from hummingbot.core.data_type.in_flight_order import TradeUpdate
from hummingbot.core.data_type.trade_fee import (
AddedToCostTradeFee,
DeductedFromReturnsTradeFee,
TokenAmount,
TradeFeeBase,
TradeFeeSchema,
)
class TradeFeeTests(TestCase):
def test_added_to_cost_spot_fee_created_for_buy_and_fee_not_deducted_from_return(self):
schema = TradeFeeSchema(
percent_fee_token="HBOT",
maker_percent_fee_decimal=Decimal("1"),
taker_percent_fee_decimal=Decimal("1"),
buy_percent_fee_deducted_from_returns=False,
)
fee = TradeFeeBase.new_spot_fee(
fee_schema=schema,
trade_type=TradeType.BUY,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(AddedToCostTradeFee, type(fee))
self.assertEqual(Decimal("1.1"), fee.percent)
self.assertEqual("HBOT", fee.percent_token)
self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)
def test_deducted_from_return_spot_fee_created_for_buy_and_fee_deducted_from_return(self):
schema = TradeFeeSchema(
maker_percent_fee_decimal=Decimal("1"),
taker_percent_fee_decimal=Decimal("1"),
buy_percent_fee_deducted_from_returns=True,
)
fee = TradeFeeBase.new_spot_fee(
fee_schema=schema,
trade_type=TradeType.BUY,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(DeductedFromReturnsTradeFee, type(fee))
self.assertEqual(Decimal("1.1"), fee.percent)
self.assertEqual("HBOT", fee.percent_token)
self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)
def test_deducted_from_return_spot_fee_created_for_sell(self):
schema = TradeFeeSchema(
percent_fee_token="HBOT",
maker_percent_fee_decimal=Decimal("1"),
taker_percent_fee_decimal=Decimal("1"),
buy_percent_fee_deducted_from_returns=False,
)
fee = TradeFeeBase.new_spot_fee(
fee_schema=schema,
trade_type=TradeType.SELL,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(DeductedFromReturnsTradeFee, type(fee))
self.assertEqual(Decimal("1.1"), fee.percent)
self.assertEqual("HBOT", fee.percent_token)
self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)
schema.percent_fee_token = None
schema.buy_percent_fee_deducted_from_returns = True
fee = TradeFeeBase.new_spot_fee(
fee_schema=schema,
trade_type=TradeType.SELL,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(DeductedFromReturnsTradeFee, type(fee))
def test_added_to_cost_perpetual_fee_created_when_opening_positions(self):
schema = TradeFeeSchema(
maker_percent_fee_decimal=Decimal("1"),
taker_percent_fee_decimal=Decimal("1"),
buy_percent_fee_deducted_from_returns=False,
)
fee = TradeFeeBase.new_perpetual_fee(
fee_schema=schema,
position_action=PositionAction.OPEN,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(AddedToCostTradeFee, type(fee))
self.assertEqual(Decimal("1.1"), fee.percent)
self.assertEqual("HBOT", fee.percent_token)
self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)
schema.percent_fee_token = "HBOT"
fee = TradeFeeBase.new_perpetual_fee(
fee_schema=schema,
position_action=PositionAction.OPEN,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(AddedToCostTradeFee, type(fee))
def test_added_to_cost_perpetual_fee_created_when_closing_position_but_schema_has_percent_fee_token(self):
schema = TradeFeeSchema(
percent_fee_token="HBOT",
maker_percent_fee_decimal=Decimal("1"),
taker_percent_fee_decimal=Decimal("1"),
buy_percent_fee_deducted_from_returns=False,
)
fee = TradeFeeBase.new_perpetual_fee(
fee_schema=schema,
position_action=PositionAction.CLOSE,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(AddedToCostTradeFee, type(fee))
self.assertEqual(Decimal("1.1"), fee.percent)
self.assertEqual("HBOT", fee.percent_token)
self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)
def test_deducted_from_returns_perpetual_fee_created_when_closing_position_and_no_percent_fee_token(self):
schema = TradeFeeSchema(
maker_percent_fee_decimal=Decimal("1"),
taker_percent_fee_decimal=Decimal("1"),
buy_percent_fee_deducted_from_returns=False,
)
fee = TradeFeeBase.new_perpetual_fee(
fee_schema=schema,
position_action=PositionAction.CLOSE,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(DeductedFromReturnsTradeFee, type(fee))
self.assertEqual(Decimal("1.1"), fee.percent)
self.assertEqual("HBOT", fee.percent_token)
self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)
def test_added_to_cost_json_serialization(self):
token_amount = TokenAmount(token="COINALPHA", amount=Decimal("20.6"))
fee = AddedToCostTradeFee(
percent=Decimal("0.5"),
percent_token="COINALPHA",
flat_fees=[token_amount]
)
expected_json = {
"fee_type": AddedToCostTradeFee.type_descriptor_for_json(),
"percent": "0.5",
"percent_token": "COINALPHA",
"flat_fees": [token_amount.to_json()]
}
self.assertEqual(expected_json, fee.to_json())
def test_added_to_cost_json_deserialization(self):
token_amount = TokenAmount(token="COINALPHA", amount=Decimal("20.6"))
fee = AddedToCostTradeFee(
percent=Decimal("0.5"),
percent_token="COINALPHA",
flat_fees=[token_amount]
)
self.assertEqual(fee, TradeFeeBase.from_json(fee.to_json()))
def test_deducted_from_returns_json_serialization(self):
token_amount = TokenAmount(token="COINALPHA", amount=Decimal("20.6"))
fee = DeductedFromReturnsTradeFee(
percent=Decimal("0.5"),
percent_token="COINALPHA",
flat_fees=[token_amount]
)
expected_json = {
"fee_type": DeductedFromReturnsTradeFee.type_descriptor_for_json(),
"percent": "0.5",
"percent_token": "COINALPHA",
"flat_fees": [token_amount.to_json()]
}
self.assertEqual(expected_json, fee.to_json())
def test_deducted_from_returns_json_deserialization(self):
token_amount = TokenAmount(token="CO<PASSWORD>", amount=Decimal("20.6"))
fee = DeductedFromReturnsTradeFee(
percent=Decimal("0.5"),
percent_token="COINALPHA",
flat_fees=[token_amount]
)
self.assertEqual(fee, TradeFeeBase.from_json(fee.to_json()))
def test_added_to_cost_fee_amount_in_token_does_not_look_for_convertion_rate_when_percentage_zero(self):
# Configure fee to use a percent token different from the token used to request the fee value
        # That forces the logic to need the conversion rate if the fee amount is calculated
fee = AddedToCostTradeFee(percent=Decimal("0"), percent_token="CO<PASSWORD>")
fee_amount = fee.fee_amount_in_token(
trading_pair="HBOT-COINALPHA",
price=Decimal("1000"),
order_amount=Decimal("1"),
token="BNB")
self.assertEqual(Decimal("0"), fee_amount)
def test_deducted_from_returns_fee_amount_in_token_does_not_look_for_convertion_rate_when_percentage_zero(self):
# Configure fee to use a percent token different from the token used to request the fee value
        # That forces the logic to need the conversion rate if the fee amount is calculated
fee = DeductedFromReturnsTradeFee(percent=Decimal("0"), percent_token="CO<PASSWORD>")
fee_amount = fee.fee_amount_in_token(
trading_pair="HBOT-COINALPHA",
price=Decimal("1000"),
order_amount=Decimal("1"),
token="BNB")
self.assertEqual(Decimal("0"), fee_amount)
class TokenAmountTests(TestCase):
def test_json_serialization(self):
amount = TokenAmount(token="HBOT-COINALPHA", amount=Decimal("1000.50"))
expected_json = {
"token": "HBOT-COINALPHA",
"amount": "1000.50",
}
self.assertEqual(expected_json, amount.to_json())
def test_json_deserialization(self):
amount = TokenAmount(token="HBOT-COINALPHA", amount=Decimal("1000.50"))
self.assertEqual(amount, TokenAmount.from_json(amount.to_json()))
class TradeUpdateTests(TestCase):
def test_json_serialization(self):
token_amount = TokenAmount(token="COINALPHA", amount=Decimal("20.6"))
fee = DeductedFromReturnsTradeFee(
percent=Decimal("0.5"),
percent_token="COINALPHA",
flat_fees=[token_amount]
)
trade_update = TradeUpdate(
trade_id="12345",
client_order_id="OID1",
exchange_order_id="EOID1",
trading_pair="HBOT-COINALPHA",
fill_timestamp=1640001112,
fill_price=Decimal("1000.11"),
fill_base_amount=Decimal("2"),
fill_quote_amount=Decimal("2000.22"),
fee=fee,
)
expected_json = trade_update._asdict()
expected_json.update({
"fill_price": "1000.11",
"fill_base_amount": "2",
"fill_quote_amount": "2000.22",
"fee": fee.to_json(),
})
self.assertEqual(expected_json, trade_update.to_json())
def test_json_deserialization(self):
token_amount = TokenAmount(token="COINALPHA", amount=Decimal("20.6"))
fee = DeductedFromReturnsTradeFee(
percent=Decimal("0.5"),
percent_token="CO<PASSWORD>",
flat_fees=[token_amount]
)
trade_update = TradeUpdate(
trade_id="12345",
client_order_id="OID1",
exchange_order_id="EOID1",
trading_pair="HBOT-COINALPHA",
fill_timestamp=1640001112,
fill_price=Decimal("1000.11"),
fill_base_amount=Decimal("2"),
fill_quote_amount=Decimal("2000.22"),
fee=fee,
)
self.assertEqual(trade_update, TradeUpdate.from_json(trade_update.to_json()))
|
mmdnn/conversion/examples/darknet/extractor.py | kmader/MMdnn | 3,442 | 12793886 | #----------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#----------------------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import print_function
import os
from mmdnn.conversion.examples.darknet import darknet as cdarknet
from mmdnn.conversion.examples.imagenet_test import TestKit
from mmdnn.conversion.examples.extractor import base_extractor
from mmdnn.conversion.common.utils import download_file
class darknet_extractor(base_extractor):
_base_model_url = "https://raw.githubusercontent.com/pjreddie/darknet/master/"
architecture_map = {
'yolov3' : {
'config' : _base_model_url + "cfg/yolov3.cfg",
'weights' : "https://pjreddie.com/media/files/yolov3.weights"
},
'yolov2' :{
'config' : _base_model_url + "cfg/yolov2.cfg",
'weights' : "https://pjreddie.com/media/files/yolov2.weights"
}
}
@classmethod
def download(cls, architecture, path = './'):
if cls.sanity_check(architecture):
cfg_name = architecture + ".cfg"
architecture_file = download_file(cls.architecture_map[architecture]['config'], directory=path, local_fname=cfg_name)
if not architecture_file:
return None
weight_name = architecture + ".weights"
weight_file = download_file(cls.architecture_map[architecture]['weights'], directory=path, local_fname=weight_name)
if not weight_file:
return None
print("Darknet Model {} saved as [{}] and [{}].".format(architecture, architecture_file, weight_file))
return (architecture_file, weight_file)
else:
return None
@classmethod
def inference(cls, architecture, files, model_path, image_path):
import numpy as np
if cls.sanity_check(architecture):
download_file(cls._base_model_url + "cfg/coco.data", directory='./')
download_file(cls._base_model_url + "data/coco.names", directory='./data/')
print(files)
net = cdarknet.load_net(files[0].encode(), files[1].encode(), 0)
meta = cdarknet.load_meta("coco.data".encode())
r = cdarknet.detect(net, meta, image_path.encode())
# print(r)
return r
else:
return None
# d = darknet_extractor()
# model_filename = d.download('yolov3')
# print(model_filename)
# image_path = "./mmdnn/conversion/examples/data/dog.jpg"
# model_path = "./"
# d = darknet_extractor()
# result = d.inference('yolov3', model_filename, model_path, image_path = image_path)
# print(result)
|
data/transcoder_evaluation_gfg/python/SCHEDULE_JOBS_SERVER_GETS_EQUAL_LOAD.py | mxl1n/CodeGen | 241 | 12793887 | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( a , b , n ) :
s = 0
for i in range ( 0 , n ) :
s += a [ i ] + b [ i ]
if n == 1 :
return a [ 0 ] + b [ 0 ]
if s % n != 0 :
return - 1
x = s // n
for i in range ( 0 , n ) :
if a [ i ] > x :
return - 1
if i > 0 :
a [ i ] += b [ i - 1 ]
b [ i - 1 ] = 0
if a [ i ] == x :
continue
y = a [ i ] + b [ i ]
if i + 1 < n :
y += b [ i + 1 ]
if y == x :
a [ i ] = y
b [ i ] = 0
if i + 1 < n : b [ i + 1 ] = 0
continue
if a [ i ] + b [ i ] == x :
a [ i ] += b [ i ]
b [ i ] = 0
continue
if i + 1 < n and a [ i ] + b [ i + 1 ] == x :
a [ i ] += b [ i + 1 ]
b [ i + 1 ] = 0
continue
return - 1
for i in range ( 0 , n ) :
if b [ i ] != 0 :
return - 1
return x
#TOFILL
if __name__ == '__main__':
param = [
([4, 9, 16, 18, 20, 23, 24, 25, 25, 26, 29, 30, 35, 40, 41, 43, 44, 46, 53, 53, 56, 56, 58, 60, 62, 70, 80, 80, 80, 82, 86, 90, 92, 92, 95],[3, 15, 16, 16, 18, 26, 30, 32, 32, 35, 37, 41, 42, 43, 48, 49, 49, 54, 55, 57, 65, 66, 67, 67, 68, 83, 85, 89, 89, 90, 91, 93, 96, 97, 99],29,),
([-24, 70, -74, -90, 72, 50, -94, 86, -58, -68, 42, 0, 98, -70, -14, -32, 6, 74, 64, -78, 86, -42, -56, 2, -34, -46, 70, -62, 50, -58, -58, 42, 86, 96, -8, 8, -22, -14, -14, 98, 2, 98, -28],[-26, 36, 48, 48, -38, -86, 90, -62, 30, -4, 82, 16, 32, -6, 58, 82, -66, -40, 52, -78, 94, -70, -80, -68, -58, -26, 50, -78, -90, -48, -28, 48, 56, 50, 72, -22, -2, 8, -94, 92, -44, -66, -30],34,),
([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],13,),
([98, 18, 50, 36, 88, 75, 2, 40, 74, 19, 63, 82, 77, 5, 59, 97, 70, 50, 71, 90, 90, 61, 63, 99],[93, 25, 16, 42, 55, 61, 69, 68, 95, 28, 40, 90, 1, 86, 76, 40, 13, 47, 71, 4, 64, 54, 84, 45],16,),
([-80, -64, -64, -64, -64, -62, -54, -48, -44, -44, -38, -30, -30, -26, -14, -12, -10, -6, -6, 6, 22, 22, 22, 26, 28, 50, 52, 70, 86, 86, 88, 90],[-96, -94, -80, -74, -64, -56, -52, -32, -30, -24, -12, -12, -8, -2, 4, 8, 16, 20, 24, 24, 24, 48, 50, 54, 60, 64, 74, 80, 88, 90, 92, 92],22,),
([0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1],[1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1],20,),
([59, 61, 64],[22, 59, 85],1,),
([98, 92, 28, 42, -74, -36, 40, -8, 32, -22, -70, -22, -56, 74, 6, 6, -62, 46, 34, 2],[-62, -84, 72, 60, 10, -18, -44, -22, 14, 0, 76, 72, 96, -28, -24, 52, -74, -30, 16, 66],18,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],34,),
([72, 97, 79, 21, 83, 2, 31, 59, 6, 11, 79, 97],[27, 71, 87, 36, 73, 37, 80, 34, 57, 17, 88, 52],9,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) |
autograd/scipy/stats/beta.py | gautam1858/autograd | 6,119 | 12793911 | <gh_stars>1000+
from __future__ import absolute_import
import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.scipy.special import beta, psi
cdf = primitive(scipy.stats.beta.cdf)
logpdf = primitive(scipy.stats.beta.logpdf)
pdf = primitive(scipy.stats.beta.pdf)
def grad_beta_logpdf_arg0(x, a, b):
return (1 + a * (x-1) + x * (b-2)) / (x * (x-1))
def grad_beta_logpdf_arg1(x, a, b):
return np.log(x) - psi(a) + psi(a + b)
def grad_beta_logpdf_arg2(x, a, b):
return np.log1p(-x) - psi(b) + psi(a + b)
defvjp(cdf, lambda ans, x, a, b: unbroadcast_f(x, lambda g: g * np.power(x, a-1) * np.power(1-x, b-1) / beta(a, b)), argnums=[0])
defvjp(logpdf,
lambda ans, x, a, b: unbroadcast_f(x, lambda g: g * grad_beta_logpdf_arg0(x, a, b)),
lambda ans, x, a, b: unbroadcast_f(a, lambda g: g * grad_beta_logpdf_arg1(x, a, b)),
lambda ans, x, a, b: unbroadcast_f(b, lambda g: g * grad_beta_logpdf_arg2(x, a, b)))
defvjp(pdf,
lambda ans, x, a, b: unbroadcast_f(x, lambda g: g * ans * grad_beta_logpdf_arg0(x, a, b)),
lambda ans, x, a, b: unbroadcast_f(a, lambda g: g * ans * grad_beta_logpdf_arg1(x, a, b)),
lambda ans, x, a, b: unbroadcast_f(b, lambda g: g * ans * grad_beta_logpdf_arg2(x, a, b)))
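if __name__ == "__main__":
    # Sanity check added for illustration (assumes the autograd package is installed):
    # the autograd derivative of logpdf with respect to x should match the closed-form
    # expression grad_beta_logpdf_arg0 used in the VJP definitions above.
    from autograd import grad
    x, a, b = 0.4, 2.0, 3.0
    print(grad(lambda t: logpdf(t, a, b))(x), grad_beta_logpdf_arg0(x, a, b))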
|
alembic/versions/140a25d5f185_create_tokens_table.py | alvierahman90/matrix-registration | 160 | 12793914 | """create tokens table
Revision ID: 140a25d5f185
Revises:
Create Date: 2020-12-12 01:44:28.195736
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import Table, Column, Integer, String, Boolean, DateTime, ForeignKey
from sqlalchemy.engine.reflection import Inspector
from flask_sqlalchemy import SQLAlchemy
# revision identifiers, used by Alembic.
revision = '140a25d5f185'
down_revision = None
branch_labels = None
depends_on = None
db = SQLAlchemy()
def upgrade():
conn = op.get_bind()
inspector = Inspector.from_engine(conn)
tables = inspector.get_table_names()
if 'ips' not in tables:
op.create_table(
'ips',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('address', sa.String(255), nullable=True)
)
if 'tokens' not in tables:
op.create_table(
'tokens',
sa.Column('name', String(255), primary_key=True),
sa.Column('expiration_date', DateTime, nullable=True),
sa.Column('max_usage', Integer, default=1),
sa.Column('used', Integer, default=0),
sa.Column('disabled', Boolean, default=False),
sa.Column('ips', Integer, ForeignKey('association.id'))
)
else:
try:
with op.batch_alter_table('tokens') as batch_op:
batch_op.alter_column('ex_date', new_column_name='expiration_date', nullable=True)
batch_op.alter_column('one_time', new_column_name='max_usage')
batch_op.add_column(
Column('disabled', Boolean, default=False)
)
except KeyError:
pass
if 'association' not in tables:
op.create_table(
'association', db.Model.metadata,
Column('ips', String, ForeignKey('ips.address'), primary_key=True),
Column('tokens', Integer, ForeignKey('tokens.name'), primary_key=True)
)
op.execute("update tokens set expiration_date=null where expiration_date='None'")
def downgrade():
op.alter_column('tokens', 'expiration_date', new_column_name='ex_date')
op.alter_column('tokens', 'max_usage', new_column_name='one_time')
|
earth_enterprise/src/scons/packageUtils_test.py | ezeeyahoo/earthenterprise | 2,661 | 12793916 | <reponame>ezeeyahoo/earthenterprise
#-*- Python -*-
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
import re
from packageUtils import IsPackageVersionSufficient
from packageUtils import UsesRPM
from packageUtils import FileContains
from packageUtils import GetDEBPackageInfo
failure_list = []
# test FileContains
test_file = '/etc/profile'
if FileContains('/IDontExist', re.compile(r"a")):
failure_list.append("FileContains Failed: returned true for non-existing file")
if FileContains(test_file, re.compile(r"PROFILE")) == False:
failure_list.append("FileContains Failed: did not find PROFILE in /etc/hostname")
if FileContains(test_file, re.compile(r"not anything here")):
failure_list.append("FileContains Failed: found garbage search string in /etc/hostname")
# test UsesRPM
print("Basic checks for Ubuntu vs RPM\nMake sure these coincide with your current system.\n\n")
uses_rpm = "does not use RPM"
if UsesRPM():
uses_rpm = "uses RPM"
print("This machine %s" % uses_rpm)
# test GetDEBPackageInfo for non-RPM systems
if UsesRPM() == False:
package_name = "gdal-ge"
package_results = GetDEBPackageInfo (package_name)
if len(package_results) != 2 | package_results[1] == False:
failure_list.append("%s not installed: GetDEBPackageInfo returns %s" %
(package_name, package_results))
# test Package check
valid_test_packages = [['apache-ge-devel', '2.2.2'],
['apache-ge-devel', '2.2.2.1'],
['jdk-ge', '1.6.0-1'],
['jdk-ge', '1.6.0-0']];
invalid_test_packages = [['apache-ge-devel9', '2.2.2'],
['apache-ge-devel', '10.2.2.1'],
['j9dk-ge', '1.6.0-1'],
['jdk-ge', '1.99.0-0']];
for package_list in valid_test_packages:
if IsPackageVersionSufficient(package_list[0], package_list[1]) == False:
failure_list.append("Failed test that should pass: %s" % (package_list))
print("Test is now looking for invalid packages (error messages expected until tests are complete).\n\n")
for package_list in invalid_test_packages:
if IsPackageVersionSufficient(package_list[0], package_list[1]):
failure_list.append("Passed test that should fail: %s" % (package_list))
print("\n\nTests complete.\n\n")
if len(failure_list) > 0:
print("\n\n%s TEST FAILURES" % len(failure_list))
for s in failure_list:
print(s)
else:
print("\n\nSUCCESS: All tests succeeded!")
|
ch13/myproject_virtualenv/src/django-myproject/myproject/apps/likes/views.py | PacktPublishing/Django-3-Web-Development-Cookbook | 159 | 12793928 | <reponame>PacktPublishing/Django-3-Web-Development-Cookbook
import structlog
from django.contrib.contenttypes.models import ContentType
from django.http import JsonResponse
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from .models import Like
from .templatetags.likes_tags import liked_count
logger = structlog.get_logger("django_structlog")
@never_cache
@csrf_exempt
def json_set_like(request, content_type_id, object_id):
"""
Sets the object as a favorite for the current user
"""
result = {
"success": False,
}
if request.user.is_authenticated and request.method == "POST":
content_type = ContentType.objects.get(id=content_type_id)
obj = content_type.get_object_for_this_type(pk=object_id)
like, is_created = Like.objects.get_or_create(
content_type=ContentType.objects.get_for_model(obj),
object_id=obj.pk,
user=request.user)
if is_created:
logger.info("like_created", content_type_id=content_type.pk, object_id=obj.pk)
else:
like.delete()
logger.info("like_deleted", content_type_id=content_type.pk, object_id=obj.pk)
result = {
"success": True,
"action": "add" if is_created else "remove",
"count": liked_count(obj),
}
return JsonResponse(result)
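
# Illustrative wiring sketch (editorial assumption, not part of the original file):
# the view above expects the content type id and object id in the URL, so a
# urls.py entry for it might look like the following; the path and route name
# below are hypothetical.
#
# from django.urls import path
# from .views import json_set_like
#
# urlpatterns = [
#     path(
#         "<int:content_type_id>/<int:object_id>/",
#         json_set_like,
#         name="json_set_like",
#     ),
# ]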
|
notebooks-text-format/cond_bmm_emnist.py | arpitvaghela/probml-notebooks | 166 | 12793940 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="M_qo7DmLJKLP"
# #Class-Conditional Bernoulli Mixture Model for EMNIST
# + [markdown] id="TU1pCzcIJHTm"
# ## Setup
#
# + id="400WanLyGA2C"
# !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
# %cd -q /pyprobml/scripts
# + id="k1rLl6dHH7Wh"
# !pip install -q superimport
# !pip install -q distrax
# + id="cLpBn5KQeB46"
from conditional_bernoulli_mix_lib import ClassConditionalBMM
from conditional_bernoulli_mix_utils import fake_test_data, encode, decode, get_decoded_samples, get_emnist_images_per_class
from noisy_spelling_hmm import Word
from jax import vmap
import jax.numpy as jnp
import jax
from jax.random import PRNGKey, split
import numpy as np
from matplotlib import pyplot as plt
# + colab={"base_uri": "https://localhost:8080/"} id="ey9k06RweuKc" outputId="38131e5a-82fb-49db-c4d3-f4364a643152"
select_n = 25
dataset, targets = get_emnist_images_per_class(select_n)
dataset, targets = jnp.array(dataset), jnp.array(targets)
# + [markdown] id="KwNq7HYYLPO9"
# ## Initialization of Class Conditional BMMs
# + colab={"base_uri": "https://localhost:8080/"} id="UABtUDPjffFt" outputId="d873a708-542c-44e6-8c72-2c5908c7bbad"
n_mix = 30
n_char = 52
mixing_coeffs = jnp.array(np.full((n_char, n_mix), 1./n_mix))
p_min, p_max = 0.4, 0.6
n_pixels = 28 * 28
probs = jnp.array(np.random.uniform(p_min, p_max, (n_char, n_mix, n_pixels)))
class_priors = jnp.array(np.full((n_char,), 1./n_char))
cbm_gd = ClassConditionalBMM(mixing_coeffs=mixing_coeffs, probs=probs, class_priors=class_priors, n_char=n_char)
cbm_em = ClassConditionalBMM(mixing_coeffs=mixing_coeffs, probs=probs, class_priors=class_priors, n_char=n_char)
# + [markdown] id="Qa95Fua5Kc3i"
# ## Full Batch Gradient Descent
# + colab={"base_uri": "https://localhost:8080/", "height": 336} id="PDzuEjs9Kewi" outputId="c81916c0-c6b7-45bd-d308-eab878afe281"
num_epochs, batch_size = 100, len(dataset)
losses = cbm_gd.fit_sgd(dataset.reshape((-1, n_pixels)), targets, batch_size, num_epochs = num_epochs)
plt.plot(losses, color="k", linewidth=3)
plt.xlabel("Iteration")
plt.ylabel("Negative Log Likelihood")
plt.show()
# + [markdown] id="37mNMNrpInfh"
# ## EM Algorithm
# + colab={"base_uri": "https://localhost:8080/", "height": 336} id="FJeBzIKYfsUk" outputId="9d8db485-a251-4b1a-a6e5-93833c83dce6"
losses = cbm_em.fit_em(dataset, targets, 8)
plt.plot(losses, color="k", linewidth=3)
plt.xlabel("Iteration")
plt.ylabel("Negative Log Likelihood")
plt.show()
# + [markdown] id="NjCQpoH1Iuuf"
# ## Plot of the Probabilities of Components Distribution
# + id="KkyAHDW4JgyM"
def plot_components_dist(cbm, n_mix):
fig = plt.figure(figsize=(45, 20))
for k in range(n_mix):
for cls in range(cbm.num_of_classes):
plt.subplot(n_mix ,cbm.num_of_classes, cbm.num_of_classes*k + cls +1)
plt.imshow(1 - cbm.model.components_distribution.distribution.probs[cls][k,:].reshape((28,28)), cmap = "gray")
plt.axis('off')
plt.tight_layout()
plt.show()
# + [markdown] id="J8KLkCWpNAeF"
# ### GD
# + colab={"base_uri": "https://localhost:8080/", "height": 666} id="DSOiuNeAM8gl" outputId="dce9416a-b646-423d-b4bf-c78728db1cab"
plot_components_dist(cbm_gd, n_mix)
# + [markdown] id="FO31plUVNDSO"
# ### EM
# + id="ZM43qs6FfvlP" colab={"base_uri": "https://localhost:8080/", "height": 666} outputId="81a095f1-1099-4809-90a8-272dbed11662"
plot_components_dist(cbm_em, n_mix)
# + [markdown] id="IqRdcklzOeAY"
# ## Sampling
# + id="wgI6sFWKN4ax"
p1, p2, p3 = 0.4, 0.1, 2e-3
n_misspelled = 1 # number of misspelled words created for each class
vocab = ['book', 'bird', 'bond', 'bone', 'bank', 'byte', 'pond', 'mind', 'song', 'band']
rng_key = PRNGKey(0)
keys = [dev_array for dev_array in split(rng_key, len(vocab))]
# + id="x3GpZ8jbf11N" colab={"base_uri": "https://localhost:8080/"} outputId="5a348b69-bdf4-4f80-f059-1062ba2fbb88"
hmms = {word: Word(word, p1, p2, p3, n_char, "all", mixing_coeffs=cbm_em.model.mixture_distribution.probs,
initial_probs=cbm_em.model.components_distribution.distribution.probs, n_mix=n_mix) for word in vocab}
samples = jax.tree_multimap(lambda word, key: hmms[word].n_sample(n_misspelled, key), vocab, keys)
# + id="7VXVsobcg_KO" colab={"base_uri": "https://localhost:8080/"} outputId="3e915a79-7f5c-4131-d6ee-97f11c83d86f"
decoded_words = vmap(decode, in_axes = (0, None, None))(jnp.array(samples)[:, :, :, -1].reshape((n_misspelled * len(vocab), -1)), n_char + 1, "all")
get_decoded_samples(decoded_words)
# + [markdown] id="xrRy8MG0afR8"
# ### Figure
# + id="O0-HaN5rQAvP"
def plot_samples(samples):
samples = np.array(samples)[:, :, :, :-1].reshape((-1, 28, 28))
fig, axes = plt.subplots(ncols=4, nrows=10, figsize=(4, 10))
fig.subplots_adjust(hspace = .2, wspace=.001)
for i, ax in enumerate(axes.flatten()):
ax.imshow(samples[i], cmap="gray")
ax.set_axis_off()
fig.tight_layout()
plt.show()
# + id="EbZn9vrfhei4" colab={"base_uri": "https://localhost:8080/", "height": 728} outputId="114217bf-cadb-4331-82ef-b4844c038342"
plot_samples(samples)
# + [markdown] id="eNDmwV7EPyrR"
# ## Calculation of Log Likelihoods for Test Data
# + id="525MUl5HPe1K"
# noisy words
test_words = ['bo--', '-On-', 'b-N-', 'B---', '-OnD', 'b--D', '---D', '--Nd', 'B-nD', '-O--', 'b--d', '--n-']
test_images = fake_test_data(test_words, dataset, targets, n_char + 1, "all")
# + id="1dFCdVNgPYtJ"
def plot_log_likelihood(hmms, test_words, test_images, vocab):
fig, axes = plt.subplots(4, 3, figsize=(20, 10))
for i, (ax, img, word) in enumerate(zip(axes.flat, test_images, test_words)):
flattened_img = img.reshape((len(img), -1))
loglikelihoods = jax.tree_map(lambda w: jnp.sum(hmms[w].loglikelihood(word, flattened_img)), vocab)
loglikelihoods = jnp.array(loglikelihoods)
ax.bar(vocab, jnp.exp(jax.nn.log_softmax(loglikelihoods)), color="black")
ax.set_title(f'{word}')
plt.tight_layout()
plt.show()
# + id="qv-Df8GEhfC4" colab={"base_uri": "https://localhost:8080/", "height": 784} outputId="9be6abf3-0ecc-4ef5-e301-380c5eac38ff"
plot_log_likelihood(hmms, test_words, test_images, vocab)
|
python/tests/test_base.py | JLLeitschuh/DDF | 160 | 12793941 | from __future__ import unicode_literals
import unittest
from ddf import DDFManager, DDF_HOME
class BaseTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dm_spark = DDFManager('spark')
cls.airlines = cls.loadAirlines(cls.dm_spark)
cls.mtcars = cls.loadMtCars(cls.dm_spark)
@classmethod
def tearDownClass(cls):
cls.dm_spark.shutdown()
@classmethod
def loadAirlines(cls, dm):
table_name = 'airlines_na_pyddf_unittest'
if table_name not in [x.split('\t')[0] for x in dm.sql('show tables')]:
dm.sql('set hive.metastore.warehouse.dir=/tmp', False)
dm.sql('drop table if exists {}'.format(table_name), False)
dm.sql("""create table {} (Year int,Month int,DayofMonth int,
DayOfWeek int,DepTime int,CRSDepTime int,ArrTime int,
CRSArrTime int,UniqueCarrier string, FlightNum int,
TailNum string, ActualElapsedTime int, CRSElapsedTime int,
AirTime int, ArrDelay int, DepDelay int, Origin string,
Dest string, Distance int, TaxiIn int, TaxiOut int, Cancelled int,
CancellationCode string, Diverted string, CarrierDelay int,
WeatherDelay int, NASDelay int, SecurityDelay int, LateAircraftDelay int )
ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
""".format(table_name), False)
dm.sql("load data local inpath '{}/resources/test/airlineWithNA.csv' "
"into table {}".format(DDF_HOME, table_name), False)
return dm.sql2ddf('select * from {}'.format(table_name), False)
@classmethod
def loadMtCars(cls, dm):
table_name = 'mtcars_pyddf_unittest'
if table_name not in [x.split('\t')[0] for x in dm.sql('show tables')]:
dm.sql('set shark.test.data.path=resources', False)
# session.sql('set hive.metastore.warehouse.dir=/tmp')
dm.sql('drop table if exists {}'.format(table_name), False)
dm.sql("CREATE TABLE {} (mpg double, cyl int, disp double, "
"hp int, drat double, wt double, "
"qesc double, vs int, am int, gear int, carb int)"
" ROW FORMAT DELIMITED FIELDS TERMINATED BY ' '".format(table_name), False)
dm.sql("LOAD DATA LOCAL INPATH '{}/resources/test/mtcars' "
"INTO TABLE {}".format(DDF_HOME, table_name), False)
return dm.sql2ddf('select * from {}'.format(table_name), False)
|
datasets/code_x_glue_cc_cloze_testing_all/generated_definitions.py | WojciechKusa/datasets | 10,608 | 12793960 | DEFINITIONS = {
"go": {
"class_name": "CodeXGlueCcClozeTestingAll",
"dataset_type": "Code-Code",
"description": "CodeXGLUE ClozeTesting-all dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"dir_name": "ClozeTesting-all",
"name": "go",
"parameters": {"language": "go"},
"project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/ClozeTesting-all/data/cloze-all/go",
"sizes": {"train": 25282},
},
"java": {
"class_name": "CodeXGlueCcClozeTestingAll",
"dataset_type": "Code-Code",
"description": "CodeXGLUE ClozeTesting-all dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"dir_name": "ClozeTesting-all",
"name": "java",
"parameters": {"language": "java"},
"project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/ClozeTesting-all/data/cloze-all/java",
"sizes": {"train": 40492},
},
"javascript": {
"class_name": "CodeXGlueCcClozeTestingAll",
"dataset_type": "Code-Code",
"description": "CodeXGLUE ClozeTesting-all dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"dir_name": "ClozeTesting-all",
"name": "javascript",
"parameters": {"language": "javascript"},
"project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/ClozeTesting-all/data/cloze-all/javascript",
"sizes": {"train": 13837},
},
"php": {
"class_name": "CodeXGlueCcClozeTestingAll",
"dataset_type": "Code-Code",
"description": "CodeXGLUE ClozeTesting-all dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"dir_name": "ClozeTesting-all",
"name": "php",
"parameters": {"language": "php"},
"project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/ClozeTesting-all/data/cloze-all/php",
"sizes": {"train": 51930},
},
"python": {
"class_name": "CodeXGlueCcClozeTestingAll",
"dataset_type": "Code-Code",
"description": "CodeXGLUE ClozeTesting-all dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"dir_name": "ClozeTesting-all",
"name": "python",
"parameters": {"language": "python"},
"project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/ClozeTesting-all/data/cloze-all/python",
"sizes": {"train": 40137},
},
"ruby": {
"class_name": "CodeXGlueCcClozeTestingAll",
"dataset_type": "Code-Code",
"description": "CodeXGLUE ClozeTesting-all dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"dir_name": "ClozeTesting-all",
"name": "ruby",
"parameters": {"language": "ruby"},
"project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/ClozeTesting-all/data/cloze-all/ruby",
"sizes": {"train": 4437},
},
}
|
docs/source/plots/var_plot_forecast.py | madhushree14/statsmodels | 6,931 | 12793970 | from var_plots import plot_forecast
plot_forecast()
|
tests/anomaly/test_default.py | cnll0075/Merlion | 2,215 | 12793976 | #
# Copyright (c) 2022 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
from abc import ABC
import logging
import os
from os.path import abspath, dirname, join
import sys
import unittest
import torch
import random
import numpy as np
import pandas as pd
from merlion.models.defaults import DefaultDetector, DefaultDetectorConfig
from merlion.plot import plot_anoms_plotly
from merlion.post_process.threshold import AggregateAlarms
from merlion.utils import TimeSeries
from ts_datasets.anomaly import *
rootdir = dirname(dirname(dirname(abspath(__file__))))
logger = logging.getLogger(__name__)
def set_random_seeds():
torch.manual_seed(12345)
random.seed(12345)
np.random.seed(12345)
def get_train_test_splits(df: pd.DataFrame, metadata: pd.DataFrame, n: int) -> (pd.DataFrame, pd.DataFrame, np.ndarray):
train_df = df[metadata.trainval]
test_df = df[~metadata.trainval]
test_labels = pd.DataFrame(metadata[~metadata.trainval].anomaly)
return train_df.tail(n), test_df.head(n), test_labels[:n]
class Mixin(ABC):
def test_score(self):
print("-" * 80)
logger.info("test_score\n" + "-" * 80 + "\n")
self.run_init()
logger.info("Training model...\n")
train_ts = TimeSeries.from_pd(self.train_df)
self.model.train(train_ts)
test_ts = TimeSeries.from_pd(self.test_df)
score_ts = self.model.get_anomaly_score(test_ts)
scores = score_ts.to_pd().values.flatten()
min_score, max_score, sum_score = min(scores), max(scores), sum(scores)
logger.info(f"scores look like: {scores[:10]}")
logger.info(f"min score = {min_score}")
logger.info(f"max score = {max_score}")
logger.info(f"sum score = {sum_score}")
def test_save_load(self):
print("-" * 80)
logger.info("test_save_load\n" + "-" * 80 + "\n")
self.run_init()
logger.info("Training model...\n")
train_ts = TimeSeries.from_pd(self.train_df)
self.model.train(train_ts)
multi = train_ts.dim > 1
path = join(rootdir, "tmp", "default", "anom", "multi" if multi else "uni")
self.model.save(dirname=path)
loaded_model = DefaultDetector.load(dirname=path)
test_ts = TimeSeries.from_pd(self.test_df)
scores = self.model.get_anomaly_score(test_ts)
scores_np = scores.to_pd().values.flatten()
loaded_model_scores = loaded_model.get_anomaly_score(test_ts)
loaded_model_scores = loaded_model_scores.to_pd().values.flatten()
self.assertEqual(len(scores_np), len(loaded_model_scores))
alarms = self.model.post_rule(scores)
loaded_model_alarms = loaded_model.post_rule(scores)
self.assertSequenceEqual(list(alarms), list(loaded_model_alarms))
def test_plot(self):
try:
import plotly
print("-" * 80)
logger.info("test_plot\n" + "-" * 80 + "\n")
self.run_init()
logger.info("Training model...\n")
train_ts = TimeSeries.from_pd(self.train_df)
self.model.train(train_ts)
multi = train_ts.dim > 1
savedir = join(rootdir, "tmp", "default", "anom")
os.makedirs(savedir, exist_ok=True)
path = join(savedir, ("multi" if multi else "uni") + ".png")
test_ts = TimeSeries.from_pd(self.test_df)
fig = self.model.plot_anomaly_plotly(
time_series=test_ts, time_series_prev=train_ts, plot_time_series_prev=True
)
plot_anoms_plotly(fig, TimeSeries.from_pd(self.test_labels))
try:
import kaleido
fig.write_image(path, engine="kaleido")
except ImportError:
logger.info("kaleido not installed, not trying to save image")
except ImportError:
logger.info("plotly not installed, skipping test case")
class TestUnivariate(unittest.TestCase, Mixin):
def run_init(self):
set_random_seeds()
self.model = DefaultDetector(
DefaultDetectorConfig(granularity="1h", threshold=AggregateAlarms(alm_threshold=1.5))
)
# Time series with anomalies in both train split and test split
df = pd.read_csv(join(rootdir, "data", "synthetic_anomaly", "horizontal_spike_anomaly.csv"))
df.timestamp = pd.to_datetime(df.timestamp, unit="s")
df = df.set_index("timestamp")
# Get training & testing splits
self.train_df = df.iloc[: -len(df) // 2, :1]
self.test_df = df.iloc[-len(df) // 2 :, :1]
self.test_labels = df.iloc[-len(df) // 2 :, -1:]
class TestMultivariate(unittest.TestCase, Mixin):
def run_init(self):
set_random_seeds()
self.model = DefaultDetector(DefaultDetectorConfig(threshold=AggregateAlarms(alm_threshold=2)))
self.dataset = MSL(rootdir=join(rootdir, "data", "smap"))
df, metadata = self.dataset[0]
self.train_df, self.test_df, self.test_labels = get_train_test_splits(df, metadata, 2000)
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", stream=sys.stdout, level=logging.INFO
)
unittest.main()
|
hal_fuzz/hal_fuzz/handlers/debug.py | diagprov/hal-fuzz | 117 | 12793997 | <gh_stars>100-1000
from unicorn.arm_const import *
def stop(uc):
print_context(uc)
input("...")
def print_context(uc):
print("==== State ====")
r0 = uc.reg_read(UC_ARM_REG_R0)
r1 = uc.reg_read(UC_ARM_REG_R1)
r2 = uc.reg_read(UC_ARM_REG_R2)
r3 = uc.reg_read(UC_ARM_REG_R3)
r4 = uc.reg_read(UC_ARM_REG_R4)
r5 = uc.reg_read(UC_ARM_REG_R5)
r7 = uc.reg_read(UC_ARM_REG_R7)
sp = uc.reg_read(UC_ARM_REG_SP)
pc = uc.reg_read(UC_ARM_REG_PC)
print("r0: 0x{:x}\nr1: 0x{:x}\nr2: 0x{:x}\nr3: 0x{:x}\nr4: 0x{:x}\nr5: 0x{:x}\nr7: 0x{:x}\npc: 0x{:x}\nsp: 0x{:x}".format(r0, r1, r2, r3, r4, r5, r7, pc, sp))
def breakpoint(uc):
import ipdb; ipdb.set_trace()
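
# Usage sketch (editorial assumption, not part of the original module): the helpers
# above take a unicorn.Uc instance, so they can be attached as a code hook; the
# emulator setup itself is elided here.
from unicorn import UC_HOOK_CODE

def install_context_hook(uc):
    # Dump the register context on every executed instruction/basic block.
    uc.hook_add(UC_HOOK_CODE, lambda uc, address, size, user_data: print_context(uc))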
|
python/interpret-core/interpret/ext/glassbox/__init__.py | prateekiiest/interpret | 2,674 | 12794003 | <reponame>prateekiiest/interpret<gh_stars>1000+
# Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
import sys
from interpret.ext.extension_utils import load_class_extensions
from interpret.ext.extension import GLASSBOX_EXTENSION_KEY, _is_valid_glassbox_explainer
load_class_extensions(
sys.modules[__name__], GLASSBOX_EXTENSION_KEY, _is_valid_glassbox_explainer
)
|
awacs/devicefarm.py | alanjjenkins/awacs | 358 | 12794005 | <filename>awacs/devicefarm.py
# Copyright (c) 2012-2021, <NAME> <<EMAIL>>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "AWS Device Farm"
prefix = "devicefarm"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
CreateDevicePool = Action("CreateDevicePool")
CreateInstanceProfile = Action("CreateInstanceProfile")
CreateNetworkProfile = Action("CreateNetworkProfile")
CreateProject = Action("CreateProject")
CreateRemoteAccessSession = Action("CreateRemoteAccessSession")
CreateTestGridProject = Action("CreateTestGridProject")
CreateTestGridUrl = Action("CreateTestGridUrl")
CreateUpload = Action("CreateUpload")
CreateVPCEConfiguration = Action("CreateVPCEConfiguration")
DeleteDevicePool = Action("DeleteDevicePool")
DeleteInstanceProfile = Action("DeleteInstanceProfile")
DeleteNetworkProfile = Action("DeleteNetworkProfile")
DeleteProject = Action("DeleteProject")
DeleteRemoteAccessSession = Action("DeleteRemoteAccessSession")
DeleteRun = Action("DeleteRun")
DeleteTestGridProject = Action("DeleteTestGridProject")
DeleteUpload = Action("DeleteUpload")
DeleteVPCEConfiguration = Action("DeleteVPCEConfiguration")
GetAccountSettings = Action("GetAccountSettings")
GetDevice = Action("GetDevice")
GetDeviceInstance = Action("GetDeviceInstance")
GetDevicePool = Action("GetDevicePool")
GetDevicePoolCompatibility = Action("GetDevicePoolCompatibility")
GetInstanceProfile = Action("GetInstanceProfile")
GetJob = Action("GetJob")
GetNetworkProfile = Action("GetNetworkProfile")
GetOfferingStatus = Action("GetOfferingStatus")
GetProject = Action("GetProject")
GetRemoteAccessSession = Action("GetRemoteAccessSession")
GetRun = Action("GetRun")
GetSuite = Action("GetSuite")
GetTest = Action("GetTest")
GetTestGridProject = Action("GetTestGridProject")
GetTestGridSession = Action("GetTestGridSession")
GetUpload = Action("GetUpload")
GetVPCEConfiguration = Action("GetVPCEConfiguration")
InstallToRemoteAccessSession = Action("InstallToRemoteAccessSession")
ListArtifacts = Action("ListArtifacts")
ListDeviceInstances = Action("ListDeviceInstances")
ListDevicePools = Action("ListDevicePools")
ListDevices = Action("ListDevices")
ListInstanceProfiles = Action("ListInstanceProfiles")
ListJobs = Action("ListJobs")
ListNetworkProfiles = Action("ListNetworkProfiles")
ListOfferingPromotions = Action("ListOfferingPromotions")
ListOfferingTransactions = Action("ListOfferingTransactions")
ListOfferings = Action("ListOfferings")
ListProjects = Action("ListProjects")
ListRemoteAccessSessions = Action("ListRemoteAccessSessions")
ListRuns = Action("ListRuns")
ListSamples = Action("ListSamples")
ListSuites = Action("ListSuites")
ListTagsForResource = Action("ListTagsForResource")
ListTestGridProjects = Action("ListTestGridProjects")
ListTestGridSessionActions = Action("ListTestGridSessionActions")
ListTestGridSessionArtifacts = Action("ListTestGridSessionArtifacts")
ListTestGridSessions = Action("ListTestGridSessions")
ListTests = Action("ListTests")
ListUniqueProblems = Action("ListUniqueProblems")
ListUploads = Action("ListUploads")
ListVPCEConfigurations = Action("ListVPCEConfigurations")
PurchaseOffering = Action("PurchaseOffering")
RenewOffering = Action("RenewOffering")
ScheduleRun = Action("ScheduleRun")
StopJob = Action("StopJob")
StopRemoteAccessSession = Action("StopRemoteAccessSession")
StopRun = Action("StopRun")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateDeviceInstance = Action("UpdateDeviceInstance")
UpdateDevicePool = Action("UpdateDevicePool")
UpdateInstanceProfile = Action("UpdateInstanceProfile")
UpdateNetworkProfile = Action("UpdateNetworkProfile")
UpdateProject = Action("UpdateProject")
UpdateTestGridProject = Action("UpdateTestGridProject")
UpdateUpload = Action("UpdateUpload")
UpdateVPCEConfiguration = Action("UpdateVPCEConfiguration")
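
# Illustrative usage sketch (editorial assumption, not part of the generated module):
# these Action objects are normally combined into awacs policy statements, e.g.:
#
# from awacs.aws import Allow, PolicyDocument, Statement
#
# policy = PolicyDocument(
#     Version="2012-10-17",
#     Statement=[
#         Statement(
#             Effect=Allow,
#             Action=[ListProjects, GetProject],
#             Resource=["*"],
#         )
#     ],
# )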
|
tools/tube.py | fsanges/glTools | 165 | 12794006 | <filename>tools/tube.py<gh_stars>100-1000
import maya.cmds as mc
import glTools.tools.controlBuilder
import glTools.utils.attach
import glTools.utils.base
import glTools.utils.attribute
import glTools.utils.component
import glTools.utils.stringUtils
def buildProfile(radius=1,spans=8):
'''
Create tube profile curve (circle)
@param radius: Profile radius
@type radius: float
@param spans: Number of profile curve spans
@type spans: int
'''
crv = mc.circle(c=[0,0,0],nr=[0,0,1],sw=360,r=radius,s=spans,d=3,ch=False)
return crv
def buildOffsetCurve(crv):
'''
'''
prefix = glTools.utils.stringUtils.stripSuffix(crv)
offsetCrvShape = mc.createNode('nurbsCurve',n=prefix+'_offsetCrvShape')
offsetCrv = mc.listRelatives(offsetCrvShape,p=True,pa=True)[0]
mc.connectAttr(crv+'.worldSpace[0]',offsetCrvShape+'.create',f=True)
return offsetCrv
def buildSubCurveDetach(crv):
'''
'''
# Get Prefix
prefix = glTools.utils.stringUtils.stripSuffix(crv)
# Prep Curve
mc.rebuildCurve(crv,ch=False,rpo=True,rt=0,end=1,kr=0,kcp=1,kep=1,kt=0,s=0,d=3)
mc.delete(crv,ch=True)
# Detach Curve
detach = mc.detachCurve(crv,p=(0.001,0.999),k=(0,1,0),rpo=False)
detachCrv = detach[1]
detachNode = detach[-1]
mc.delete(detach[0],detach[2])
# Connect Detach Min/Max
	mc.addAttr(detachCrv,ln='min',min=0,max=0.999,dv=0,k=True)
	mc.addAttr(detachCrv,ln='max',min=0.001,max=1,dv=1,k=True)
	mc.addAttr(detachCrv,ln='offset',min=-1,max=1,dv=1,k=True)
	minAdd = mc.createNode('addDoubleLinear',n=prefix+'_minAdd_addDoubleLinear')
	maxAdd = mc.createNode('addDoubleLinear',n=prefix+'_maxAdd_addDoubleLinear')
	minMaxClamp = mc.createNode('clamp',n=prefix+'_minMax_clamp')
	mc.connectAttr(detachCrv+'.min',minAdd+'.input1',f=True)
	mc.connectAttr(detachCrv+'.offset',minAdd+'.input2',f=True)
	mc.connectAttr(detachCrv+'.max',maxAdd+'.input1',f=True)
	mc.connectAttr(detachCrv+'.offset',maxAdd+'.input2',f=True)
mc.connectAttr(minAdd+'.output',minMaxClamp+'.inputR',f=True)
mc.connectAttr(maxAdd+'.output',minMaxClamp+'.inputB',f=True)
mc.setAttr(minMaxClamp+'.min',0,0,0.0001)
mc.setAttr(minMaxClamp+'.max',0.9999,0,0)
mc.connectAttr(minMaxClamp+'.outputR',detachNode+'.parameter[0]',f=True)
mc.connectAttr(minMaxClamp+'.outputB',detachNode+'.parameter[1]',f=True)
# Return Result
return detachCrv
def buildCurveRig(crv):
'''
'''
# Get Prefix
prefix = glTools.utils.stringUtils.stripSuffix(crv)
# Build Joints
pts = glTools.utils.base.getPointArray(crv)
jnts = []
mc.select(cl=True)
for i in range(len(pts)):
ind = glTools.utils.stringUtils.alphaIndex(i)
jnt = mc.joint(p=pts[i],n=prefix+'_fk'+ind+'_jnt')
mc.joint()
mc.select(jnt)
# Orient Joints
# Build FK
# Build Offset
def buildSubCurve(crv):
'''
'''
# Build Sub Curve
prefix = glTools.utils.stringUtils.stripSuffix(crv)
subCrvShape = mc.createNode('nurbsCurve',n=prefix+'_subCrvShape')
subCrv = mc.listRelatives(subCrvShape,p=True,pa=True)[0]
subCrvNode = mc.createNode('subCurve',n=prefix+'_subCurve')
# Connect Sub Curve
mc.connectAttr(crv+'.worldSpace[0]',subCrvNode+'.inputCurve',f=True)
mc.connectAttr(subCrvNode+'.outputCurve',subCrvShape+'.create',f=True)
# Connect Sub Curve Min/Max
mc.addAttr(subCrv,ln='min',min=0,max=0.999,dv=0,k=True)
mc.addAttr(subCrv,ln='max',min=0.001,max=1,dv=1,k=True)
mc.connectAttr(subCrv+'.min',subCrvNode+'.minValue',f=True)
mc.connectAttr(subCrv+'.max',subCrvNode+'.maxValue',f=True)
mc.setAttr(subCrvNode+'.relative',1)
# Return Result
return subCrv
def resetCV(cvs):
'''
'''
# Check CVs
if not cvs: return None
cvList = mc.filterExpand(cvs,ex=True,sm=28)
# Reset CVs
for cv in cvList:
crv = mc.ls(cv,o=True)[0]
i = glTools.utils.component.index(cv)
mc.setAttr(crv+'.controlPoints['+i+'].xValue',0)
mc.setAttr(crv+'.controlPoints['+i+'].yValue',0)
mc.setAttr(crv+'.controlPoints['+i+'].zValue',0)
def attachCurve(base,crv,cleanup=True):
'''
'''
# Get Spans
spans = mc.getAttr(crv+'.spans')
mc.setAttr(base+'.spans',spans)
# Match Shape
shapeOrig = base+'ShapeOrig'
mc.setAttr(shapeOrig+'.intermediateObject',0)
mc.rebuildCurve(shapeOrig,ch=True,rpo=True,rt=0,end=1,kr=0,kcp=0,kep=1,kt=0,s=spans,d=3)
bs = mc.blendShape(crv,shapeOrig)[0]
mc.setAttr(bs+'.w[0]',1)
# Delete Orig
if cleanup:
mc.delete(shapeOrig,ch=True)
mc.delete(crv)
# Restore Intermediate Shape
mc.setAttr(shapeOrig+'.intermediateObject',1)
# Return Result
return
def attachToCurveParam(ctrl,crv):
'''
'''
grp = mc.listRelatives(ctrl,p=True,pa=True)[0]
param = mc.getAttr(ctrl+'.param')
glTools.utils.attach.attachToCurve(crv,grp,param,uAttr='param')
mc.connectAttr(ctrl+'.param',grp+'.param',f=True)
def addDropoffControls(locs,prefix):
'''
'''
ctrlBuilder = glTools.tools.controlBuilder.ControlBuilder()
for i in range(len(locs)):
pre = prefix+glTools.utils.stringUtils.stripSuffix(locs[i])
wire = mc.listConnections(locs[i]+'.param',s=False,d=True)[0]
param = mc.getAttr(locs[i]+'.param')
ind = glTools.utils.attribute.getConnectionIndex(locs[i]+'.param')
ctrl = ctrlBuilder.create('sphere',pre+'_ctrl')
grp = glTools.utils.base.group(ctrl)
mc.connectAttr(locs[i]+'.worldPosition[0]',grp+'.translate',f=True)
mc.addAttr(ctrl,ln='param',min=0,max=1,dv=param,k=True)
mc.addAttr(ctrl,ln='bulge',min=-1,dv=0,k=True)
mc.connectAttr(ctrl+'.param',locs[i]+'.param['+str(ind)+']',f=True)
mc.connectAttr(ctrl+'.bulge',wire+'.wireLocatorEnvelope['+str(ind)+']',f=True)
def buildTube(	crv,
				profile=None,
				addCage=False,
				prefix=None):
'''
'''
# Nurbs Tube
mc.extrude(
ch = True,
rn = False,
po = 0,
et = 2,
ucp = 1,
fpt = 1,
upn = 1,
rotation =0,
scale = 1,
rsp = 1
)
# Polygon Tube
mc.extrude(
ch = True,
rn = False,
po = 1,
et = 2,
ucp = 1,
fpt = 1,
upn = 1,
rotation =0,
scale =1,
rsp = 1
)
|
program_synthesis/karel/scripts/eval_refinement.py | kavigupta/program_synthesis | 123 | 12794018 | import collections
import cPickle as pickle
import glob
import itertools
import json
import operator
import os
import re
import sys
from program_synthesis.karel.dataset import dataset
from program_synthesis.karel.dataset import executor
from program_synthesis.karel.dataset.karel_runtime import KarelRuntime
from program_synthesis.karel.models import karel_model
from program_synthesis.common.tools.saver import restore_args
BASE_DIR = ""
with open(BASE_DIR + "text2code-models/karel-sgd-cl1-lr1-lds100k-ldr0.5/report-dev-00100100.jsonl") as f:
baseline_report = []
print(f.readline())
for line in f:
baseline_report.append(json.loads(line))
class Args(object):
model_dir = BASE_DIR + 'program_synthesis-models/karel-lgrl-ref-m123-sgd-cl1-lr0.1-lds100k-ldr0.5'
step = 250100
args = Args()
restore_args(args)
args.word_vocab = ',,/data/karel/word.vocab'
m = karel_model.KarelLGRLRefineModel(args)
batch_processor = m.batch_processor(for_eval=True)
m.args.max_beam_trees = 64
m.args.max_eval_trials = 64
i = 0
result = []
while i < len(baseline_report):
batch = []
while len(batch) < 32 and i < len(baseline_report):
if baseline_report[i]['code']['info']['trees_checked'] == 1:
i += 1
continue
e = dataset.KarelExample.from_dict(baseline_report[i]['example'])
ref_code_sequence = baseline_report[i]['code']['info']['candidates'][0]
e.ref_example = dataset.KarelExample(idx=None, guid=None, code_sequence=ref_code_sequence, input_tests=e.input_tests, tests=e.tests)
batch.append(e)
i += 1
print("Starting batch (%d)..." % i)
res = m.inference(batch_processor(batch))
for example, infer in zip(batch, res):
result.append((example, infer))
# if i > 100:
# break
print(len(result), len(baseline_report))
the_executor = executor.KarelExecutor()
stats = {'total': len(result), 'fixed': 0}
refinement_results = []
for example, infer in result:
if not infer.code_sequence:
continue
correct = True
for test in example.input_tests + example.tests:
try:
log = the_executor.execute(infer.code_sequence, None, test['input'])
if log.result != test['output']:
correct = False
break
except (executor.ExecutorRuntimeException, executor.ExecutorSyntaxException) as e:
correct = False
break
refinement_results.append(correct)
if correct:
stats['fixed'] += 1
print(float(stats['fixed']) / stats['total'], stats['fixed'], stats['total'])
|
第5章/program/Chapter_5_xpath_special.py | kingname/SourceCodeOfBook | 274 | 12794048 | import lxml.html
html1 = '''
<!DOCTYPE html>
<html>
<head lang="en">
<meta charset="UTF-8">
<title></title>
</head>
<body>
<div id="test-1-k">需要的内容1</div>
<div id="test-2-k">需要的内容2</div>
<div id="testfault-k">需要的内容3</div>
<div id="useless">这是我不需要的内容</div>
</body>
</html>
'''
# selector = lxml.html.fromstring(html1)
# content = selector.xpath('//div[ends-with(@id, "-k")]/text()')
# for each in content:
# print(each)
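# Note (editorial): ends-with() is an XPath 2.0 function, while lxml implements
# XPath 1.0, so the query above raises an XPathEvalError. An XPath 1.0 workaround
# compares the tail of the attribute with substring():
# selector = lxml.html.fromstring(html1)
# content = selector.xpath('//div[substring(@id, string-length(@id) - 1) = "-k"]/text()')
# for each in content:
#     print(each)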
html2 = '''
<!DOCTYPE html>
<html>
<head lang="en">
<meta charset="UTF-8">
<title></title>
</head>
<body>
<div id="abc-key-x">需要的内容1</div>
<div id="123-key-000">需要的内容2</div>
<div id="haha-key">需要的内容3</div>
<div id="useless">这是我不需要的内容</div>
</body>
</html>
'''
# selector = lxml.html.fromstring(html2)
# content = selector.xpath('//div[contains(@id, "-key")]/text()')
# for each in content:
# print(each)
html3 = '''
<!DOCTYPE html>
<html>
<head lang="en">
<meta charset="UTF-8">
<title></title>
</head>
<body>
<div id="test3">
我左青龙,
<span id="tiger">
右白虎,
<ul>上朱雀,
<li>下玄武。</li>
</ul>
老牛在当中,
</span>
龙头在胸口。
</div>
</body>
</html>
'''
# With the ordinary approach, the extracted data comes back incomplete
selector = lxml.html.fromstring(html3)
# content_1 = selector.xpath('//div[@id="test3"]/text()')
# for each in content_1:
# print(each)
# Using string(.) retrieves the complete text
data = selector.xpath('//div[@id="test3"]')[0]
info = data.xpath('string(.)')
print(info)
|
examples/stockquotes-old/phase1/stockmarket.py | brubbel/Pyro4 | 638 | 12794051 | <filename>examples/stockquotes-old/phase1/stockmarket.py
import random
class StockMarket(object):
def __init__(self, marketname, symbols):
self.name = marketname
self.symbolmeans = {}
for symbol in symbols:
self.symbolmeans[symbol] = random.uniform(20, 200)
self.aggregators = []
def generate(self):
quotes = {}
for symbol, mean in self.symbolmeans.items():
if random.random() < 0.2:
quotes[symbol] = round(random.normalvariate(mean, 20), 2)
for aggregator in self.aggregators:
aggregator.quotes(self.name, quotes)
def listener(self, aggregator):
self.aggregators.append(aggregator)
def symbols(self):
return self.symbolmeans.keys()
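

# Usage sketch (editorial assumption, not part of the original example): any object
# exposing a quotes(marketname, quotes) method can be registered as a listener.
class _PrintingAggregator(object):
    def quotes(self, marketname, quotes):
        for symbol, value in quotes.items():
            print("{0}.{1}: {2}".format(marketname, symbol, value))


if __name__ == "__main__":
    nasdaq = StockMarket("NASDAQ", ["AAPL", "CSCO", "MSFT", "GOOG"])
    nasdaq.listener(_PrintingAggregator())
    for _ in range(5):
        nasdaq.generate()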
|
env/lib/python3.6/site-packages/dal_queryset_sequence/tests/test_views.py | anthowen/duplify | 1,368 | 12794099 | <gh_stars>1000+
import json
from dal import autocomplete
from django import test
from django.contrib.auth.models import Group
class Select2QuerySetSequenceViewTestCase(test.TestCase):
def setUp(self):
self.expected = {
'pagination': {
'more': False
},
'results': []
}
@classmethod
def setUpClass(cls):
for i in range(0, 3):
Group.objects.create(name='ViewTestCase%s' % i)
cls.request = test.RequestFactory().get('?q=foo')
super(Select2QuerySetSequenceViewTestCase, cls).setUpClass()
def get_view(self, **kwargs):
view = autocomplete.Select2QuerySetSequenceView(
queryset=autocomplete.QuerySetSequence(
Group.objects.all(),
),
paginate_by=2,
**kwargs
)
view.request = self.request
return view
def get_view_response(self, **view_kwargs):
return self.get_view(**view_kwargs).dispatch(self.request)
def get_view_response_json(self, **view_kwargs):
return json.loads(self.get_view_response(**view_kwargs).content)
def test_get(self):
result = self.get_view_response_json()
assert self.expected == result
def test_get_with_create_field(self):
self.expected['results'].append({
'text': 'Create "foo"',
'id': 'foo',
'create_id': True
})
result = self.get_view_response_json(create_field='name')
assert self.expected == result
|
third_party/blink/renderer/bindings/scripts/idl_types.py | Ron423c/chromium | 575 | 12794143 | <filename>third_party/blink/renderer/bindings/scripts/idl_types.py
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""IDL type handling.
Classes:
IdlTypeBase
IdlType
IdlUnionType
IdlArrayOrSequenceType
IdlSequenceType
IdlFrozenArrayType
IdlNullableType
IdlAnnotatedType
IdlTypes are picklable because we store them in interfaces_info.
"""
from collections import defaultdict
################################################################################
# IDL types
################################################################################
INTEGER_TYPES = frozenset([
# http://www.w3.org/TR/WebIDL/#dfn-integer-type
'byte',
'octet',
'short',
'unsigned short',
# int and unsigned are not IDL types
'long',
'unsigned long',
'long long',
'unsigned long long',
])
NUMERIC_TYPES = (
INTEGER_TYPES | frozenset([
# http://www.w3.org/TR/WebIDL/#dfn-numeric-type
'float',
'unrestricted float',
'double',
'unrestricted double',
]))
# http://www.w3.org/TR/WebIDL/#dfn-primitive-type
PRIMITIVE_TYPES = (frozenset(['boolean']) | NUMERIC_TYPES)
BASIC_TYPES = (
PRIMITIVE_TYPES | frozenset([
# Built-in, non-composite, non-object data types
# http://heycam.github.io/webidl/#idl-types
'DOMString',
'ByteString',
'USVString',
# http://heycam.github.io/webidl/#idl-types
'void',
]))
TYPE_NAMES = {
# http://heycam.github.io/webidl/#dfn-type-name
'any': 'Any',
'boolean': 'Boolean',
'byte': 'Byte',
'octet': 'Octet',
'short': 'Short',
'unsigned short': 'UnsignedShort',
'long': 'Long',
'unsigned long': 'UnsignedLong',
'long long': 'LongLong',
'unsigned long long': 'UnsignedLongLong',
'float': 'Float',
'unrestricted float': 'UnrestrictedFloat',
'double': 'Double',
'unrestricted double': 'UnrestrictedDouble',
'DOMString': 'String',
'ByteString': 'ByteString',
'USVString': 'USVString',
'object': 'Object',
}
STRING_TYPES = frozenset([
# http://heycam.github.io/webidl/#es-interface-call (step 10.11)
# (Interface object [[Call]] method's string types.)
'String',
'ByteString',
'USVString',
])
EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES = frozenset([
'AllowShared',
'Clamp',
'EnforceRange',
'StringContext',
'TreatNullAs',
])
################################################################################
# Inheritance
################################################################################
ancestors = defaultdict(list) # interface_name -> ancestors
def inherits_interface(interface_name, ancestor_name):
return (interface_name == ancestor_name
or ancestor_name in ancestors[interface_name])
def set_ancestors(new_ancestors):
ancestors.update(new_ancestors)
class IdlTypeBase(object):
"""Base class for IdlType, IdlUnionType, IdlArrayOrSequenceType
and IdlNullableType.
"""
def __str__(self):
raise NotImplementedError('__str__() should be defined in subclasses')
def __getattr__(self, name):
# Default undefined attributes to None (analogous to Jinja variables).
# This allows us to not define default properties in the base class, and
# allows us to relay __getattr__ in IdlNullableType to the inner type.
return None
def resolve_typedefs(self, typedefs):
raise NotImplementedError(
'resolve_typedefs should be defined in subclasses')
def idl_types(self):
"""A generator which yields IdlTypes which are referenced from |self|,
including itself."""
yield self
################################################################################
# IdlType
################################################################################
class IdlType(IdlTypeBase):
# FIXME: incorporate Nullable, etc.
# to support types like short?[] vs. short[]?, instead of treating these
# as orthogonal properties (via flags).
callback_functions = {}
callback_interfaces = set()
dictionaries = set()
enums = {} # name -> values
def __init__(self, base_type, is_unrestricted=False):
super(IdlType, self).__init__()
if is_unrestricted:
self.base_type = 'unrestricted %s' % base_type
else:
self.base_type = base_type
def __str__(self):
return self.base_type
def __getstate__(self):
return {
'base_type': self.base_type,
}
def __setstate__(self, state):
self.base_type = state['base_type']
@property
def is_basic_type(self):
return self.base_type in BASIC_TYPES
@property
def is_callback_function(self): # pylint: disable=C0103
return self.base_type in IdlType.callback_functions
@property
def is_custom_callback_function(self):
entry = IdlType.callback_functions.get(self.base_type)
callback_function = entry.get('callback_function')
if not callback_function:
return False
return 'Custom' in callback_function.extended_attributes
@property
def is_callback_interface(self):
return self.base_type in IdlType.callback_interfaces
@property
def is_dictionary(self):
return self.base_type in IdlType.dictionaries
@property
def is_enum(self):
# FIXME: add an IdlEnumType class and a resolve_enums step
# at end of IdlDefinitions constructor
return self.name in IdlType.enums
@property
def enum_values(self):
return IdlType.enums.get(self.name)
@property
def enum_type(self):
return self.name if self.is_enum else None
@property
def is_integer_type(self):
return self.base_type in INTEGER_TYPES
@property
def is_void(self):
return self.base_type == 'void'
@property
def is_numeric_type(self):
return self.base_type in NUMERIC_TYPES
@property
def is_primitive_type(self):
return self.base_type in PRIMITIVE_TYPES
@property
def is_interface_type(self):
# Anything that is not another type is an interface type.
# http://www.w3.org/TR/WebIDL/#idl-types
# http://www.w3.org/TR/WebIDL/#idl-interface
# In C++ these are RefPtr types.
return not (self.is_basic_type or self.is_callback_function
or self.is_dictionary or self.is_enum or self.name == 'Any'
or self.name == 'Object' or self.name == 'Promise'
) # Promise will be basic in future
@property
def is_string_type(self):
return self.name in STRING_TYPES
@property
def name(self):
"""Return type name
http://heycam.github.io/webidl/#dfn-type-name
"""
base_type = self.base_type
return TYPE_NAMES.get(base_type, base_type)
@classmethod
def set_callback_functions(cls, new_callback_functions):
cls.callback_functions.update(new_callback_functions)
@classmethod
def set_callback_interfaces(cls, new_callback_interfaces):
cls.callback_interfaces.update(new_callback_interfaces)
@classmethod
def set_dictionaries(cls, new_dictionaries):
cls.dictionaries.update(new_dictionaries)
@classmethod
def set_enums(cls, new_enums):
cls.enums.update(new_enums)
def resolve_typedefs(self, typedefs):
base_type = self.base_type
if base_type in typedefs:
resolved_type = typedefs[base_type]
if resolved_type.base_type in typedefs:
raise ValueError("We can't typedef a typedef'ed type.")
# For the case that the resolved type contains other typedef'ed
# type(s).
return resolved_type.resolve_typedefs(typedefs)
return self
################################################################################
# IdlUnionType
################################################################################
class IdlUnionType(IdlTypeBase):
# http://heycam.github.io/webidl/#idl-union
# IdlUnionType has __hash__() and __eq__() methods because they are stored
# in sets.
def __init__(self, member_types):
super(IdlUnionType, self).__init__()
self.member_types = member_types
def __str__(self):
return '(' + ' or '.join(
str(member_type) for member_type in self.member_types) + ')'
def __hash__(self):
return hash(self.name)
def __eq__(self, rhs):
return self.name == rhs.name
def __getstate__(self):
return {
'member_types': self.member_types,
}
def __setstate__(self, state):
self.member_types = state['member_types']
@property
def flattened_member_types(self):
"""Returns the set of the union's flattened member types.
https://heycam.github.io/webidl/#dfn-flattened-union-member-types
"""
# We cannot use a set directly because each member is an
# IdlTypeBase-derived class, and comparing two objects of the
# same type is not the same as comparing their names.
# In other words:
# x = IdlType('ByteString')
# y = IdlType('ByteString')
# x == y # False
# x.name == y.name # True
# |flattened_members|'s keys are type names, the values are type
# |objects|.
# We assume we can use two IDL objects of the same type interchangeably.
flattened_members = {}
for member in self.member_types:
if member.is_nullable:
member = member.inner_type
if member.is_union_type:
for inner_member in member.flattened_member_types:
flattened_members[inner_member.name] = inner_member
else:
flattened_members[member.name] = member
return set(flattened_members.values())
@property
def number_of_nullable_member_types(self):
"""Returns the union's number of nullable types.
http://heycam.github.io/webidl/#dfn-number-of-nullable-member-types
"""
count = 0
for member in self.member_types:
if member.is_nullable:
count += 1
member = member.inner_type
if member.is_union_type:
count += member.number_of_nullable_member_types
return count
@property
def is_union_type(self):
return True
def single_matching_member_type(self, predicate):
matching_types = list(filter(predicate, self.flattened_member_types))
if len(matching_types) > 1:
raise ValueError('%s is ambiguous.' % self.name)
return matching_types[0] if matching_types else None
@property
def string_member_type(self):
return self.single_matching_member_type(
lambda member_type: (member_type.is_string_type or member_type.is_enum)
)
@property
def numeric_member_type(self):
return self.single_matching_member_type(
lambda member_type: member_type.is_numeric_type)
@property
def boolean_member_type(self):
return self.single_matching_member_type(
lambda member_type: member_type.base_type == 'boolean')
@property
def sequence_member_type(self):
return self.single_matching_member_type(
lambda member_type: member_type.is_sequence_type)
@property
def dictionary_member_type(self):
return self.single_matching_member_type(
lambda member_type: member_type.is_dictionary)
@property
def as_union_type(self):
# Note: Use this to "look through" a possible IdlNullableType wrapper.
return self
@property
def name(self):
"""Return type name (or inner type name if nullable)
http://heycam.github.io/webidl/#dfn-type-name
"""
return 'Or'.join(member_type.name for member_type in self.member_types)
def resolve_typedefs(self, typedefs):
self.member_types = [
member_type.resolve_typedefs(typedefs)
for member_type in self.member_types
]
return self
def idl_types(self):
yield self
for member_type in self.member_types:
for idl_type in member_type.idl_types():
yield idl_type
################################################################################
# IdlArrayOrSequenceType, IdlSequenceType, IdlFrozenArrayType
################################################################################
# TODO(bashi): Rename this like "IdlArrayTypeBase" or something.
class IdlArrayOrSequenceType(IdlTypeBase):
"""Base class for array-like types."""
def __init__(self, element_type):
super(IdlArrayOrSequenceType, self).__init__()
self.element_type = element_type
def __getstate__(self):
return {
'element_type': self.element_type,
}
def __setstate__(self, state):
self.element_type = state['element_type']
def resolve_typedefs(self, typedefs):
self.element_type = self.element_type.resolve_typedefs(typedefs)
return self
@property
def is_array_or_sequence_type(self):
return True
@property
def is_sequence_type(self):
return False
@property
def is_frozen_array(self):
return False
@property
def enum_values(self):
return self.element_type.enum_values
@property
def enum_type(self):
return self.element_type.enum_type
def idl_types(self):
yield self
for idl_type in self.element_type.idl_types():
yield idl_type
class IdlSequenceType(IdlArrayOrSequenceType):
def __init__(self, element_type):
super(IdlSequenceType, self).__init__(element_type)
def __str__(self):
return 'sequence<%s>' % self.element_type
@property
def name(self):
return self.element_type.name + 'Sequence'
@property
def is_sequence_type(self):
return True
class IdlFrozenArrayType(IdlArrayOrSequenceType):
def __init__(self, element_type):
super(IdlFrozenArrayType, self).__init__(element_type)
def __str__(self):
return 'FrozenArray<%s>' % self.element_type
@property
def name(self):
return self.element_type.name + 'Array'
@property
def is_frozen_array(self):
return True
################################################################################
# IdlRecordType
################################################################################
class IdlRecordType(IdlTypeBase):
def __init__(self, key_type, value_type):
super(IdlRecordType, self).__init__()
self.key_type = key_type
self.value_type = value_type
def __str__(self):
return 'record<%s, %s>' % (self.key_type, self.value_type)
def __getstate__(self):
return {
'key_type': self.key_type,
'value_type': self.value_type,
}
def __setstate__(self, state):
self.key_type = state['key_type']
self.value_type = state['value_type']
def idl_types(self):
yield self
for idl_type in self.key_type.idl_types():
yield idl_type
for idl_type in self.value_type.idl_types():
yield idl_type
def resolve_typedefs(self, typedefs):
self.key_type = self.key_type.resolve_typedefs(typedefs)
self.value_type = self.value_type.resolve_typedefs(typedefs)
return self
@property
def is_record_type(self):
return True
@property
def name(self):
return self.key_type.name + self.value_type.name + 'Record'
################################################################################
# IdlNullableType
################################################################################
# https://heycam.github.io/webidl/#idl-nullable-type
class IdlNullableType(IdlTypeBase):
def __init__(self, inner_type):
super(IdlNullableType, self).__init__()
if inner_type.name == 'Any':
raise ValueError('Inner type of nullable type must not be any.')
if inner_type.name == 'Promise':
raise ValueError(
'Inner type of nullable type must not be a promise.')
if inner_type.is_nullable:
raise ValueError(
'Inner type of nullable type must not be a nullable type.')
if inner_type.is_union_type:
if inner_type.number_of_nullable_member_types > 0:
raise ValueError(
'Inner type of nullable type must not be a union type that '
'itself includes a nullable type.')
if any(member.is_dictionary
for member in inner_type.flattened_member_types):
raise ValueError(
'Inner type of nullable type must not be a union type that '
'has a dictionary type as its members.')
self.inner_type = inner_type
def __str__(self):
# FIXME: Dictionary::ConversionContext::setConversionType can't
# handle the '?' in nullable types (passes nullability separately).
# Update that function to handle nullability from the type name,
# simplifying its signature.
# return str(self.inner_type) + '?'
return str(self.inner_type)
def __getattr__(self, name):
return getattr(self.inner_type, name)
def __getstate__(self):
return {
'inner_type': self.inner_type,
}
def __setstate__(self, state):
self.inner_type = state['inner_type']
@property
def is_nullable(self):
return True
@property
def name(self):
return self.inner_type.name + 'OrNull'
@property
def enum_values(self):
        # Nullable enums are handled by prepending a None value to the list of
# enum values. This None value is converted to nullptr on the C++ side,
# which matches the JavaScript 'null' in the enum parsing code.
inner_values = self.inner_type.enum_values
if inner_values:
return [None] + inner_values
return None
def resolve_typedefs(self, typedefs):
self.inner_type = self.inner_type.resolve_typedefs(typedefs)
return self
def idl_types(self):
yield self
for idl_type in self.inner_type.idl_types():
yield idl_type
################################################################################
# IdlAnnotatedType
################################################################################
class IdlAnnotatedType(IdlTypeBase):
"""IdlAnnoatedType represents an IDL type with extended attributes.
[Clamp], [EnforceRange], [StringContext], and [TreatNullAs] are applicable
to types.
https://heycam.github.io/webidl/#idl-annotated-types
"""
def __init__(self, inner_type, extended_attributes):
super(IdlAnnotatedType, self).__init__()
self.inner_type = inner_type
self.extended_attributes = extended_attributes
if any(key not in EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES
for key in extended_attributes):
raise ValueError(
'Extended attributes not applicable to types: %s' % self)
if ('StringContext' in extended_attributes
and inner_type.base_type not in ['DOMString', 'USVString']):
raise ValueError(
'StringContext is only applicable to string types.')
def __str__(self):
annotation = ', '.join(
(key + ('' if val is None else '=' + val))
for key, val in self.extended_attributes.items())
return '[%s] %s' % (annotation, str(self.inner_type))
def __getattr__(self, name):
return getattr(self.inner_type, name)
def __getstate__(self):
return {
'inner_type': self.inner_type,
'extended_attributes': self.extended_attributes,
}
def __setstate__(self, state):
self.inner_type = state['inner_type']
self.extended_attributes = state['extended_attributes']
@property
def is_annotated_type(self):
return True
@property
def has_string_context(self):
return 'StringContext' in self.extended_attributes
@property
def name(self):
annotation = ''.join(
(key + ('' if val is None else val))
for key, val in sorted(self.extended_attributes.items()))
return self.inner_type.name + annotation
def resolve_typedefs(self, typedefs):
self.inner_type = self.inner_type.resolve_typedefs(typedefs)
return self
def idl_types(self):
yield self
yield self.inner_type
|
src/rpdk/core/jsonutils/utils.py | zjinmei/cloudformation-cli | 200 | 12794162 | import hashlib
import json
import logging
from collections.abc import Mapping, Sequence
from typing import Any, List, Tuple
from nested_lookup import nested_lookup
from ordered_set import OrderedSet
from .pointer import fragment_decode, fragment_encode
LOG = logging.getLogger(__name__)
NON_MERGABLE_KEYS = ("uniqueItems", "insertionOrder")
TYPE = "type"
REF = "$ref"
UNPACK_SEQUENCE_IDENTIFIER = "*"
class FlatteningError(Exception):
pass
def item_hash(
item,
): # assumption -> input is only json comparable type (dict/list/scalar)
"""MD5 hash for an item (Dictionary/Iterable/Scalar)"""
dhash = hashlib.md5() # nosec
if isinstance(item, dict):
item = {k: item_hash(v) for k, v in item.items()}
if isinstance(item, list):
        item = sorted(item_hash(i) for i in item)
encoded = json.dumps(item, sort_keys=True).encode()
dhash.update(encoded)
return dhash.hexdigest()
def to_set(value: Any) -> OrderedSet:
return (
OrderedSet(value)
if isinstance(value, (list, OrderedSet))
else OrderedSet([value])
)
class ConstraintError(FlatteningError, ValueError):
def __init__(self, message, path, *args):
self.path = fragment_encode(path)
message = message.format(*args, path=self.path)
super().__init__(message)
class BaseRefPlaceholder:
"""A sentinel object representing a reference inside the base document."""
def __repr__(self):
"""Readable representation for debugging.
>>> repr(BaseRefPlaceholder())
'<BASE>'
"""
return "<BASE>"
#: The sentinel instance representing a reference inside the base document.
BASE = BaseRefPlaceholder()
def rewrite_ref(ref):
"""Rewrite a reference to be inside of the base document. A relative JSON
pointer is returned (in URI fragment identifier representation).
If the reference is already inside the base document (:const:`BASE`), the parts
are simply encoded into a pointer.
If the reference is outside of the base document, a unique pointer inside
the base document is made by namespacing the reference under the remote base
name inside the remote section.
>>> rewrite_ref((BASE, "foo", "bar"))
'#/foo/bar'
>>> rewrite_ref((BASE,))
'#'
>>> rewrite_ref(("remote", "foo", "bar"))
'#/remote/remote/foo/bar'
>>> rewrite_ref(("remote",))
'#/remote/remote'
"""
base, *parts = ref
if base is not BASE:
parts = ["remote", base] + parts
return fragment_encode(parts)
def traverse(document, path_parts):
"""Traverse the document according to the reference.
Since the document is presumed to be the reference's base, the base is
discarded. There is no validation that the reference is valid.
:raises ValueError, LookupError: the reference is invalid for this document
>>> traverse({"foo": {"bar": [42]}}, tuple())
({'foo': {'bar': [42]}}, (), None)
>>> traverse({"foo": {"bar": [42]}}, ["foo"])
({'bar': [42]}, ('foo',), {'foo': {'bar': [42]}})
>>> traverse({"foo": {"bar": [42]}}, ("foo", "bar"))
([42], ('foo', 'bar'), {'bar': [42]})
>>> traverse({"foo": {"bar": [42]}}, ("foo", "bar", "0"))
(42, ('foo', 'bar', 0), [42])
>>> traverse({}, ["foo"])
Traceback (most recent call last):
...
KeyError: 'foo'
>>> traverse([], ["foo"])
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'foo'
>>> traverse([], [0])
Traceback (most recent call last):
...
IndexError: list index out of range
"""
parent = None
path = []
for part in path_parts:
if isinstance(document, Sequence):
part = int(part)
parent = document
document = document[part]
path.append(part)
return document, tuple(path), parent
def _resolve_ref(sub_schema: dict, definitions: dict, last_step: bool = False):
# resolve $ref
ref = nested_lookup(REF, sub_schema) # should be safe (always single value)
    # because sub_schema is always scoped to a single parent property
    # (taken from definitions)
if last_step and REF not in sub_schema: # dont traverse deeper than requested
# check if $ref is used directly ->
# means that we need to check definition
# otherwise it's an array and return subschema
return sub_schema
if ref:
# [0] should be a single $ref in subschema on the top level
# [-1] $ref must follow #/definitions/object
sub_schema = definitions[fragment_decode(ref[0])[-1]]
# resolve properties
properties = nested_lookup("properties", sub_schema)
if properties:
sub_schema = properties[0]
return sub_schema
# pylint: disable=C0301
def traverse_raw_schema(schema: dict, path: tuple):
"""Traverse the raw json schema resolving $ref
:raises TypeError: either schema is not of type dict
:raises ConstraintError: the schema tries to override "type" or "$ref"
>>> traverse_raw_schema({"properties": {"bar": [42]}}, tuple())
{'bar': [42]}
>>> traverse_raw_schema({"properties": {"bar": [42]}}, ("bar",))
[42]
>>> traverse_raw_schema({"definitions": {"bar": {"type": "boolean"}},"properties": {"bar": {"$ref": "#/definitions/bar"}}}, ("bar",))
{'type': 'boolean'}
>>> traverse_raw_schema({"definitions":{"b":[1],"f":{"properties":{"b":{"$ref":"#/definitions/b"}}}},"properties":{"f":{"$ref":"#/definitions/f"}}},("f", "b")) # noqa: B950
[1]
    >>> traverse_raw_schema({}, ("foo",))
{}
>>> traverse_raw_schema([], ["foo"])
Traceback (most recent call last):
...
TypeError: Schema must be a dictionary
"""
if not isinstance(schema, Mapping):
raise TypeError("Schema must be a dictionary")
try:
properties = schema["properties"]
definitions = schema.get("definitions", {})
sub_properties = properties
        last_step = len(path) - 1  # index of the final step, to prevent deeper traversal than requested
        for i, step in enumerate(path):
            sub_properties = _resolve_ref(
                sub_properties[step],
                definitions,
                last_step=i == last_step,
            )
return sub_properties
except KeyError as e:
LOG.debug("Malformed Schema or incorrect path provided\n%s\n%s", path, e)
return {}
def traverse_path_for_sequence_members(
document: dict, path_parts: Sequence, path: list = None
) -> Tuple[List[object], List[tuple]]:
"""Traverse the paths for all sequence members in the document according to the reference.
Since the document is presumed to be the reference's base, the base is
discarded. There is no validation that the reference is valid.
Differing from traverse, this returns a list of documents and a list of resolved paths.
:parameter document: document to traverse (dict or list)
:parameter path_parts: document paths to traverse
:parameter path: traversed path so far
:raises ValueError, LookupError: the reference is invalid for this document
>>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, tuple())
([{'foo': {'bar': [42, 43, 44]}}], [()])
>>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, ["foo"])
([{'bar': [42, 43, 44]}], [('foo',)])
>>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, ("foo", "bar"))
([[42, 43, 44]], [('foo', 'bar')])
>>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, ("foo", "bar", "*"))
([42, 43, 44], [('foo', 'bar', 0), ('foo', 'bar', 1), ('foo', 'bar', 2)])
>>> traverse_path_for_sequence_members({"foo": {"bar": [{"baz": 1, "bin": 1}, {"baz": 2, "bin": 2}]}}, ("foo", "bar", "*"))
([{'baz': 1, 'bin': 1}, {'baz': 2, 'bin': 2}], [('foo', 'bar', 0), ('foo', 'bar', 1)])
>>> traverse_path_for_sequence_members({"foo": {"bar": [{"baz": 1, "bin": 1}, {"baz": 2, "bin": 2}]}}, ("foo", "bar", "*", "baz"))
([1, 2], [('foo', 'bar', 0, 'baz'), ('foo', 'bar', 1, 'baz')])
>>> traverse_path_for_sequence_members({}, ["foo"])
Traceback (most recent call last):
...
KeyError: 'foo'
>>> traverse_path_for_sequence_members([], ["foo"])
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'foo'
>>> traverse_path_for_sequence_members([], [0])
Traceback (most recent call last):
...
IndexError: list index out of range
"""
if path is None:
path = []
if not path_parts:
return [document], [tuple(path)]
path_parts = list(path_parts)
if not isinstance(document, Sequence):
return _handle_non_sequence_for_traverse(document, path_parts, path)
return _handle_sequence_for_traverse(document, path_parts, path)
def _handle_non_sequence_for_traverse(
current_document: dict, current_path_parts: list, current_path: list
) -> Tuple[List[object], List[tuple]]:
"""
Handling a non-sequence member for `traverse_path_for_sequence_members` is like the loop block in `traverse`:
The next path part is the first part in the list of path parts.
The new document is obtained from the current document using the new path part as the key.
The next path part is added to the traversed path.
The traversal continues by recursively calling `traverse_path_for_sequence_members`
"""
part_to_handle = current_path_parts.pop(0)
current_document = current_document[part_to_handle]
current_path.append(part_to_handle)
return traverse_path_for_sequence_members(
current_document, current_path_parts, current_path
)
def _handle_sequence_for_traverse(
current_document: Sequence, current_path_parts: list, current_path: list
) -> Tuple[List[object], List[tuple]]:
"""
Check the new path part for the unpack sequence identifier (e.g. '*'), otherwise traverse index and continue:
The new document is obtained from the current document (a sequence) using the new path part as the index.
The next path part is added to the traversed path
"""
sequence_part = current_path_parts.pop(0)
if sequence_part == UNPACK_SEQUENCE_IDENTIFIER:
return _handle_unpack_sequence_for_traverse(
current_document, current_path_parts, current_path
)
# otherwise, sequence part should be a valid index
current_sequence_part = int(sequence_part)
current_document = current_document[current_sequence_part]
current_path.append(current_sequence_part)
return [current_document], [tuple(current_path)]
def _handle_unpack_sequence_for_traverse(
current_document: Sequence, current_path_parts: list, current_path: list
) -> Tuple[List[object], List[tuple]]:
"""
When unpacking a sequence, we need to include multiple paths and multiple documents, one for each sequence member.
For each sequence member:
Append the traversed paths w/ the sequence index, and get the new document.
The new document is obtained by traversing the current document using the sequence index.
The new document is appended to the list of new documents.
For each new document:
The remaining document is traversed using the remaining path parts.
The list of traversed documents and traversed paths are returned.
"""
documents = []
resolved_paths = []
new_documents = []
new_paths = []
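    # First pass: resolve each sequence index to its member document; the remaining
    # path parts are applied to every member in the second pass below.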
for sequence_index in range(len(current_document)):
new_paths.append(current_path.copy() + [sequence_index])
new_document = traverse_path_for_sequence_members(
current_document, [sequence_index] + current_path_parts, current_path.copy()
)[0]
new_documents.extend(new_document)
for i in range(len(new_documents)): # pylint: disable=consider-using-enumerate
new_document = new_documents[i]
newer_documents, newer_paths = traverse_path_for_sequence_members(
new_document, current_path_parts, new_paths[i]
)
documents.extend(newer_documents)
resolved_paths.extend(newer_paths)
return documents, resolved_paths
def schema_merge(target, src, path): # noqa: C901 # pylint: disable=R0912
"""Merges the src schema into the target schema in place.
If there are duplicate keys, src will overwrite target.
:raises TypeError: either schema is not of type dict
:raises ConstraintError: the schema tries to override "type" or "$ref"
>>> schema_merge({}, {}, ())
{}
>>> schema_merge({'foo': 'a'}, {}, ())
{'foo': 'a'}
>>> schema_merge({}, {'foo': 'a'}, ())
{'foo': 'a'}
>>> schema_merge({'foo': 'a'}, {'foo': 'b'}, ())
{'foo': 'b'}
>>> schema_merge({'required': 'a'}, {'required': 'b'}, ())
{'required': ['a', 'b']}
>>> a, b = {'$ref': 'a'}, {'foo': 'b'}
>>> schema_merge(a, b, ('foo',))
{'$ref': 'a', 'foo': 'b'}
>>> a, b = {'$ref': 'a'}, {'type': 'b'}
>>> schema_merge(a, b, ('foo',))
{'type': OrderedSet(['a', 'b'])}
>>> a, b = {'$ref': 'a'}, {'$ref': 'b'}
>>> schema_merge(a, b, ('foo',))
{'type': OrderedSet(['a', 'b'])}
>>> a, b = {'$ref': 'a'}, {'type': ['b', 'c']}
>>> schema_merge(a, b, ('foo',))
{'type': OrderedSet(['a', 'b', 'c'])}
>>> a, b = {'$ref': 'a'}, {'type': OrderedSet(['b', 'c'])}
>>> schema_merge(a, b, ('foo',))
{'type': OrderedSet(['a', 'b', 'c'])}
>>> a, b = {'type': ['a', 'b']}, {'$ref': 'c'}
>>> schema_merge(a, b, ('foo',))
{'type': OrderedSet(['a', 'b', 'c'])}
>>> a, b = {'type': OrderedSet(['a', 'b'])}, {'$ref': 'c'}
>>> schema_merge(a, b, ('foo',))
{'type': OrderedSet(['a', 'b', 'c'])}
>>> a, b = {'Foo': {'$ref': 'a'}}, {'Foo': {'type': 'b'}}
>>> schema_merge(a, b, ('foo',))
{'Foo': {'type': OrderedSet(['a', 'b'])}}
>>> schema_merge({'type': 'a'}, {'type': 'b'}, ()) # doctest: +NORMALIZE_WHITESPACE
{'type': OrderedSet(['a', 'b'])}
>>> schema_merge({'type': 'string'}, {'type': 'integer'}, ())
{'type': OrderedSet(['string', 'integer'])}
"""
if not (isinstance(target, Mapping) and isinstance(src, Mapping)):
raise TypeError("Both schemas must be dictionaries")
for key, src_schema in src.items():
try:
if key in (
REF,
TYPE,
): # $ref and type are treated similarly and unified
target_schema = target.get(key) or target.get(TYPE) or target[REF]
else:
target_schema = target[key] # carry over existing properties
except KeyError:
target[key] = src_schema
else:
next_path = path + (key,)
try:
target[key] = schema_merge(target_schema, src_schema, next_path)
except TypeError:
if key in (TYPE, REF): # combining multiple $ref and types
src_set = to_set(src_schema)
try:
target[TYPE] = to_set(
target[TYPE]
) # casting to ordered set as lib
# implicitly converts strings to sets
target[TYPE] |= src_set
except (TypeError, KeyError):
target_set = to_set(target_schema)
target[TYPE] = target_set | src_set
try:
# check if there are conflicting $ref and type
# at the same sub schema. Conflicting $ref could only
# happen on combiners because method merges two json
# objects without losing any previous info:
# e.g. "oneOf": [{"$ref": "..#1.."},{"$ref": "..#2.."}] ->
# { "ref": "..#1..", "type": [{},{}] }
target.pop(REF)
except KeyError:
pass
elif key == "required":
target[key] = sorted(set(target_schema) | set(src_schema))
else:
if key in NON_MERGABLE_KEYS and target_schema != src_schema:
msg = (
"Object at path '{path}' declared multiple values "
"for '{}': found '{}' and '{}'"
)
# pylint: disable=W0707
raise ConstraintError(msg, path, key, target_schema, src_schema)
target[key] = src_schema
return target
|
src/nsupdate/utils/_tests/test_mail.py | mirzazulfan/nsupdate.info | 774 | 12794179 | <reponame>mirzazulfan/nsupdate.info
"""
Tests for mail module.
"""
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from ..mail import translate_for_user
class TestTransUser(object):
def test(self):
User = get_user_model()
user = User.objects.get(username='test')
user.profile.language = 'de'
msgs = [_('German'), _('English')]
msgs = translate_for_user(user, *msgs)
assert msgs == ['Deutsch', 'Englisch']
|
readthedocs/config/utils.py | tkoyama010/readthedocs.org | 4,054 | 12794215 | """Shared functions for the config module."""
def to_dict(value):
"""Recursively transform a class from `config.models` to a dict."""
if hasattr(value, 'as_dict'):
return value.as_dict()
if isinstance(value, list):
return [
to_dict(v)
for v in value
]
if isinstance(value, dict):
return {
k: to_dict(v)
for k, v in value.items()
}
return value
def list_to_dict(list_):
"""Transform a list to a dictionary with its indices as keys."""
dict_ = {
str(i): element
for i, element in enumerate(list_)
}
return dict_
|
tensorflow_graphics/projects/nasa/lib/utils.py | Liang813/graphics | 2,759 | 12794237 | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General helper functions."""
from os import path
import numpy as np
from skimage import measure
import tensorflow.compat.v1 as tf
from tensorflow_graphics.projects.cvxnet.lib.libmise import mise
from tensorflow_graphics.projects.nasa.lib import datasets
from tensorflow_graphics.projects.nasa.lib import models
import tensorflow_probability as tfp
from tqdm import trange
import trimesh
tf.disable_eager_execution()
tfd = tfp.distributions
def define_flags():
"""Define command line flags."""
flags = tf.app.flags
# Dataset Parameters
flags.DEFINE_enum("dataset", "amass",
list(k for k in datasets.dataset_dict.keys()),
"Name of the dataset.")
flags.DEFINE_string("data_dir", None, "Directory to load data from.")
flags.mark_flag_as_required("data_dir")
flags.DEFINE_integer("sample_bbox", 1024, "Number of bbox samples.")
flags.DEFINE_integer("sample_surf", 1024, "Number of surface samples.")
flags.DEFINE_integer("batch_size", 12, "Batch size.")
flags.DEFINE_integer("motion", 0, "Index of the motion for evaluation.")
flags.DEFINE_integer("subject", 0, "Index of the subject for training.")
# Model Parameters
flags.DEFINE_enum("model", "nasa", list(k for k in models.model_dict.keys()),
"Name of the model.")
flags.DEFINE_integer("n_parts", 24, "Number of parts.")
flags.DEFINE_integer("total_dim", 960,
"Dimension of the latent vector (in total).")
flags.DEFINE_bool("shared_decoder", False, "Whether to use shared decoder.")
flags.DEFINE_float("soft_blend", 5., "The constant to blend parts.")
flags.DEFINE_bool("projection", True,
"Whether to use projected shape features.")
flags.DEFINE_float("level_set", 0.5, "The value of the level_set.")
flags.DEFINE_integer("n_dims", 3, "The dimension of the query points.")
# Training Parameters
flags.DEFINE_float("lr", 1e-4, "Learning rate")
flags.DEFINE_string("train_dir", None, "Training directory.")
flags.mark_flag_as_required("train_dir")
flags.DEFINE_integer("max_steps", 200000, "Number of optimization steps.")
flags.DEFINE_integer("save_every", 5000,
"Number of steps to save checkpoint.")
flags.DEFINE_integer("summary_every", 500,
"Number of steps to save checkpoint.")
flags.DEFINE_float("label_w", 0.5, "Weight of labed vertices loss.")
flags.DEFINE_float("minimal_w", 0.05, "Weight of minimal loss.")
flags.DEFINE_bool("use_vert", True,
"Whether to use vertices on the mesh for training.")
flags.DEFINE_bool("use_joint", True,
"Whether to use joint-based transformation.")
flags.DEFINE_integer("sample_vert", 2048, "Number of vertex samples.")
  # Evaluation Parameters
flags.DEFINE_bool("gen_mesh_only", False, "Whether to generate meshes only.")
# Tracking Parameters
flags.DEFINE_float("theta_lr", 5e-4, "Learning rate")
flags.DEFINE_integer("max_steps_per_frame", 1792,
"Number of optimization steps for tracking each frame.")
flags.DEFINE_enum("gradient_type", "reparam", ["vanilla", "reparam"],
"Type of gradient to use in theta optimization.")
flags.DEFINE_integer("sample_track_vert", 1024,
"Number of vertex samples for tracking each frame.")
flags.DEFINE_integer("n_noisy_samples", 8,
"Number of noisy samples per vertex")
flags.DEFINE_float("bandwidth", 1e-2, "Bandwidth of the gaussian noises.")
flags.DEFINE_bool(
"left_trans", False,
"Whether to use left side transformation (True) or right side (False).")
flags.DEFINE_string("joint_data", None, "Path to load joint data.")
flags.DEFINE_float("glue_w", 20., "Weight of length constraint loss.")
flags.DEFINE_float("trans_range", 1., "The range of allowed translations.")
def gen_mesh(sess,
feed_dict,
latent_holder,
point_holder,
latent,
occ,
batch_val,
hparams,
idx=0):
"""Generating meshes given a trained NASA model."""
  scale = 1.1  # Scale of the padded bbox relative to the tight one.
level_set = hparams.level_set
latent_val = sess.run(latent, feed_dict)
mesh_extractor = mise.MISE(32, 3, level_set)
points = mesh_extractor.query()
gt_verts = batch_val["vert"].reshape([-1, 3])
gt_bbox = np.stack([gt_verts.min(axis=0), gt_verts.max(axis=0)], axis=0)
gt_center = (gt_bbox[0] + gt_bbox[1]) * 0.5
gt_scale = (gt_bbox[1] - gt_bbox[0]).max()
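  # MISE refinement loop: evaluate occupancy at the currently active points, feed the
  # values back to the extractor, and query again until no active points remain.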
while points.shape[0] != 0:
orig_points = points
points = points.astype(np.float32)
points = (np.expand_dims(points, axis=0) / mesh_extractor.resolution -
0.5) * scale
points = points * gt_scale + gt_center
n_points = points.shape[1]
values = []
for i in range(0, n_points,
100000): # Add this to prevent OOM due to points overload.
feed_dict[latent_holder] = latent_val
feed_dict[point_holder] = np.expand_dims(points[:, i:i + 100000], axis=1)
value = sess.run(occ[:, idx], feed_dict)
values.append(value)
values = np.concatenate(values, axis=1)
values = values[0, :, 0].astype(np.float64)
mesh_extractor.update(orig_points, values)
points = mesh_extractor.query()
value_grid = mesh_extractor.to_dense()
try:
value_grid = np.pad(value_grid, 1, "constant", constant_values=-1e6)
verts, faces, normals, unused_var = measure.marching_cubes_lewiner(
value_grid, min(level_set, value_grid.max()))
del normals
verts -= 1
verts /= np.array([
value_grid.shape[0] - 3, value_grid.shape[1] - 3,
value_grid.shape[2] - 3
],
dtype=np.float32)
verts = scale * (verts - 0.5)
verts = verts * gt_scale + gt_center
faces = np.stack([faces[..., 1], faces[..., 0], faces[..., 2]], axis=-1)
mesh = trimesh.Trimesh(vertices=verts, faces=faces)
return mesh
except: # pylint: disable=bare-except
return None
def save_mesh(sess,
feed_dict,
latent_holder,
point_holder,
latent,
occ,
batch_val,
hparams,
pth="meshes"):
"""Generate and save meshes to disk given a trained NASA model."""
name = batch_val["name"][0].decode("utf-8")
subject, motion, frame = amass_name_helper(name)
pth = path.join(hparams.train_dir, pth, frame)
if not tf.io.gfile.isdir(pth):
tf.io.gfile.makedirs(pth)
start = hparams.n_parts
for i in range(start, hparams.n_parts + 1):
mesh_model = gen_mesh(
sess,
feed_dict,
latent_holder,
point_holder,
latent,
occ,
batch_val,
hparams,
idx=i)
mesh_name = "full_pred.obj"
if mesh_model is not None:
with tf.io.gfile.GFile(path.join(pth, mesh_name), "w") as fout:
mesh_model.export(fout, file_type="obj")
return subject, motion, frame, mesh_model
def save_pointcloud(data, hparams, pth="pointcloud"):
"""Save pointcloud to disk."""
name = data["name"][0].decode("utf-8")
unused_subject, unused_motion, frame = amass_name_helper(name)
pth = path.join(hparams.train_dir, pth, frame)
if not tf.io.gfile.isdir(pth):
tf.io.gfile.makedirs(pth)
mesh_name = "pointcloud.obj"
with tf.io.gfile.GFile(path.join(pth, mesh_name), "w") as fout:
pointcloud = data["vert"].reshape([-1, 3])
for v in pointcloud:
fout.write("v {0} {1} {2}\n".format(*v.tolist()))
def amass_name_helper(name):
name, frame = name.split("-")
subject = name[:5]
motion = name[6:]
return subject, motion, frame
def make_summary_feed_dict(
iou_hook,
iou,
best_hook,
best_iou,
):
feed_dict = {}
feed_dict[iou_hook] = iou
feed_dict[best_hook] = best_iou
return feed_dict
def parse_global_step(ckpt):
basename = path.basename(ckpt)
return int(basename.split("-")[-1])
def compute_iou(sess, feed_dict, latent_holder, point_holder, latent, occ,
point, label, hparams):
"""Compute IoU."""
iou = 0.
eps = 1e-9
latent_val = sess.run(latent, feed_dict)
n_points = point.shape[2]
preds = []
for start in range(0, n_points, 100000):
feed_dict[point_holder] = point[:, :, start:start + 100000]
feed_dict[latent_holder] = latent_val
pred = sess.run(occ, feed_dict)
preds.append(pred)
pred = np.concatenate(preds, axis=2)
pred = (pred >= hparams.level_set).astype(np.float32)
label = (label[:, :1] >= 0.5).astype(np.float32).squeeze(axis=1)
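  # IoU = |pred AND label| / max(|pred OR label|, eps) over the binarized predictions.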
iou += np.sum(pred * label) / np.maximum(np.sum(np.maximum(pred, label)), eps)
return iou
def compute_glue_loss(connect, end_pts, inv_transforms, inv_first_frame_trans,
joints, hparams):
"""Compute the prior term as a glue loss."""
n_dims = hparams.n_dims
# Invert the transformation
r_inv = inv_transforms[..., :n_dims, :n_dims]
t_inv = inv_transforms[..., :n_dims, -1:]
r = tf.transpose(r_inv, [0, 2, 1])
t = -tf.matmul(r, t_inv)
transforms = tf.concat(
[tf.concat([r, t], axis=-1), inv_transforms[..., -1:, :]], axis=-2)
transforms = tf.matmul(transforms, inv_first_frame_trans)
  # Compute transformations of father joints and apply them to vectors from frame 0
father_transforms = tf.reduce_sum(
tf.expand_dims(transforms, axis=1) *
connect.reshape([hparams.n_parts, hparams.n_parts, 1, 1]),
axis=0)
end_pts_homo = tf.expand_dims(
tf.concat([end_pts, tf.ones_like(end_pts[..., :1])], axis=-1), axis=-1)
end_pts_transformed = tf.matmul(father_transforms, end_pts_homo)
end_pts_transformed = tf.squeeze(end_pts_transformed, axis=-1)[..., :n_dims]
# Compute vectors in current configuration
pred_links = tf.reshape(joints, [hparams.n_parts, n_dims])
# Compute distance between links and transformed vectors
return tf.reduce_sum(tf.square(pred_links - end_pts_transformed))
def vanilla_theta_gradient(model_fn, batch_holder, hparams):
"""A vanilla gradient estimator for the pose, theta."""
latent_holder, latent, occ_eval = model_fn(batch_holder, None, None,
"gen_mesh")
if hparams.sample_vert > 0:
points = batch_holder["point"]
weights = batch_holder["weight"]
n_vert = tf.shape(points)[2]
sample_indices = tf.random.uniform([1, 1, hparams.sample_vert],
minval=0,
maxval=n_vert,
dtype=tf.int32)
points = tf.gather(points, sample_indices, axis=2, batch_dims=2)
weights = tf.gather(weights, sample_indices, axis=2, batch_dims=2)
batch_holder["point"] = points
batch_holder["weight"] = weights
unused_var0, unused_var1, occ = model_fn(batch_holder, None, None, "gen_mesh")
return latent_holder, latent, occ_eval, tf.reduce_mean(
tf.square(occ - hparams.level_set))
def reparam_theta_gradient(model_fn, batch_holder, hparams):
"""A gradient estimaor for the pose, theta, using the reparam trick."""
sigma = hparams.bandwidth
n_samples = hparams.n_noisy_samples
latent_holder, latent, occ_eval = model_fn(batch_holder, None, None,
"gen_mesh")
if hparams.sample_vert > 0:
points = batch_holder["point"]
weights = batch_holder["weight"]
n_vert = tf.shape(points)[2]
sample_indices = tf.random.uniform([1, 1, hparams.sample_vert],
minval=0,
maxval=n_vert,
dtype=tf.int32)
points = tf.gather(points, sample_indices, axis=2, batch_dims=2)
weights = tf.gather(weights, sample_indices, axis=2, batch_dims=2)
batch_holder["point"] = points
batch_holder["weight"] = weights
dist = tfd.Normal(loc=0., scale=sigma)
n_pts = hparams.sample_vert if hparams.sample_vert > 0 else hparams.n_vert
noises = dist.sample((1, hparams.n_parts, n_pts, n_samples, hparams.n_dims))
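  # Perturb the sampled points with zero-mean Gaussian noise and average the predicted
  # occupancy over the noisy samples before computing the level-set loss.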
unused_var0, unused_var1, occ = model_fn(batch_holder, noises, None,
"gen_mesh")
occ = tf.reshape(occ, [1, hparams.n_parts + 1, -1, n_samples, 1])
occ = tf.reduce_mean(occ[:, hparams.n_parts:], axis=3)
return latent_holder, latent, occ_eval, tf.reduce_mean(
tf.square(occ - hparams.level_set))
def optimize_theta(feed_dict, loss, reset_op, train_op, rec_loss, glue_loss,
sess, k, hparams):
"""Optimize the pose, theta, during tracking."""
sess.run(reset_op)
loss_val = 0
glue_val = 0
with trange(hparams.max_steps_per_frame) as t:
for unused_i in t:
loss_val, unused_var, rec_val, glue_val = sess.run(
[loss, train_op, rec_loss, glue_loss], feed_dict)
t.set_description("Frame_{0} {1:.4f}|{2:.4f}".format(
k, rec_val, glue_val))
return loss_val, glue_val
|
examples/exception.py | pawelmhm/scrapy-playwright | 155 | 12794242 | import logging
from pathlib import Path
from scrapy import Spider, Request
from scrapy.crawler import CrawlerProcess
from scrapy_playwright.page import PageCoroutine
class HandleTimeoutMiddleware:
def process_exception(self, request, exception, spider):
logging.info("Caught exception: %s", exception.__class__)
return Request(
url="https://httpbin.org/get",
meta={
"playwright": True,
"playwright_page_coroutines": [
PageCoroutine(
"screenshot", path=Path(__file__).parent / "recovered.png", full_page=True
),
],
},
)
class HandleExceptionSpider(Spider):
"""
Handle exceptions in the Playwright downloader, such as TimeoutError
"""
name = "awesome"
custom_settings = {
"PLAYWRIGHT_DEFAULT_NAVIGATION_TIMEOUT": 1000,
"DOWNLOADER_MIDDLEWARES": {
HandleTimeoutMiddleware: 100,
},
}
def start_requests(self):
yield Request(
url="https://httpbin.org/delay/300",
meta={"playwright": True},
)
def parse(self, response):
yield {"url": response.url}
if __name__ == "__main__":
process = CrawlerProcess(
settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
"DOWNLOAD_HANDLERS": {
"https": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
# "http": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
},
"RETRY_TIMES": 0,
}
)
process.crawl(HandleExceptionSpider)
process.start()
|
tests/opening_test.py | karlch/vimiv | 268 | 12794257 | <reponame>karlch/vimiv
# vim: ft=python fileencoding=utf-8 sw=4 et sts=4
"""Test the opening of different file-types with vimiv."""
import os
from unittest import main
from vimiv_testcase import VimivTestCase
class OpeningTest(VimivTestCase):
"""Open with different file-types Test."""
@classmethod
def setUpClass(cls):
cls.init_test(cls)
def test_opening_with_directory(self):
"""Opening with a directory."""
expected_dir = os.path.abspath("vimiv/testimages")
self.init_test(["vimiv/testimages"])
self.assertEqual(expected_dir, os.getcwd())
expected_files = ["animation", "arch-logo.png", "arch_001.jpg",
"directory", "symlink_to_image", "vimiv.bmp",
"vimiv.svg", "vimiv.tiff"]
self.assertEqual(self.vimiv["library"].files, expected_files)
self.assertTrue(self.vimiv["library"].is_focus())
self.assertTrue(self.vimiv["library"].grid.is_visible())
def test_opening_with_image(self):
"""Open with an image."""
expected_dir = os.path.abspath("vimiv/testimages")
self.init_test(["vimiv/testimages/arch_001.jpg"])
# Check moving and image population
self.assertEqual(expected_dir, os.getcwd())
expected_images = ["arch_001.jpg", "symlink_to_image", "vimiv.bmp",
"vimiv.svg", "vimiv.tiff", "arch-logo.png"]
for image in [os.path.abspath(im) for im in expected_images]:
self.assertIn(image, self.vimiv.get_paths())
def test_opening_with_symlink(self):
"""Open with a symlink to an image."""
expected_dir = os.path.abspath("vimiv/testimages")
self.init_test(["vimiv/testimages/symlink_to_image"])
# Check moving and image population
self.assertEqual(expected_dir, os.getcwd())
expected_images = ["symlink_to_image", "vimiv.bmp", "vimiv.svg",
"vimiv.tiff", "arch-logo.png", "arch_001.jpg"]
expected_images = [os.path.abspath(image) for image in expected_images]
for image in [os.path.abspath(im) for im in expected_images]:
self.assertIn(image, self.vimiv.get_paths())
def test_opening_with_whitespace(self):
"""Open an image with whitespace and symlink in directory."""
expected_dir = os.path.abspath("vimiv/testimages/directory/")
self.init_test(["vimiv/testimages/directory/symlink with spaces .jpg"])
# Check moving and image population
self.assertEqual(expected_dir, os.getcwd())
expected_images = ["symlink with spaces .jpg"]
expected_images = [os.path.abspath(image) for image in expected_images]
self.assertEqual(expected_images, self.vimiv.get_paths())
def test_opening_recursively(self):
"""Open all images recursively."""
# Need to backup because we init in the wrong directory here
working_dir = self.working_directory
os.chdir("vimiv/testimages")
self.init_test(["."], to_set=["recursive"], values=["true"])
self.assertEqual(8, len(self.vimiv.get_paths()))
self.settings.reset()
self.working_directory = working_dir
def tearDown(self):
self.vimiv.quit()
os.chdir(self.working_directory)
if __name__ == "__main__":
main()
|
docs/examples/robot_motion_1.py | codecademy-engineering/gpiozero | 743 | 12794290 | from gpiozero import Robot, Motor, MotionSensor
from signal import pause
robot = Robot(left=Motor(4, 14), right=Motor(17, 18))
pir = MotionSensor(5)
pir.when_motion = robot.forward
pir.when_no_motion = robot.stop
pause()
|
doge/filter/__init__.py | zhu327/doge | 163 | 12794336 | from typing import Any
from gevent.monkey import patch_thread # type: ignore
from doge.common.doge import Executer, Request, Response
from doge.common.utils import import_string
patch_thread()
class BaseFilter(Executer):
def __init__(self, context: Any, _next: Executer):
self.next = _next
def execute(self, req: Request) -> Response:
return self.next.execute(req)
class FilterChain:
def __init__(self, context: Any):
self.context = context
def then(self, executer: Executer) -> Executer:
filters = self.context.url.get_param("filters", [])
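        # Wrap in reverse order so the first filter configured ends up as the outermost
        # wrapper around the final executer.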
for cls in reversed([import_string(f) for f in filters]):
executer = cls(self.context, executer)
return executer
|
DPGAnalysis/Skims/python/DoubleMuon_cfg.py | ckamtsikis/cmssw | 852 | 12794360 | <reponame>ckamtsikis/cmssw<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:/afs/cern.ch/cms/CAF/CMSCOMM/COMM_GLOBAL/CRUZET3/CMSSW_2_1_2/src/DPGAnalysis/Skims/python/reco_50908_210_CRZT210_V1P.root')
)
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.5 $'),
name = cms.untracked.string('$Source: /cvs_server/repositories/CMSSW/CMSSW/DPGAnalysis/Skims/python/DoubleMuon_cfg.py,v $'),
annotation = cms.untracked.string('CRUZET4 DoubleMuon skim')
)
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))
process.options = cms.untracked.PSet(wantSummary = cms.untracked.bool(True))
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = 'CRZT210_V1::All'
process.prefer("GlobalTag")
process.load("Configuration.StandardSequences.ReconstructionCosmics_cff")
process.doubleMuonFilter = cms.EDFilter("TrackCountFilter",
src = cms.InputTag('cosmicMuonsBarrelOnly'),
minNumber = cms.uint32(2)
)
process.doubleMuonPath = cms.Path(process.doubleMuonFilter)
process.out = cms.OutputModule("PoolOutputModule",
SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('doubleMuonPath')),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('RECO'),
filterName = cms.untracked.string('doubleMuonPath')),
fileName = cms.untracked.string('doubleMuon.root')
)
process.this_is_the_end = cms.EndPath(process.out)
|
tests/spot/sub_account/test_sub_account_api_get_ip_restriction.py | Banging12/binance-connector-python | 512 | 12794389 | import responses
import pytest
from binance.spot import Spot as Client
from tests.util import mock_http_response
from tests.util import random_str
from binance.lib.utils import encoded_string
from binance.error import ParameterRequiredError
mock_item = {"key_1": "value_1", "key_2": "value_2"}
key = random_str()
secret = random_str()
email = "<EMAIL>"
subAccountApiKey = random_str()
complete_params = {"email": email, "subAccountApiKey": subAccountApiKey}
parameterized_test_params = [
({"email": None, "subAccountApiKey": None}),
({"email": "", "subAccountApiKey": subAccountApiKey}),
({"email": email, "subAccountApiKey": ""}),
]
client = Client(key, secret)
@pytest.mark.parametrize("params", parameterized_test_params)
def test_sub_account_api_get_ip_restriction_with_missing_param(params):
    """Tests the API endpoint to get IP Restriction for a sub-account API key when a required parameter is missing"""
client.sub_account_api_get_ip_restriction.when.called_with(**params).should.throw(
ParameterRequiredError
)
@mock_http_response(
responses.GET,
"/sapi/v1/sub-account/subAccountApi/ipRestriction\\?"
+ encoded_string(complete_params),
mock_item,
200,
)
def test_sub_account_api_get_ip_restriction():
"""Tests the API endpoint to get IP Restriction for a sub-account API key"""
client.sub_account_api_get_ip_restriction(**complete_params).should.equal(mock_item)
|
zella-graphics/animation/main.py | whitmans-max/python-examples | 140 | 12794406 | #!/usr/bin/env python3
# date: 2020.05.29
# It use normal loop to animate point and checkMouse to close program on click
from graphics import * # PEP8: `import *` is not preferred
import random
import time
# --- main ---
win = GraphWin("My Window",500,500)
win.setBackground(color_rgb(0,0,0))
pt = Point(250, 250)
pt.setOutline(color_rgb(255,255,0))
pt.draw(win)
while True:
if win.checkMouse():
break
dx = random.randint(-10, 10)
dy = random.randint(-10, 10)
pt.move(dx, dy)
time.sleep(0.1)
win.close()
|
parsifal/apps/activities/migrations/0003_auto_20210906_0158.py | ShivamPytho/parsifal | 342 | 12794438 | <reponame>ShivamPytho/parsifal
# Generated by Django 3.2.6 on 2021-09-06 01:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('reviews', '0035_auto_20210829_0005'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('activities', '0002_alter_activity_id'),
]
operations = [
migrations.AlterField(
model_name='activity',
name='activity_type',
field=models.CharField(choices=[('F', 'Follow'), ('C', 'Comment'), ('S', 'Star')], max_length=1, verbose_name='type'),
),
migrations.AlterField(
model_name='activity',
name='content',
field=models.CharField(blank=True, max_length=500, verbose_name='content'),
),
migrations.AlterField(
model_name='activity',
name='date',
field=models.DateTimeField(auto_now_add=True, verbose_name='date'),
),
migrations.AlterField(
model_name='activity',
name='from_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='from user'),
),
migrations.AlterField(
model_name='activity',
name='review',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='reviews.review', verbose_name='review'),
),
migrations.AlterField(
model_name='activity',
name='to_user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='to user'),
),
]
|
labs/03_neural_recsys/movielens_paramsearch_results.py | soufiomario/labs-Deep-learning | 1,398 | 12794501 | <filename>labs/03_neural_recsys/movielens_paramsearch_results.py
import pandas as pd
from pathlib import Path
import json
def load_results_df(folder='results'):
folder = Path(folder)
results_dicts = []
for p in sorted(folder.glob('**/results.json')):
with p.open('r') as f:
results_dicts.append(json.load(f))
return pd.DataFrame.from_dict(results_dicts)
if __name__ == "__main__":
df = load_results_df().sort_values(by=['test_mae'], ascending=True)
print(df.head(5))
|
bin/ipynb2rst.py | albapa/QUIP | 229 | 12794512 | #!/usr/bin/env python3
import sys
import os
import glob
if len(sys.argv[1:]) == 0:
dirs = [os.getcwd()]
else:
dirs = sys.argv[1:]
for dir in dirs:
for notebook in glob.glob(os.path.join(dir, '*.ipynb')):
cmd = 'ipython nbconvert --to rst {0}'.format(notebook)
print(cmd)
os.system(cmd)
|
tasks/__init__.py | vladcorneci/golden-gate | 262 | 12794530 | # Copyright 2017-2020 Fitbit, Inc
# SPDX-License-Identifier: Apache-2.0
"""
Invoke configuration for Golden Gate
"""
# First check that we are running in a Python >= 3.5 environment
from __future__ import print_function
import sys
if not (sys.version_info.major == 3 and sys.version_info.minor >= 5):
print(
"""You are using 'invoke' in a Python 2.x environment, but Python >= 3.5 is required.
You have probably not activated the 'gg' conda environment, please check the 'Getting Started'
guide for more details on how to setup your environment""")
sys.exit(1)
# Imports
import os
import subprocess
from invoke import Collection, Config, task
from . import android
from . import apple
from . import pylon
from . import native
from . import clean
from . import wasm
from . import doc
from . import docker
# Assuming you haven't moved the default location of '.git', the .git/ folder (even for submodules)
# will be at the root of the repo. Thus, find the folder .git/ is within and assume that's the root
GIT_DIR = subprocess.check_output("git rev-parse --show-toplevel",
shell=True).strip().decode("utf-8")
ROOT_DIR = GIT_DIR
# Initialize constants that are common among all platforms/products
def initialize_constants(cfg):
cfg.C = {}
# We can't access the paths variable by using dot notation, since there is a paths() function
# on a Config object. We much use Dictionary syntax.
# http://docs.pyinvoke.org/en/0.15.0/api/config.html#module-invoke.config
cfg.C.ROOT_DIR = ROOT_DIR
cfg.C.BIN_DIR = os.path.join(cfg.C.ROOT_DIR, "bin")
cfg.C.BUILD_ROOT_DIR = os.path.join(cfg.C.ROOT_DIR, "xp/build")
cfg.C.BUILD_DIR = os.path.join(cfg.C.ROOT_DIR, "xp/build/cmake")
cfg.C.BUILD_DIR_NATIVE = os.path.join(cfg.C.BUILD_DIR, "native")
cfg.C.PLATFORM_DIR = os.path.join(cfg.C.ROOT_DIR, "platform")
cfg.C.APPS_DIR = os.path.join(cfg.C.BUILD_DIR_NATIVE, "apps")
cfg.C.APPLE_BUILD_TEMP_DIR = os.path.join(cfg.C.PLATFORM_DIR, "apple/output")
cfg.C.DOC_DIR = os.path.join(cfg.C.ROOT_DIR, "docs")
config = Config(project_location=ROOT_DIR)
initialize_constants(config)
# Add collections
ns = Collection()
ns.add_collection(android)
ns.add_collection(apple)
ns.add_collection(pylon)
ns.add_collection(native)
ns.add_collection(clean)
ns.add_collection(wasm)
ns.add_collection(doc)
ns.add_collection(docker)
# After collections are set up, set the config.
ns.configure(config)
ns.configure(android.config)
ns.configure(apple.config)
ns.configure(pylon.config)
|
blackstone/rules/citation_rules.py | goro53467/Blackstone | 541 | 12794535 | CITATION_PATTERNS = [
{
"label": "GENERIC_CASE_CITATION",
"pattern": [
{"IS_BRACKET": True, "OP": "?"},
{"SHAPE": "dddd"},
{"IS_BRACKET": True, "OP": "?"},
{"LIKE_NUM": True, "OP": "?"},
{"TEXT": {"REGEX": "^[A-Z]"}, "OP": "?"},
{"ORTH": ".", "OP": "?"},
{"TEXT": {"REGEX": r"^[A-Z\.]"}},
{"ORTH": ".", "OP": "?"},
{"LIKE_NUM": True},
],
}
]
|
testing/adios2/bindings/python/TestBPSelectSteps_nompi.py | taniabanerjee/ADIOS2 | 190 | 12794560 | <filename>testing/adios2/bindings/python/TestBPSelectSteps_nompi.py
#!/usr/bin/env python
#
# Distributed under the OSI-approved Apache License, Version 2.0. See
# accompanying file Copyright.txt for details.
#
# TestBPSelectSteps_nompi.py: test step selection by reading in Python
# in ADIOS2 File Write
# Created on: Jan 29, 2021
# Author: <NAME> <EMAIL>
import unittest
import shutil
import numpy as np
import adios2
TESTDATA_FILENAME = "steps_int32.bp"
class TestAdiosSelectSteps(unittest.TestCase):
def setUp(self):
total_steps = 10
with adios2.open(TESTDATA_FILENAME, "w") as fh:
for i in range(total_steps):
fh.write("step", np.array([i], dtype=np.int32), [1], [0], [1])
fh.end_step()
def tearDown(self):
shutil.rmtree(TESTDATA_FILENAME)
def test_select_steps_reading_fullAPI(self):
selected_steps = [3, 5, 7]
param_string = ",".join([str(i) for i in selected_steps])
adios = adios2.ADIOS()
ioReadBP = adios.DeclareIO("hellopy")
ioReadBP.SetParameter(TESTDATA_FILENAME, param_string)
fh = ioReadBP.Open(TESTDATA_FILENAME, adios2.Mode.Read)
var = ioReadBP.InquireVariable("step")
var.SetStepSelection([0, len(selected_steps)])
data = np.zeros(len(selected_steps), dtype=np.int32)
fh.Get(var, data, adios2.Mode.Sync)
self.assertTrue(all([data[i] == selected_steps[i] for i in
range(len(selected_steps))]))
if __name__ == '__main__':
unittest.main()
|
build/lib/pyconfluent/kafka_streams/processor/serialization/_bytes.py | newellp2019/pyconfluent | 330 | 12794616 | <reponame>newellp2019/pyconfluent
from .deserializer import Deserializer
from .serializer import Serializer
class BytesSerializer(Serializer[bytes]):
def serialize(self, topic: str, data: bytes) -> bytes:
return data
def configure(self, configs, is_key):
pass
def close(self):
pass
class BytesDeserializer(Deserializer[bytes]):
def deserialize(self, topic: str, data: bytes) -> bytes:
return data
def configure(self, configs, is_key):
pass
def close(self):
pass
|
pytorchvideo/models/net.py | kevinmtian/pytorchvideo | 2,391 | 12794664 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from typing import List, Optional
import torch
import torch.nn as nn
from pytorchvideo.layers.utils import set_attributes
from pytorchvideo.models.weight_init import init_net_weights
class Net(nn.Module):
"""
Build a general Net models with a list of blocks for video recognition.
::
Input
↓
Block 1
↓
.
.
.
↓
Block N
↓
The ResNet builder can be found in `create_resnet`.
"""
def __init__(self, *, blocks: nn.ModuleList) -> None:
"""
Args:
blocks (torch.nn.module_list): the list of block modules.
"""
super().__init__()
assert blocks is not None
self.blocks = blocks
init_net_weights(self)
def forward(self, x: torch.Tensor) -> torch.Tensor:
for idx in range(len(self.blocks)):
x = self.blocks[idx](x)
return x
class DetectionBBoxNetwork(nn.Module):
"""
A general purpose model that handles bounding boxes as part of input.
"""
def __init__(self, model: nn.Module, detection_head: nn.Module):
"""
Args:
            model (nn.Module): a model that precedes the head. Ex: stem + stages.
            detection_head (nn.Module): a network head that can take in input bounding boxes
and the outputs from the model.
"""
super().__init__()
self.model = model
self.detection_head = detection_head
def forward(self, x: torch.Tensor, bboxes: torch.Tensor):
"""
Args:
x (torch.tensor): input tensor
            bboxes (torch.tensor): associated bounding boxes.
The format is N*5 (Index, X_1,Y_1,X_2,Y_2) if using RoIAlign
and N*6 (Index, x_ctr, y_ctr, width, height, angle_degrees) if
using RoIAlignRotated.
"""
features = self.model(x)
out = self.detection_head(features, bboxes)
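        # Flatten the head output to 2D: one row per input bounding box.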
return out.view(out.shape[0], -1)
class MultiPathWayWithFuse(nn.Module):
"""
Build multi-pathway block with fusion for video recognition, each of the pathway
contains its own Blocks and Fusion layers across different pathways.
::
Pathway 1 ... Pathway N
↓ ↓
Block 1 Block N
↓⭠ --Fusion----↓
"""
def __init__(
self,
*,
multipathway_blocks: nn.ModuleList,
multipathway_fusion: Optional[nn.Module],
inplace: Optional[bool] = True,
) -> None:
"""
Args:
multipathway_blocks (nn.module_list): list of models from all pathways.
multipathway_fusion (nn.module): fusion model.
inplace (bool): If inplace, directly update the input list without making
a copy.
"""
super().__init__()
set_attributes(self, locals())
def forward(self, x: List[torch.Tensor]) -> torch.Tensor:
assert isinstance(
x, list
), "input for MultiPathWayWithFuse needs to be a list of tensors"
if self.inplace:
x_out = x
else:
x_out = [None] * len(x)
for pathway_idx in range(len(self.multipathway_blocks)):
if self.multipathway_blocks[pathway_idx] is not None:
x_out[pathway_idx] = self.multipathway_blocks[pathway_idx](
x[pathway_idx]
)
if self.multipathway_fusion is not None:
x_out = self.multipathway_fusion(x_out)
return x_out
|
scripts/legacy/make_maestro_index.py | lucaspbastos/mirdata | 224 | 12794719 | import argparse
import hashlib
import json
import csv
import os
MAESTRO_INDEX_PATH = '../mirdata/indexes/maestro_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def make_maestro_index(data_path):
metadata_path = os.path.join(data_path, 'maestro-v2.0.0.json')
print(metadata_path)
maestro_index = {}
with open(metadata_path, 'r') as fhandle:
metadata = json.load(fhandle)
for i, row in enumerate(metadata):
print(i)
trackid = row['midi_filename'].split('.')[0]
maestro_index[trackid] = {}
midi_path = os.path.join(data_path, row['midi_filename'])
midi_checksum = md5(midi_path)
maestro_index[trackid]['midi'] = [row['midi_filename'], midi_checksum]
audio_path = os.path.join(data_path, row['audio_filename'])
audio_checksum = md5(audio_path)
maestro_index[trackid]['audio'] = [row['audio_filename'], audio_checksum]
with open(MAESTRO_INDEX_PATH, 'w') as fhandle:
json.dump(maestro_index, fhandle, indent=2)
def main(args):
print("creating index...")
make_maestro_index(args.maestro_data_path)
print("done!")
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make MAESTRO index file.')
PARSER.add_argument(
'maestro_data_path', type=str, help='Path to MAESTRO data folder.'
)
main(PARSER.parse_args())
|
mmdnn/conversion/tensorflow/rewriter/lstm_rewriter.py | kmader/MMdnn | 3,442 | 12794728 | from mmdnn.conversion.rewriter.rewriter import UnitRewriterBase
import numpy as np
import re
class LSTMRewriter(UnitRewriterBase):
def __init__(self, graph, weights_dict):
return super(LSTMRewriter, self).__init__(graph, weights_dict)
def process_lstm_cell(self, match_result):
if 'lstm_cell' not in match_result._pattern_to_op.keys():
return
kwargs = dict()
top_node = match_result._pattern_to_op[match_result._name_to_pattern['lstm_cell']]
w_e = match_result.get_op("cell_kernel")
w = self._weights_dict[w_e.name.replace('/read', '')]
num_units = w.shape[1]//4
[wx, wh] = np.split(w, [-1 * num_units])
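        # The fused LSTM kernel stacks input and hidden weights along axis 0; the last
        # `num_units` rows are the recurrent (hidden) weights, the rest the input weights.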
input_size = wx.shape[0]
kwargs['num_units'] = num_units
kwargs['input_size'] = input_size
if hasattr(top_node, 'kwargs'):
top_node.kwargs.update(kwargs)
else:
top_node.kwargs = kwargs
def process_rnn_h_zero(self, match_result):
if 'h_zero' not in match_result._name_to_pattern.keys():
return
kwargs = dict()
top_node = match_result._pattern_to_op[match_result._name_to_pattern['h_zero']]
fill_size = match_result.get_op('fill_size')
fill_value = match_result.get_op('fill_value')
kwargs['fill_size'] = fill_size.get_attr('value').int_val[0]
kwargs['fill_value'] = fill_value.get_attr('value').float_val[0]
if hasattr(top_node, 'kwargs'):
top_node.kwargs.update(kwargs)
else:
top_node.kwargs = kwargs
def process_match_result(self, match_result, pattern_name):
if pattern_name == 'lstm_cell':
self.process_lstm_cell(match_result)
elif pattern_name == 'h_zero':
if self.check_match_scope(match_result, 'LSTMCellZeroState'):
self.process_rnn_h_zero(match_result)
    '''For short patterns, check the scope first to avoid matching other patterns.'''
def check_match_scope(self, match_result, scope_name):
ops = match_result._pattern_to_op.values()
for op in ops:
op_name_splits = op.name.split('/')
if len(op_name_splits) < 2:
return False
if re.sub(r'(_\d+)*$', '', op_name_splits[-2]) != scope_name:
if len(op_name_splits) > 2:
if re.sub(r'(_\d+)*$', '', op_name_splits[-3]) != scope_name:
return False
else:
return False
return True
def run(self):
return super(LSTMRewriter, self).run(['lstm_cell', 'h_zero'], 'tensorflow') |
esphome/components/nextion/base_component.py | OttoWinter/esphomeyaml | 249 | 12794740 | from string import ascii_letters, digits
import esphome.config_validation as cv
import esphome.codegen as cg
from esphome.components import color
from esphome.const import (
CONF_VISIBLE,
)
from . import CONF_NEXTION_ID
from . import Nextion
CONF_VARIABLE_NAME = "variable_name"
CONF_COMPONENT_NAME = "component_name"
CONF_WAVE_CHANNEL_ID = "wave_channel_id"
CONF_WAVE_MAX_VALUE = "wave_max_value"
CONF_PRECISION = "precision"
CONF_WAVEFORM_SEND_LAST_VALUE = "waveform_send_last_value"
CONF_TFT_URL = "tft_url"
CONF_ON_SLEEP = "on_sleep"
CONF_ON_WAKE = "on_wake"
CONF_ON_SETUP = "on_setup"
CONF_TOUCH_SLEEP_TIMEOUT = "touch_sleep_timeout"
CONF_WAKE_UP_PAGE = "wake_up_page"
CONF_AUTO_WAKE_ON_TOUCH = "auto_wake_on_touch"
CONF_WAVE_MAX_LENGTH = "wave_max_length"
CONF_BACKGROUND_COLOR = "background_color"
CONF_BACKGROUND_PRESSED_COLOR = "background_pressed_color"
CONF_FOREGROUND_COLOR = "foreground_color"
CONF_FOREGROUND_PRESSED_COLOR = "foreground_pressed_color"
CONF_FONT_ID = "font_id"
def NextionName(value):
valid_chars = f"{ascii_letters + digits}."
if not isinstance(value, str) or len(value) > 29:
        raise cv.Invalid("Must be a string of at most 29 characters")
for char in value:
if char not in valid_chars:
raise cv.Invalid(
f"Must only consist of upper/lowercase characters, numbers and the period '.'. The character '{char}' cannot be used."
)
return value
CONFIG_BASE_COMPONENT_SCHEMA = cv.Schema(
{
cv.GenerateID(CONF_NEXTION_ID): cv.use_id(Nextion),
cv.Optional(CONF_BACKGROUND_COLOR): cv.use_id(color),
cv.Optional(CONF_FOREGROUND_COLOR): cv.use_id(color),
cv.Optional(CONF_VISIBLE, default=True): cv.boolean,
}
)
CONFIG_TEXT_COMPONENT_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend(
cv.Schema(
{
cv.Required(CONF_COMPONENT_NAME): NextionName,
cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255),
}
)
)
CONFIG_BINARY_SENSOR_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend(
cv.Schema(
{
cv.Optional(CONF_COMPONENT_NAME): NextionName,
cv.Optional(CONF_VARIABLE_NAME): NextionName,
}
)
)
CONFIG_SENSOR_COMPONENT_SCHEMA = CONFIG_BINARY_SENSOR_SCHEMA.extend(
cv.Schema(
{
cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255),
}
)
)
CONFIG_SWITCH_COMPONENT_SCHEMA = CONFIG_SENSOR_COMPONENT_SCHEMA.extend(
cv.Schema(
{
cv.Optional(CONF_FOREGROUND_PRESSED_COLOR): cv.use_id(color),
cv.Optional(CONF_BACKGROUND_PRESSED_COLOR): cv.use_id(color),
}
)
)
async def setup_component_core_(var, config, arg):
if CONF_VARIABLE_NAME in config:
cg.add(var.set_variable_name(config[CONF_VARIABLE_NAME]))
elif CONF_COMPONENT_NAME in config:
cg.add(
var.set_variable_name(
config[CONF_COMPONENT_NAME],
config[CONF_COMPONENT_NAME] + arg,
)
)
if CONF_BACKGROUND_COLOR in config:
color_component = await cg.get_variable(config[CONF_BACKGROUND_COLOR])
cg.add(var.set_background_color(color_component))
if CONF_BACKGROUND_PRESSED_COLOR in config:
color_component = await cg.get_variable(config[CONF_BACKGROUND_PRESSED_COLOR])
cg.add(var.set_background_pressed_color(color_component))
if CONF_FOREGROUND_COLOR in config:
color_component = await cg.get_variable(config[CONF_FOREGROUND_COLOR])
cg.add(var.set_foreground_color(color_component))
if CONF_FOREGROUND_PRESSED_COLOR in config:
color_component = await cg.get_variable(config[CONF_FOREGROUND_PRESSED_COLOR])
cg.add(var.set_foreground_pressed_color(color_component))
if CONF_FONT_ID in config:
cg.add(var.set_font_id(config[CONF_FONT_ID]))
if CONF_VISIBLE in config:
cg.add(var.set_visible(config[CONF_VISIBLE]))
|
benchmark/bench_dd.py | Watch-Later/recipes | 1,418 | 12794764 | <reponame>Watch-Later/recipes
#!/usr/bin/python3
import re, subprocess
bs = 1
count = 1024 * 1024
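# Sweep block sizes from 1 byte up to 8 MiB, doubling each round; once a run exceeds
# one second, halve the record count so each dd invocation stays short.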
while bs <= 1024 * 1024 * 8:
args = ['dd', 'if=/dev/zero', 'of=/dev/null', 'bs=%d' % bs, 'count=%d' % count]
result = subprocess.run(args, capture_output=True)
seconds = 0
message = str(result.stderr)
if m := re.search('copied, (.*?) s, ', message):
seconds = float(m.group(1))
elif m := re.search('bytes transferred in (.*?) secs', message):
seconds = float(m.group(1))
else:
print('Unable to parse dd output:\n%s' % message)
break
print('bs=%7d count=%7d %6.3fs %8.3fus/record %9.3fMB/s' %
(bs, count, seconds, seconds * 1e6 / count, bs * count / 1e6 / seconds))
bs *= 2
if seconds > 1:
        count //= 2  # keep count an integer
result = """
Raspberry Pi 4 running FreeBSD 13-RELEASE:
freebsd% python3.9 bench_dd.py
bs= 1 count=1048576 3.307s 3.154us/record 0.317MB/s
bs= 2 count= 524288 1.682s 3.209us/record 0.623MB/s
bs= 4 count= 262144 0.824s 3.144us/record 1.272MB/s
bs= 8 count= 262144 0.855s 3.262us/record 2.453MB/s
bs= 16 count= 262144 0.831s 3.171us/record 5.046MB/s
bs= 32 count= 262144 0.813s 3.101us/record 10.321MB/s
bs= 64 count= 262144 0.848s 3.236us/record 19.779MB/s
bs= 128 count= 262144 0.848s 3.235us/record 39.569MB/s
bs= 256 count= 262144 0.863s 3.293us/record 77.746MB/s
bs= 512 count= 262144 0.844s 3.220us/record 159.029MB/s
bs= 1024 count= 262144 0.894s 3.411us/record 300.221MB/s
bs= 2048 count= 262144 0.984s 3.755us/record 545.461MB/s
bs= 4096 count= 262144 1.106s 4.219us/record 970.906MB/s
bs= 8192 count= 131072 0.675s 5.148us/record 1591.372MB/s
bs= 16384 count= 131072 0.917s 6.992us/record 2343.125MB/s
bs= 32768 count= 131072 1.385s 10.567us/record 3100.959MB/s
bs= 65536 count= 65536 1.189s 18.144us/record 3611.984MB/s
bs= 131072 count= 32768 1.130s 34.500us/record 3799.209MB/s
bs= 262144 count= 16384 1.155s 70.499us/record 3718.413MB/s
bs= 524288 count= 8192 1.264s 154.328us/record 3397.221MB/s
bs=1048576 count= 4096 1.543s 376.625us/record 2784.138MB/s
bs=2097152 count= 2048 2.041s 996.766us/record 2103.957MB/s
bs=4194304 count= 1024 2.441s 2383.790us/record 1759.511MB/s
bs=8388608 count= 512 2.690s 5253.455us/record 1596.779MB/s
Raspberry Pi 4 running Raspbian GNU/Linux 10 armv7, kernel 5.10
$ python3 bench_dd.py
bs= 1 count=1048576 1.067s 1.018us/record 0.982MB/s
bs= 2 count= 524288 0.529s 1.009us/record 1.982MB/s
bs= 4 count= 524288 0.540s 1.030us/record 3.885MB/s
bs= 8 count= 524288 0.537s 1.025us/record 7.805MB/s
bs= 16 count= 524288 0.533s 1.016us/record 15.741MB/s
bs= 32 count= 524288 0.537s 1.023us/record 31.265MB/s
bs= 64 count= 524288 1.527s 2.913us/record 21.972MB/s
bs= 128 count= 262144 0.758s 2.892us/record 44.258MB/s
bs= 256 count= 262144 0.760s 2.899us/record 88.300MB/s
bs= 512 count= 262144 0.768s 2.930us/record 174.728MB/s
bs= 1024 count= 262144 0.795s 3.034us/record 337.543MB/s
bs= 2048 count= 262144 0.817s 3.117us/record 657.138MB/s
bs= 4096 count= 262144 0.886s 3.378us/record 1212.454MB/s
bs= 8192 count= 262144 1.406s 5.365us/record 1527.034MB/s
bs= 16384 count= 131072 1.294s 9.875us/record 1659.057MB/s
bs= 32768 count= 65536 1.245s 19.003us/record 1724.402MB/s
bs= 65536 count= 32768 1.227s 37.450us/record 1749.962MB/s
bs= 131072 count= 16384 1.264s 77.148us/record 1698.972MB/s
bs= 262144 count= 8192 1.257s 153.500us/record 1707.781MB/s
bs= 524288 count= 4096 1.303s 318.062us/record 1648.385MB/s
bs=1048576 count= 2048 1.503s 733.804us/record 1428.960MB/s
bs=2097152 count= 1024 1.839s 1796.094us/record 1167.618MB/s
bs=4194304 count= 512 1.833s 3580.527us/record 1171.421MB/s
bs=8388608 count= 256 1.860s 7266.406us/record 1154.437MB/s
Raspberry Pi 4 running Debian 11 arm64, kernel 5.10
$ ./bench_dd.py
bs= 1 count=1048576 1.464s 1.396us/record 0.716MB/s
bs= 2 count= 524288 0.729s 1.390us/record 1.439MB/s
bs= 4 count= 524288 0.735s 1.402us/record 2.852MB/s
bs= 8 count= 524288 0.740s 1.411us/record 5.670MB/s
bs= 16 count= 524288 0.746s 1.423us/record 11.246MB/s
bs= 32 count= 524288 0.737s 1.407us/record 22.750MB/s
bs= 64 count= 524288 0.738s 1.408us/record 45.465MB/s
bs= 128 count= 524288 0.745s 1.421us/record 90.060MB/s
bs= 256 count= 524288 0.752s 1.434us/record 178.504MB/s
bs= 512 count= 524288 0.780s 1.488us/record 344.122MB/s
bs= 1024 count= 524288 0.831s 1.585us/record 645.859MB/s
bs= 2048 count= 524288 0.914s 1.742us/record 1175.405MB/s
bs= 4096 count= 524288 1.096s 2.090us/record 1960.027MB/s
bs= 8192 count= 262144 0.750s 2.861us/record 2863.609MB/s
bs= 16384 count= 262144 1.125s 4.290us/record 3819.446MB/s
bs= 32768 count= 131072 1.001s 7.638us/record 4289.905MB/s
bs= 65536 count= 65536 0.975s 14.882us/record 4403.740MB/s
bs= 131072 count= 65536 1.834s 27.978us/record 4684.865MB/s
bs= 262144 count= 32768 2.088s 63.717us/record 4114.190MB/s
bs= 524288 count= 16384 2.347s 143.225us/record 3660.587MB/s
bs=1048576 count= 8192 3.553s 433.748us/record 2417.480MB/s
bs=2097152 count= 4096 5.754s 1404.768us/record 1492.881MB/s
bs=4194304 count= 2048 6.109s 2982.832us/record 1406.148MB/s
bs=8388608 count= 1024 6.307s 6159.189us/record 1361.966MB/s
Raspberry Pi 4 running Ubuntu server 21.04 arm64, kernel 5.11
$ ./bench_dd.py
bs= 1 count=1048576 5.409s 5.159us/record 0.194MB/s
bs= 2 count= 524288 2.828s 5.393us/record 0.371MB/s
bs= 4 count= 262144 1.415s 5.397us/record 0.741MB/s
bs= 8 count= 131072 0.682s 5.202us/record 1.538MB/s
bs= 16 count= 131072 0.719s 5.483us/record 2.918MB/s
bs= 32 count= 131072 0.674s 5.143us/record 6.222MB/s
bs= 64 count= 131072 0.704s 5.373us/record 11.911MB/s
bs= 128 count= 131072 0.711s 5.425us/record 23.593MB/s
bs= 256 count= 131072 0.690s 5.262us/record 48.655MB/s
bs= 512 count= 131072 0.714s 5.449us/record 93.955MB/s
bs= 1024 count= 131072 0.707s 5.392us/record 189.911MB/s
bs= 2048 count= 131072 0.751s 5.728us/record 357.517MB/s
bs= 4096 count= 131072 0.802s 6.116us/record 669.720MB/s
bs= 8192 count= 131072 1.038s 7.916us/record 1034.902MB/s
bs= 16384 count= 65536 0.833s 12.712us/record 1288.837MB/s
bs= 32768 count= 65536 1.325s 20.212us/record 1621.207MB/s
bs= 65536 count= 32768 1.282s 39.113us/record 1675.575MB/s
bs= 131072 count= 16384 1.211s 73.936us/record 1772.773MB/s
bs= 262144 count= 8192 1.185s 144.619us/record 1812.651MB/s
bs= 524288 count= 4096 1.091s 266.418us/record 1967.912MB/s
bs=1048576 count= 2048 1.372s 670.063us/record 1564.891MB/s
bs=2097152 count= 1024 1.543s 1507.129us/record 1391.488MB/s
bs=4194304 count= 512 1.650s 3223.105us/record 1301.324MB/s
bs=8388608 count= 256 1.583s 6185.391us/record 1356.197MB/s
================================================================
Raspberry Pi 3 running Raspbian GNU/Linux 10 armv7, kernel 5.10
$ ./bench_dd.py
bs= 1 count=1048576 1.507s 1.437us/record 0.696MB/s
bs= 2 count= 524288 0.753s 1.437us/record 1.392MB/s
bs= 4 count= 524288 0.757s 1.444us/record 2.770MB/s
bs= 8 count= 524288 0.762s 1.454us/record 5.503MB/s
bs= 16 count= 524288 0.763s 1.456us/record 10.992MB/s
bs= 32 count= 524288 0.767s 1.463us/record 21.878MB/s
bs= 64 count= 524288 0.897s 1.711us/record 37.394MB/s
bs= 128 count= 524288 0.899s 1.715us/record 74.630MB/s
bs= 256 count= 524288 0.925s 1.764us/record 145.141MB/s
bs= 512 count= 524288 0.943s 1.799us/record 284.672MB/s
bs= 1024 count= 524288 1.013s 1.933us/record 529.725MB/s
bs= 2048 count= 262144 0.565s 2.155us/record 950.259MB/s
bs= 4096 count= 262144 0.671s 2.559us/record 1600.774MB/s
bs= 8192 count= 262144 0.996s 3.799us/record 2156.141MB/s
bs= 16384 count= 262144 1.627s 6.208us/record 2639.224MB/s
bs= 32768 count= 131072 1.456s 11.111us/record 2949.152MB/s
bs= 65536 count= 65536 1.365s 20.821us/record 3147.534MB/s
bs= 131072 count= 32768 1.324s 40.391us/record 3245.109MB/s
bs= 262144 count= 16384 1.301s 79.400us/record 3301.561MB/s
bs= 524288 count= 8192 1.369s 167.107us/record 3137.440MB/s
bs=1048576 count= 4096 1.862s 454.695us/record 2306.109MB/s
bs=2097152 count= 2048 2.197s 1072.520us/record 1955.351MB/s
bs=4194304 count= 1024 2.454s 2396.406us/record 1750.247MB/s
bs=8388608 count= 512 2.584s 5046.152us/record 1662.377MB/s
Raspberry Pi 3 running Ubuntu server 21.04 arm64, kernel 5.11
$ ./bench_dd.py
bs= 1 count=1048576 10.017s 9.553us/record 0.105MB/s
bs= 2 count= 524288 5.021s 9.577us/record 0.209MB/s
bs= 4 count= 262144 2.505s 9.554us/record 0.419MB/s
bs= 8 count= 131072 1.251s 9.546us/record 0.838MB/s
bs= 16 count= 65536 0.631s 9.623us/record 1.663MB/s
bs= 32 count= 65536 0.629s 9.605us/record 3.332MB/s
bs= 64 count= 65536 0.630s 9.606us/record 6.663MB/s
bs= 128 count= 65536 0.636s 9.700us/record 13.195MB/s
bs= 256 count= 65536 0.634s 9.667us/record 26.481MB/s
bs= 512 count= 65536 0.635s 9.687us/record 52.854MB/s
bs= 1024 count= 65536 0.645s 9.840us/record 104.064MB/s
bs= 2048 count= 65536 0.655s 10.002us/record 204.760MB/s
bs= 4096 count= 65536 0.688s 10.498us/record 390.177MB/s
bs= 8192 count= 65536 0.903s 13.782us/record 594.390MB/s
bs= 16384 count= 65536 1.343s 20.487us/record 799.712MB/s
bs= 32768 count= 32768 1.105s 33.717us/record 971.844MB/s
bs= 65536 count= 16384 0.987s 60.240us/record 1087.909MB/s
bs= 131072 count= 16384 1.854s 113.177us/record 1158.110MB/s
bs= 262144 count= 8192 1.801s 219.850us/record 1192.377MB/s
bs= 524288 count= 4096 1.796s 438.547us/record 1195.511MB/s
bs=1048576 count= 2048 1.972s 963.125us/record 1088.723MB/s
bs=2097152 count= 1024 2.151s 2100.605us/record 998.356MB/s
bs=4194304 count= 512 2.253s 4400.293us/record 953.187MB/s
bs=8388608 count= 256 2.306s 9005.898us/record 931.457MB/s
Raspberry Pi 3 running Debian 11 arm64, kernel 5.10
$ ./bench_dd.py
bs= 1 count=1048576 2.171s 2.070us/record 0.483MB/s
bs= 2 count= 524288 1.069s 2.039us/record 0.981MB/s
bs= 4 count= 262144 0.543s 2.071us/record 1.931MB/s
bs= 8 count= 262144 0.539s 2.058us/record 3.888MB/s
bs= 16 count= 262144 0.543s 2.070us/record 7.730MB/s
bs= 32 count= 262144 0.543s 2.072us/record 15.443MB/s
bs= 64 count= 262144 0.544s 2.077us/record 30.817MB/s
bs= 128 count= 262144 0.552s 2.105us/record 60.802MB/s
bs= 256 count= 262144 0.557s 2.126us/record 120.423MB/s
bs= 512 count= 262144 0.572s 2.184us/record 234.471MB/s
bs= 1024 count= 262144 0.599s 2.286us/record 447.998MB/s
bs= 2048 count= 262144 0.656s 2.501us/record 818.834MB/s
bs= 4096 count= 262144 0.767s 2.926us/record 1399.933MB/s
bs= 8192 count= 262144 1.018s 3.883us/record 2109.512MB/s
bs= 16384 count= 131072 0.757s 5.776us/record 2836.329MB/s
bs= 32768 count= 131072 1.252s 9.549us/record 3431.527MB/s
bs= 65536 count= 65536 1.116s 17.026us/record 3849.261MB/s
bs= 131072 count= 32768 1.052s 32.093us/record 4084.183MB/s
bs= 262144 count= 16384 1.045s 63.790us/record 4109.505MB/s
bs= 524288 count= 8192 1.092s 133.292us/record 3933.372MB/s
bs=1048576 count= 4096 2.321s 566.655us/record 1850.465MB/s
bs=2097152 count= 2048 2.984s 1457.168us/record 1439.197MB/s
bs=4194304 count= 1024 3.431s 3350.625us/record 1251.798MB/s
bs=8388608 count= 512 3.456s 6750.234us/record 1242.714MB/s
================================================================
Raspberry Pi 2 running Raspbian GNU/Linux 10 armv7, kernel 5.10
$ ./bench_dd.py
bs= 1 count=1048576 2.294s 2.188us/record 0.457MB/s
bs= 2 count= 524288 1.155s 2.203us/record 0.908MB/s
bs= 4 count= 262144 0.573s 2.187us/record 1.829MB/s
bs= 8 count= 262144 0.581s 2.215us/record 3.611MB/s
bs= 16 count= 262144 0.579s 2.210us/record 7.239MB/s
bs= 32 count= 262144 0.582s 2.221us/record 14.405MB/s
bs= 64 count= 262144 0.767s 2.926us/record 21.874MB/s
bs= 128 count= 262144 0.725s 2.767us/record 46.261MB/s
bs= 256 count= 262144 0.794s 3.028us/record 84.557MB/s
bs= 512 count= 262144 0.773s 2.951us/record 173.523MB/s
bs= 1024 count= 262144 0.799s 3.050us/record 335.763MB/s
bs= 2048 count= 262144 1.093s 4.170us/record 491.168MB/s
bs= 4096 count= 131072 0.547s 4.170us/record 982.276MB/s
bs= 8192 count= 131072 1.039s 7.929us/record 1033.159MB/s
bs= 16384 count= 65536 0.771s 11.765us/record 1392.607MB/s
bs= 32768 count= 65536 1.511s 23.059us/record 1421.036MB/s
bs= 65536 count= 32768 2.009s 61.321us/record 1068.740MB/s
bs= 131072 count= 16384 1.858s 113.374us/record 1156.103MB/s
bs= 262144 count= 8192 2.055s 250.829us/record 1045.111MB/s
bs= 524288 count= 4096 2.036s 496.960us/record 1054.989MB/s
bs=1048576 count= 2048 2.070s 1010.869us/record 1037.301MB/s
bs=2097152 count= 1024 2.084s 2035.068us/record 1030.507MB/s
bs=4194304 count= 512 2.097s 4094.844us/record 1024.289MB/s
bs=8388608 count= 256 2.096s 8189.414us/record 1024.323MB/s
Overclocking https://wiki.debian.org/RaspberryPi#Overclocking_Pi_2
arm_freq=1000
core_freq=500
sdram_freq=400
over_voltage=0
over_voltage_sdram_p=0
over_voltage_sdram_i=0
over_voltage_sdram_c=0
$ ./bench_dd.py
bs= 1 count=1048576 2.071s 1.975us/record 0.506MB/s
bs= 2 count= 524288 1.038s 1.979us/record 1.011MB/s
bs= 4 count= 262144 0.520s 1.984us/record 2.016MB/s
bs= 8 count= 262144 0.520s 1.982us/record 4.036MB/s
bs= 16 count= 262144 0.524s 2.001us/record 7.997MB/s
bs= 32 count= 262144 0.524s 1.999us/record 16.006MB/s
bs= 64 count= 262144 0.692s 2.640us/record 24.246MB/s
bs= 128 count= 262144 0.654s 2.494us/record 51.329MB/s
bs= 256 count= 262144 0.653s 2.492us/record 102.746MB/s
bs= 512 count= 262144 0.672s 2.564us/record 199.718MB/s
bs= 1024 count= 262144 0.732s 2.792us/record 366.773MB/s
bs= 2048 count= 262144 0.785s 2.993us/record 684.160MB/s
bs= 4096 count= 262144 0.968s 3.694us/record 1108.962MB/s
bs= 8192 count= 262144 1.612s 6.148us/record 1332.376MB/s
bs= 16384 count= 131072 1.504s 11.471us/record 1428.238MB/s
bs= 32768 count= 65536 1.497s 22.840us/record 1434.649MB/s
bs= 65536 count= 32768 1.432s 43.706us/record 1499.482MB/s
bs= 131072 count= 16384 1.437s 87.693us/record 1494.671MB/s
bs= 262144 count= 8192 1.426s 174.119us/record 1505.548MB/s
bs= 524288 count= 4096 1.415s 345.540us/record 1517.302MB/s
bs=1048576 count= 2048 1.428s 697.305us/record 1503.756MB/s
bs=2097152 count= 1024 1.430s 1396.846us/record 1501.348MB/s
bs=4194304 count= 512 1.442s 2815.664us/record 1489.632MB/s
bs=8388608 count= 256 1.444s 5642.461us/record 1486.693MB/s
================================================================
HP e8300, CPU i7-3770
freebsd13% ./bench_dd.py
bs= 1 count=1048576 0.728s 0.694us/record 1.440MB/s
bs= 2 count=1048576 0.573s 0.547us/record 3.658MB/s
bs= 4 count=1048576 0.565s 0.539us/record 7.418MB/s
bs= 8 count=1048576 0.575s 0.548us/record 14.595MB/s
bs= 16 count=1048576 0.572s 0.546us/record 29.329MB/s
bs= 32 count=1048576 0.574s 0.548us/record 58.435MB/s
bs= 64 count=1048576 0.573s 0.546us/record 117.174MB/s
bs= 128 count=1048576 0.568s 0.542us/record 236.122MB/s
bs= 256 count=1048576 0.577s 0.550us/record 465.528MB/s
bs= 512 count=1048576 0.585s 0.558us/record 917.797MB/s
bs= 1024 count=1048576 0.591s 0.564us/record 1815.495MB/s
bs= 2048 count=1048576 0.610s 0.582us/record 3517.599MB/s
bs= 4096 count=1048576 0.648s 0.618us/record 6624.642MB/s
bs= 8192 count=1048576 0.716s 0.683us/record 12000.920MB/s
bs= 16384 count=1048576 0.886s 0.845us/record 19391.838MB/s
bs= 32768 count=1048576 1.414s 1.349us/record 24291.204MB/s
bs= 65536 count= 524288 1.167s 2.226us/record 29446.678MB/s
bs= 131072 count= 262144 1.049s 4.001us/record 32757.097MB/s
bs= 262144 count= 131072 0.996s 7.597us/record 34507.742MB/s
bs= 524288 count= 131072 1.938s 14.784us/record 35462.791MB/s
bs=1048576 count= 65536 1.954s 29.814us/record 35170.740MB/s
bs=2097152 count= 32768 1.978s 60.353us/record 34748.329MB/s
bs=4194304 count= 16384 2.007s 122.520us/record 34233.639MB/s
bs=8388608 count= 8192 2.103s 256.698us/record 32678.930MB/s
debian11$ ./bench_dd.py
bs= 1 count=1048576 0.558s 0.532us/record 1.880MB/s
bs= 2 count=1048576 0.550s 0.524us/record 3.814MB/s
bs= 4 count=1048576 0.551s 0.526us/record 7.611MB/s
bs= 8 count=1048576 0.550s 0.525us/record 15.252MB/s
bs= 16 count=1048576 0.550s 0.524us/record 30.509MB/s
bs= 32 count=1048576 0.550s 0.524us/record 61.048MB/s
bs= 64 count=1048576 0.553s 0.527us/record 121.398MB/s
bs= 128 count=1048576 0.556s 0.530us/record 241.471MB/s
bs= 256 count=1048576 0.565s 0.538us/record 475.482MB/s
bs= 512 count=1048576 0.583s 0.556us/record 921.523MB/s
bs= 1024 count=1048576 0.608s 0.580us/record 1764.989MB/s
bs= 2048 count=1048576 0.640s 0.611us/record 3353.923MB/s
bs= 4096 count=1048576 0.701s 0.669us/record 6126.015MB/s
bs= 8192 count=1048576 0.870s 0.830us/record 9870.674MB/s
bs= 16384 count=1048576 1.191s 1.136us/record 14427.529MB/s
bs= 32768 count= 524288 1.004s 1.915us/record 17109.038MB/s
bs= 65536 count= 262144 0.822s 3.135us/record 20902.551MB/s
bs= 131072 count= 262144 1.496s 5.705us/record 22973.575MB/s
bs= 262144 count= 131072 1.468s 11.200us/record 23406.614MB/s
bs= 524288 count= 65536 1.519s 23.171us/record 22626.825MB/s
bs=1048576 count= 32768 1.495s 45.614us/record 22988.023MB/s
bs=2097152 count= 16384 1.487s 90.750us/record 23109.237MB/s
bs=4194304 count= 8192 1.474s 179.918us/record 23312.281MB/s
bs=8388608 count= 4096 1.588s 387.625us/record 21641.067MB/s
"""
|
library/connecter/ansible/yaml/read2file.py | GNHJM/lykops | 141 | 12794776 | import os
from library.connecter.ansible.yaml import Yaml_Base
from library.utils.file import read_file
from library.utils.path import get_pathlist
class Read_File(Yaml_Base):
def router(self, this_path, this_basedir=None, yaml_tpye='main', preserve=True, together=False, name='', describe=''):
'''
        Router that checks whether the yaml syntax of a file is correct.
        :parameters
            this_path: file path
            this_basedir: base directory
            yaml_tpye: type of the yaml file (full_roles, main, include or roles)
            preserve: whether to write the result into the database
            together: whether to return the contents of all files under this main
            name: name under which the yaml content is stored in the database
            describe: description stored together with the yaml content
        :return
            a tuple; the first element is the execution result:
            on success (True, file content as a dict),
            on failure (False, reason of the failure)
'''
if yaml_tpye in ('full_roles' , 'main') :
result = self.main(this_path, preserve=preserve, together=together, name=name, describe=describe)
elif yaml_tpye == 'include' :
result = self.include(this_path, this_basedir=this_basedir, file_type='tasks', preserve=preserve, name=name, describe=describe)
elif yaml_tpye == 'roles' :
result = self.roles(this_path, this_basedir=this_basedir, preserve=preserve, together=together, name=name, describe=describe)
else :
self.logger.error('检测yaml文件的语法失败,原因:参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles')
return (False, '参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles')
return result
def main(self, filename, preserve=True, together=False, name='', describe=''):
'''
        Check whether a main file is syntactically correct; any include and/or
        roles it references are checked one by one.
        include: must be a relative path
        roles: must consist of letters and digits only
        :parameters
            filename: file path
            preserve: whether to write the result into the database
            together: whether to return the contents of all files under this main
            name: name under which the yaml content is stored in the database
            describe: description stored together with the yaml content
        :return
            a tuple; the first element is the execution result:
            on success (True, file content as a dict),
            on failure (False, reason of the failure)
'''
if preserve and together:
sub_preserve = False
else :
sub_preserve = preserve
result = self.yaml_loader(filename)
if result[0] :
(filename, content, yaml_data) = result[1:]
else :
self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,转化成yaml数据时失败,原因:' + result[1])
return (False, '文件' + filename + '转化成yaml数据时失败,' + result[1])
result = self.check_main(yaml_data)
if result[0] :
(roles_list, includefile_dict) = result[1:]
else :
self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1])
return (False, '文件' + filename + '未通过yaml语法检测,' + result[1])
this_basedir = os.path.dirname(filename)
include_content = {}
roles_content = {}
for file, file_type in includefile_dict.items() :
result = self.include(file, this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve)
if not result[0] :
self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1])
return (False, '文件' + filename + '中的include文件名为' + file + '未通过yaml语法检测,' + result[1])
else :
file = os.path.basename(file)
include_content.update({file:result[1]})
for roles in roles_list :
result = self.roles(roles, this_basedir=this_basedir, preserve=sub_preserve, together=together)
if result[0] :
include_content.update(result[2])
roles = os.path.basename(roles)
roles_content.update({roles:result[1]})
else :
self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,roles名为' + roles + '未通过yaml语法检测,原因:' + result[1])
return (False, '文件' + filename + '中的roles名为' + roles + '未通过yaml语法检测,' + result[1])
data = {
'main' : content,
'include': include_content,
'roles': roles_content,
}
if preserve :
result = self.write2db(name, data, 'main', describe=describe)
if not result[0] :
self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,但无法写入数据库,原因:' + result[1])
return (False, '文件' + filename + '通过yaml语法检测,但无法写入数据库' + result[1])
self.logger.info('检测yaml文件' + filename + '类型为full_roles或者main语法成功')
if together :
return (True, data)
else :
return (True, content)
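    # For reference, the shape of the aggregated `data` dict assembled above
    # (derived from the code in this method):
    #   {
    #       'main':    <raw text of the entry yaml file>,
    #       'include': {<include file name>: <raw text>, ...},
    #       'roles':   {<roles name>: {'tasks': <raw text>, 'vars': <raw text>, ...}, ...},
    #   }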
def include(self, file, this_basedir=None, file_type='main', preserve=True, name='', describe=''):
'''
        Check whether an include file is syntactically correct.
        :parameters
            this_basedir: directory of the file that references this include
            file: file path (relative to this_basedir when given)
            file_type: type of the file ('main', 'tasks' or 'var')
            preserve: whether to write the result into the database
            name: name under which the yaml content is stored in the database
            describe: description stored together with the yaml content
        :return
            a tuple; the first element is the execution result:
            on success (True, include file content (a dict, possibly empty)),
            on failure (False, reason of the failure)
'''
if file_type not in ('main', 'tasks', 'var') :
self.logger.error('检测yaml文件' + file + '类型为include语法失败,参数file_type错误')
return (False, '参数file_type错误')
result = self._isinclude(file)
if not result[0] :
self.logger.error('检测yaml文件' + file + '类型为include语法失败,参数file_type错误,原因:' + result[1])
return result
if this_basedir is None or not this_basedir :
filename = file
else :
try :
filename = this_basedir + '/' + file
except :
filename = file
result = self.yaml_loader(filename)
if result[0] :
(content, yaml_data) = result[2:]
else :
self.logger.error('检测yaml文件' + file + '类型为include语法失败,转化为yaml数据时失败,原因:' + result[1])
return (False, result[1])
result = self.check_include(yaml_data, file_type=file_type)
if not result[0] :
self.logger.error('检测yaml文件' + file + '类型为include语法失败,语法检测未通过,原因:' + result[1])
return (False, result[1])
if preserve :
result = self.write2db(name, content, 'include', describe=describe)
if not result[0] :
self.logger.error('检测yaml文件' + file + '类型为include语法失败,但无法写入数据库,原因:' + result[1])
return (False, '无法写入数据库' + result[1])
self.logger.info('检测yaml文件' + filename + '类型为include语法成功')
return (True, content)
def roles(self, roles_path, this_basedir=None, preserve=True, together=False, name='', describe=''):
'''
        Check whether a single roles directory is syntactically correct.
        :parameters
            this_basedir: directory of the main file that references the roles, e.g. if /opt/lykops/example/ansible/roles/nginx/main.yaml references a roles, this value is /opt/lykops/example/ansible/roles/nginx/
            roles_path: roles path as written in the referencing main file
            preserve: whether to write the result into the database
            together: whether to return the contents of all files under the roles
            name: name under which the yaml content is stored in the database
            describe: description stored together with the yaml content
        :return
            a tuple; the first element is the execution result:
            on success (True, contents of all files under the roles (a dict, possibly empty), contents of all include files referenced by the roles (a dict, possibly empty)),
            on failure (False, reason of the failure)
'''
content_dict = {}
if preserve and together:
sub_preserve = False
else :
sub_preserve = preserve
if not name :
name = roles_path
result = self._isrolesname(name)
if not result :
self.logger.error('检测yaml文件roles名为' + roles_path + '失败,roles名不符合本系统要求的,注:虽然原生ansible支持这样写')
return (False, '语法错误,roles名不符合本系统要求的,注:虽然原生ansible支持这样写')
else :
if this_basedir is None or not this_basedir:
this_roles_path = roles_path
else :
try :
this_roles_path = this_basedir + '/roles/' + roles_path
except :
this_roles_path = roles_path
include_content = {}
for this_dir in ('tasks', 'vars', 'handlers', 'meta', 'defaults') :
yaml_file = this_roles_path + '/' + this_dir + '/main.yaml'
result = read_file(yaml_file)
if not result[0] :
if this_dir == 'tasks' :
self.logger.error('检测yaml文件roles名为' + roles_path + '失败,' + this_dir + '/main.yaml不存在')
return (False, this_dir + '/main.yaml不存在')
continue
else :
content_dict[this_dir] = result[1]
temp_dir = this_roles_path + '/templates/'
content_dict['templates'] = {}
result = get_pathlist(temp_dir, get_death=0, max_size=4 * 1024 * 1024)
if result[0] :
temp_list = result[1]
for temp in temp_list :
result = read_file(temp)
if result[0] :
temp_file = os.path.basename(temp)
content_dict['templates'][temp_file] = result[1]
if not content_dict['templates'] :
del content_dict['templates']
result = self.check_roles(content_dict)
if result[0] :
includefile_dict = result[1]
for file, file_type in includefile_dict.items() :
result = self.include(file, this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve)
if not result[0] :
self.logger.error('检测yaml文件roles名为' + roles_path + '失败,roles包含的include文件' + file + '未通过语法检测,原因:' + result[1])
return (False, 'roles包含的include文件' + file + '未通过语法检测,' + result[1])
else :
include_content.update({file:result[1]})
else :
self.logger.error('检测yaml文件roles名为' + roles_path + '失败,' + this_dir + '/main.yaml语法错误,原因:' + result[1])
return (False, this_dir + '/main.yaml语法错误,' + result[1])
data = {
'main' : {},
'include': include_content,
'roles': {name:content_dict},
}
if preserve :
result = self.write2db(name, data, 'roles', describe=describe)
if not result[0] :
self.logger.error('检测yaml文件roles名为' + roles_path + '失败,无法写入数据库,' + result[1])
return (False, '无法写入数据库,' + result[1])
self.logger.info('检测yaml文件roles名为' + roles_path + '成功')
if together :
return (True, content_dict, include_content)
else :
return (True, {}, {})
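# Usage sketch (illustrative; the constructor arguments come from Yaml_Base and
# are defined elsewhere in lykops, so the instantiation below is an assumption):
#   reader = Read_File(...)
#   ok, content = reader.router('/path/to/playbook.yaml', yaml_tpye='main',
#                               preserve=False, name='demo playbook')
#   # ok is True/False; content is the parsed content on success, or the reason
#   # of the failure otherwise.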
|
lib-other/pylib/consensus/test/test_consensus.py | endolith/Truthcoin | 161 | 12794789 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for Truthcoin's consensus functions.
Verifies that the consensus algorithm works as expected.
Check test_answers.txt for expected results.
"""
from __future__ import division, unicode_literals, absolute_import
import os
import sys
import platform
import json
import numpy as np
import numpy.ma as ma
if platform.python_version() < "2.7":
unittest = __import__("unittest2")
else:
import unittest
HERE = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(HERE, os.pardir))
import consensus
def prp(o):
    print(json.dumps(o, indent=3, sort_keys=True))
class TestConsensus(unittest.TestCase):
def setUp(self):
self.votes_unmasked = np.array([
[1, 1, 0, 0],
[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 0],
[0, 0, 1, 1],
[0, 0, 1, 1],
])
self.votes = ma.masked_array(self.votes_unmasked, np.isnan(self.votes_unmasked))
def test_Factory(self):
outcome = consensus.Factory(self.votes)
        self.assertAlmostEqual(outcome["Certainty"], 0.228237569613, places=11)
def test_Factory_scaled(self):
scalar_decision_params = [
{"scaled": True, "min": 0.1, "max": 0.5},
{"scaled": True, "min": 0.2, "max": 0.7},
{"scaled": False, "min": 0, "max": 1},
{"scaled": False, "min": 0, "max": 1},
]
outcome = consensus.Factory(self.votes, Scales=scalar_decision_params)
        self.assertAlmostEqual(outcome["Certainty"], 0.618113325804, places=11)
def tearDown(self):
del self.votes_unmasked
del self.votes
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestConsensus)
unittest.TextTestRunner(verbosity=2).run(suite)
|
ikalog/ui/panel/preview.py | fetus-hina/IkaLog | 285 | 12794808 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import os.path
import threading
import wx
import cv2
from ikalog.utils import Localization
from ikalog.ui.events import *
_ = Localization.gettext_translation('IkaUI', fallback=True).gettext
class FileDropTarget(wx.FileDropTarget):
def __init__(self, observer):
wx.FileDropTarget.__init__(self)
self.observer = observer
def OnDropFiles(self, x, y, filenames):
self.observer.on_drop_files(x, y, filenames)
return True
class InputFilePanel(wx.Panel):
def __init__(self, *args, **kwargs):
wx.Panel.__init__(self, *args, **kwargs)
# This is used to determine if a file dialog is open or not.
self.prev_file_path = ''
# Textbox for input file
self.text_ctrl = wx.TextCtrl(self, wx.ID_ANY, '')
self.text_ctrl.Bind(wx.EVT_TEXT, self.on_text_input)
self.button = wx.Button(self, wx.ID_ANY, _('Browse'))
self.button.Bind(wx.EVT_BUTTON, self.on_button_click)
# Drag and drop
drop_target = FileDropTarget(self)
self.text_ctrl.SetDropTarget(drop_target)
top_sizer = wx.BoxSizer(wx.HORIZONTAL)
top_sizer.Add(self.text_ctrl, proportion=1)
top_sizer.Add(self.button)
self.SetSizer(top_sizer)
def should_open_file(self, file_path):
return os.path.isfile(file_path) and self.prev_file_path != file_path
def update_button_label(self):
file_path = self.text_ctrl.GetValue()
if self.should_open_file(file_path):
self.button.SetLabel(_('Open'))
else:
self.button.SetLabel(_('Browse'))
# wx event
def on_text_input(self, event):
self.update_button_label()
# wx event
def on_button_click(self, event):
file_path = self.text_ctrl.GetValue()
if self.should_open_file(file_path):
evt = InputFileAddedEvent(input_file=file_path)
wx.PostEvent(self, evt)
self.prev_file_path = file_path
self.update_button_label()
return
# file_path is invalid. Open a file dialog.
file_dialog = wx.FileDialog(self, _('Select a video file'))
if file_dialog.ShowModal() != wx.ID_OK:
return
file_path = file_dialog.GetPath()
self.text_ctrl.SetValue(file_path)
# Callback from wx.FileDropTarget.OnDropFiles
def on_drop_files(self, x, y, filenames):
if not filenames:
return
self.text_ctrl.SetValue(filenames[0])
class PreviewPanel(wx.Panel):
def SetEventHandlerEnable(self, obj, enable):
orig_state = obj.GetEvtHandlerEnabled()
obj.SetEvtHandlerEnabled(enable)
return orig_state
# IkaLog event
def on_show_preview(self, context):
img = context['engine'].get('preview', context['engine']['frame'])
if img is None:
return False
try:
self.lock.acquire()
self.latest_frame = cv2.resize(img, self.preview_size)
self.refresh_at_next = True
finally:
self.lock.release()
# wx event
def on_input_initialized(self, event):
self.show_header(event.source)
# wx event
def on_ikalog_pause(self, event):
self._pause = event.pause
self.draw_preview()
# wx event
def on_preview_click(self, event):
evt = IkalogPauseEvent(pause=(not self._pause))
wx.PostEvent(self, evt)
# wx event
def on_enter_preview(self, event):
self._enter = True
self.draw_preview()
# wx event
def on_leave_preview(self, event):
self._enter = False
self.draw_preview()
# wx event
def on_input_file_added(self, event):
# Propagate the event to the upper level.
wx.PostEvent(self, event)
source_message = {
'amarec': _('Capture through AmarecTV'),
'dshow_capture': _('HDMI Video input (DirectShow, recommended)'),
'opencv_capture': _('HDMI Video input (OpenCV driver)'),
'screen': _('Realtime Capture from desktop'),
'file': _('Read from pre-recorded video file (for testing)'),
}
def show_header(self, source):
self.video_input_source_text.SetLabel(
PreviewPanel.source_message.get(source, ''))
self.show_input_file((source == 'file'))
def show_input_file(self, show):
self.input_file_panel.Show(show)
self.Layout()
def draw_preview(self):
frame_rgb = None
try:
self.lock.acquire()
            if self.latest_frame is None:
                if self._prev_bmp:
                    # Keep showing the last frame; create the DC here because
                    # the normal drawing path below has not run yet.
                    dc = wx.ClientDC(self.preview_panel)
                    dc.DrawBitmap(self._prev_bmp, 0, 0)
                return False
width, height = self.preview_size
frame_rgb = cv2.cvtColor(self.latest_frame, cv2.COLOR_BGR2RGB)
finally:
self.lock.release()
if frame_rgb is None:
return False
bmp = wx.BitmapFromBuffer(width, height, frame_rgb)
dc = wx.ClientDC(self.preview_panel)
dc.DrawBitmap(bmp, 0, 0)
self._prev_bmp = bmp
if self._enter:
ox = int(width / 2)
oy = int(height / 2)
if self._pause:
# Draw a triangle representing 'play'.
dc.DrawPolygon([(ox - 20, oy - 30),
(ox - 20, oy + 30),
(ox + 20, oy)])
else:
# Draw two rectangles representing 'pause'.
dc.DrawRectangle(ox - 20, oy - 30, 15, 60)
dc.DrawRectangle(ox + 10, oy - 30, 15, 60)
# wx event
def OnTimer(self, event):
self.lock.acquire()
if self.latest_frame is None:
self.lock.release()
return
self.lock.release()
if not self.refresh_at_next:
return
self.draw_preview()
self.refresh_at_next = False
def __init__(self, *args, **kwargs):
self._prev_bmp = None
self._enter = False
self._pause = False
self.refresh_at_next = False
self.latest_frame = None
self.lock = threading.Lock()
wx.Panel.__init__(self, *args, **kwargs)
self.timer = wx.Timer(self)
self.timer.Start(100)
self.Bind(wx.EVT_TIMER, self.OnTimer, self.timer)
self.GetTopLevelParent().Bind(EVT_INPUT_INITIALIZED,
self.on_input_initialized)
self.GetTopLevelParent().Bind(EVT_IKALOG_PAUSE, self.on_ikalog_pause)
# Preview
self.preview_size = (640, 360)
# Preview image.
self.preview_panel = wx.Panel(self, wx.ID_ANY, size=self.preview_size)
self.preview_panel.Bind(wx.EVT_LEFT_UP, self.on_preview_click)
self.preview_panel.Bind(wx.EVT_ENTER_WINDOW, self.on_enter_preview)
self.preview_panel.Bind(wx.EVT_LEAVE_WINDOW, self.on_leave_preview)
# Video Input
self.video_input_title_text = wx.StaticText(
self, wx.ID_ANY, _('Video Input'))
self.video_input_source_text = wx.StaticText(self, wx.ID_ANY, '')
self.input_file_panel = InputFilePanel(self, wx.ID_ANY)
self.input_file_panel.Bind(EVT_INPUT_FILE_ADDED,
self.on_input_file_added)
self.show_input_file(False)
self.video_input_source_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.video_input_source_sizer.Add(
self.video_input_source_text, flag=wx.LEFT, border=10)
self.video_input_source_sizer.Add(self.input_file_panel, proportion=1)
# Sizer to set the width of the text box to 640.
self.video_input_sizer = wx.BoxSizer(wx.VERTICAL)
self.video_input_sizer.Add(self.video_input_title_text)
self.video_input_sizer.Add(self.video_input_source_sizer,
flag=wx.EXPAND | wx.ALL, border=5)
self.video_input_sizer.Add((640, 5))
# Top sizer
self.top_sizer = wx.BoxSizer(wx.VERTICAL)
self.top_sizer.Add(self.video_input_sizer, flag=wx.ALL, border=5)
self.top_sizer.Add(self.preview_panel)
self.SetSizer(self.top_sizer)
if __name__ == "__main__":
import sys
import wx
application = wx.App()
frame = wx.Frame(None, wx.ID_ANY, 'Preview', size=(640, 360))
preview = PreviewPanel(frame, size=(640, 360))
layout = wx.BoxSizer(wx.VERTICAL)
layout.Add(preview)
frame.SetSizer(layout)
frame.Show()
application.MainLoop()
|
3d-tracking/tools/visualize_kitti.py | sadjadasghari/3d-vehicle-tracking | 603 | 12794829 | import os
import re
import sys
import argparse
import json
import numpy as np
from glob import glob
import cv2
from utils.plot_utils import RandomColor
def parse_args():
parser = argparse.ArgumentParser(
description='Monocular 3D Tracking Visualizer',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('set', choices=['gta', 'kitti'])
parser.add_argument('split', choices=['train', 'val', 'test'],
help='Which data split to use in testing')
parser.add_argument('--session', default='623',
help='Name of the session, to separate exp')
parser.add_argument('--epoch', default='100',
help='How many epochs you used to separate exp')
parser.add_argument('--flag', default='kf3doccdeep_age15_aff0.1_hit0_80m_pd',
help='Flags for running evaluation code')
parser.add_argument('--save_vid', action='store_true', default=False,
help='Flags for saving video')
parser.add_argument('--save_txt', action='store_true', default=False,
help='Flags for saving txt')
parser.add_argument('--dry_run', action='store_true', default=False,
help='Show command without running')
parser.add_argument('--overwrite', action='store_true', default=False,
help='Overwrite the output files')
args = parser.parse_args()
return args
print(' '.join(sys.argv))
args = parse_args()
if args.set == 'kitti':
IMAGE_PATH = 'data/kitti_tracking/{SPLIT}ing/image_02/{SEQ}/*.png'.format(**{'SPLIT': args.split, 'SEQ': '{:04d}'})
re_pattern = re.compile('[0-9]{4}')
else:
IMAGE_PATH = 'data/gta5_tracking/{SPLIT}/image/{SEQ}/*.jpg'.format(**{'SPLIT': args.split, 'SEQ': '{}'})
re_pattern = re.compile('rec_(.{8})_(.+)_(.+)h(.+)m_(.+[0-9])')
SAVE_PATH = 'output/{SESS}_{EP}_{SET}_{SPLIT}_set/'.format(
**{'SESS': args.session, 'EP': args.epoch, 'SET': args.set, 'SPLIT': args.split})
out_name = '{SESS}_{EP}_{SET}_{SETTING}'.format(
**{'SESS': args.session, 'EP': args.epoch, 'SET': args.set, 'SETTING': args.flag})
FONT = cv2.FONT_HERSHEY_SIMPLEX
FOURCC = cv2.VideoWriter_fourcc(*'mp4v')
fps = 15
np.random.seed(777)
rm_color = RandomColor(30)
tid2color = {}
def mkdir(path):
if not os.path.isdir(path):
print("Making directory {}".format(path))
os.makedirs(path) # Use with care
def gen_result(out_path, out_name, save_vid=False, save_txt=True,
dry_run=False, overwrite=False):
print("Reading meta data...")
info = json.load(open('{}{}.json'.format(out_path, out_name), 'r'))
if not dry_run: mkdir('{}{}/data/'.format(out_path, out_name))
for seqid in range(len(info)):
file_seq = re_pattern.search(info[seqid]['filename']).group(0)
print('Reading {} from {}{}...'.format(file_seq, out_path, out_name))
if dry_run:
continue
seqout = []
vid_name = '{}{}/data/{}.mp4'.format(out_path, out_name, file_seq)
txt_name = '{}{}/data/{}.txt'.format(out_path, out_name, file_seq)
if not overwrite:
if not os.path.isfile(txt_name) and save_txt:
pass
elif not os.path.isfile(vid_name) and save_vid:
pass
else:
print("SKIP running. Generated file {} Found".format(txt_name))
continue
if save_vid:
images = sorted(glob(IMAGE_PATH.format(file_seq)))
img = cv2.imread(images[0])
vidsize = (img.shape[1], img.shape[0]) # height, width
out = cv2.VideoWriter(vid_name, FOURCC, fps, vidsize)
demoinfo = info[seqid]['frames']
for idx, frame in enumerate(demoinfo):
if save_vid:
img = cv2.imread(images[idx])
img = cv2.putText(img, str(idx), (20, 30),
cv2.FONT_HERSHEY_COMPLEX, 1,
(180, 180, 180), 2)
for trk in frame['hypotheses']:
x1, y1, x2, y2, conf = trk['det_box']
xc, yc = trk['xc'], trk['yc']
if save_vid:
if trk['id'] not in tid2color:
tid2color[trk['id']] = rm_color.get_random_color(scale=255)
img = cv2.rectangle(img, (int(xc-1), int(yc-1)), (int(xc+1), int(yc+1)),
tid2color[trk['id']], 2)
img = cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)),
tid2color[trk['id']], 4)
img = cv2.putText(img, str(int(trk['id'])), (int(x1), int(y1)),
cv2.FONT_HERSHEY_COMPLEX, 1,
tid2color[trk['id']], 2)
img = cv2.putText(img, str(int(trk['depth'])), (int(x2)-14, int(y2)),
cv2.FONT_HERSHEY_COMPLEX, 0.8,
tid2color[trk['id']], 2)
if save_txt:
'''
submit_txt = ' '.join([
str(idx),
str(int(trk['id'])),
'Car',
'-1 -1',
trk['alpha'],
str(x1), str(y1), str(x2), str(y2),
trk['dim'],
trk['loc'],
trk['rot'],
str(conf)])
'''
submit_txt = ' '.join([
str(idx),
str(int(trk['id'])),
'Car',
'-1 -1 -10',
str(x1), str(y1), str(x2), str(y2),
'-1 -1 -1',
'-1000 -1000 -1000 -10',
str(conf)])
#'''
submit_txt += '\n'
seqout.append(submit_txt)
if save_vid: out.write(img)
if save_txt:
print("{} saved.".format(txt_name))
with open(txt_name, 'w') as f:
f.writelines(seqout)
if save_vid:
print("{} saved.".format(vid_name))
out.release()
if __name__ == '__main__':
# Not using out_name, too slow
output_list = [os.path.splitext(item)[0] for item in os.listdir(SAVE_PATH) if item.endswith('_pd.json')]
my_list = ['none', 'kf2ddeep', 'kf3doccdeep', 'lstmdeep', 'lstmoccdeep']
for dir_name in output_list:
print(dir_name)
save_vid = args.save_vid
if save_vid:
is_in = False
for ml in my_list:
is_in = is_in or (ml in dir_name)
save_vid = is_in
gen_result(SAVE_PATH,
dir_name,
save_vid=save_vid,
save_txt=args.save_txt,
dry_run=args.dry_run,
overwrite=args.overwrite
)
|
test/single/test_task_service.py | Infi-zc/horovod | 7,676 | 12794832 | # Copyright 2021 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import io
import re
import unittest
from horovod.runner.common.service.task_service import BasicTaskService, BasicTaskClient
from horovod.runner.common.util import secret
class FaultyStream:
"""This stream raises an exception after some text has been written."""
def __init__(self, stream):
self.stream = stream
self.raised = False
def write(self, b):
if not self.raised and len(self.stream.getvalue()) > 1024:
self.raised = True
raise RuntimeError()
self.stream.write(b)
def close(self):
pass
class TaskServiceTest(unittest.TestCase):
cmd = 'for i in {1..10000}; do echo "a very very useful log line #$i"; done'
cmd_single_line = f'{cmd} | wc'
@staticmethod
def cmd_with(stdout, stderr):
return f"bash -c '{stderr} >&2 & {stdout}'"
def test_run_command(self):
key = secret.make_secret_key()
service = BasicTaskService('test service', 0, key, nics=None, verbose=2)
try:
client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=1)
client.run_command(self.cmd_with(self.cmd_single_line, self.cmd_single_line), {})
exit = client.wait_for_command_exit_code()
self.assertEqual(0, exit)
self.assertEqual((True, 0), client.command_result())
finally:
service.shutdown()
def test_stream_command_output(self):
self.do_test_stream_command_output(
self.cmd_with(self.cmd, self.cmd),
capture_stdout=True, capture_stderr=True,
prefix_output_with_timestamp=True
)
def test_stream_command_output_stdout(self):
self.do_test_stream_command_output(
self.cmd_with(self.cmd, self.cmd_single_line),
capture_stdout=True, capture_stderr=False,
prefix_output_with_timestamp=True
)
def test_stream_command_output_stderr(self):
self.do_test_stream_command_output(
self.cmd_with(self.cmd_single_line, self.cmd),
capture_stdout=False, capture_stderr=True,
prefix_output_with_timestamp=True
)
def test_stream_command_output_neither(self):
self.do_test_stream_command_output(
self.cmd_with(self.cmd_single_line, self.cmd_single_line),
capture_stdout=False, capture_stderr=False,
prefix_output_with_timestamp=True
)
def test_stream_command_output_un_prefixed(self):
self.do_test_stream_command_output(
self.cmd_with(self.cmd, self.cmd),
capture_stdout=True, capture_stderr=True,
prefix_output_with_timestamp=False
)
def do_test_stream_command_output(self,
command,
capture_stdout, capture_stderr,
prefix_output_with_timestamp):
stdout = io.StringIO()
stderr = io.StringIO()
key = secret.make_secret_key()
service = BasicTaskService('test service', 0, key, nics=None, verbose=2)
try:
client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=1)
stdout_t, stderr_t = client.stream_command_output(stdout, stderr)
client.run_command(command, {},
capture_stdout=capture_stdout, capture_stderr=capture_stderr,
prefix_output_with_timestamp=prefix_output_with_timestamp)
client.wait_for_command_termination(delay=0.2)
self.assertEqual((True, 0), client.command_result())
if stdout_t is not None:
stdout_t.join(1.0)
self.assertEqual(False, stdout_t.is_alive())
if stderr_t is not None:
stderr_t.join(1.0)
self.assertEqual(False, stderr_t.is_alive())
finally:
service.shutdown()
stdout = stdout.getvalue()
stderr = stderr.getvalue()
# remove timestamps from each line in outputs
if prefix_output_with_timestamp:
stdout_no_ts = re.sub('^[^[]+', '', stdout, flags=re.MULTILINE)
stderr_no_ts = re.sub('^[^[]+', '', stderr, flags=re.MULTILINE)
# test we are removing something (hopefully timestamps)
if capture_stdout:
self.assertNotEqual(stdout_no_ts, stdout)
if capture_stderr:
self.assertNotEqual(stderr_no_ts, stderr)
stdout = stdout_no_ts
stderr = stderr_no_ts
# remove prefix
stdout_no_prefix = re.sub('\[0\]<stdout>:', '', stdout, flags=re.MULTILINE)
stderr_no_prefix = re.sub('\[0\]<stderr>:', '', stderr, flags=re.MULTILINE)
# test we are removing something (hopefully prefixes)
if capture_stdout:
self.assertNotEqual(stdout_no_prefix, stdout)
if capture_stderr:
self.assertNotEqual(stderr_no_prefix, stderr)
stdout = stdout_no_prefix
stderr = stderr_no_prefix
if capture_stdout and capture_stderr:
# both streams should be equal
self.assertEqual(stdout, stderr)
# streams should have meaningful number of lines and characters
if capture_stdout:
self.assertTrue(len(stdout) > 1024)
self.assertTrue(len(stdout.splitlines()) > 10)
if capture_stderr:
self.assertTrue(len(stderr) > 1024)
self.assertTrue(len(stderr.splitlines()) > 10)
def test_stream_command_output_reconnect(self):
self.do_test_stream_command_output_reconnect(attempts=3, succeeds=True)
def test_stream_command_output_no_reconnect(self):
self.do_test_stream_command_output_reconnect(attempts=1, succeeds=None)
def do_test_stream_command_output_reconnect(self, attempts, succeeds):
key = secret.make_secret_key()
stdout = io.StringIO()
stderr = io.StringIO()
stdout_s = FaultyStream(stdout)
stderr_s = FaultyStream(stderr)
service = BasicTaskService('test service', 0, key, nics=None, verbose=2)
try:
client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=attempts)
stdout_t, stderr_t = client.stream_command_output(stdout_s, stderr_s)
client.run_command(self.cmd_with(self.cmd, self.cmd), {},
capture_stdout=True, capture_stderr=True,
prefix_output_with_timestamp=False)
client.wait_for_command_termination(delay=0.2)
terminated, exit = client.command_result()
self.assertEqual(True, terminated)
if succeeds is not None:
self.assertEqual(succeeds, exit == 0)
if stdout_t is not None:
stdout_t.join(1.0)
self.assertEqual(False, stdout_t.is_alive())
if stderr_t is not None:
stderr_t.join(1.0)
self.assertEqual(False, stderr_t.is_alive())
finally:
service.shutdown()
stdout = stdout.getvalue()
stderr = stderr.getvalue()
# we are likely to loose some lines, so output is hard to evaluate
if succeeds:
self.assertGreaterEqual(len(stdout), 1024)
self.assertGreater(len(stdout.splitlines()), 10)
self.assertTrue(stdout_s.raised)
self.assertGreaterEqual(len(stderr), 1024)
self.assertGreater(len(stderr.splitlines()), 10)
self.assertTrue(stderr_s.raised)
# assert stdout and stderr similarity (how many lines both have in common)
stdout = re.sub('\[0\]<stdout>:', '', stdout, flags=re.MULTILINE)
stderr = re.sub('\[0\]<stderr>:', '', stderr, flags=re.MULTILINE)
stdout_set = set(stdout.splitlines())
stderr_set = set(stderr.splitlines())
intersect = stdout_set.intersection(stderr_set)
self.assertGreater(len(intersect) / min(len(stdout_set), len(stderr_set)), 0.90)
else:
# we might have retrieved data only for one of stdout and stderr
# so we expect some data for at least one of them
self.assertGreaterEqual(len(stdout) + len(stderr), 1024)
self.assertGreater(len(stdout.splitlines()) + len(stderr.splitlines()), 10)
self.assertTrue(stdout_s.raised or stderr_s.raised)
|
cut_twist_process/cut_part.py | ericosmic/2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement | 886 | 12794837 | # -*- coding: utf-8 -*-
# @Time : 19-11-19 22:25
# @Author : <NAME>
# @Reference : None
# @File : cut_twist_join.py
# @IDE : PyCharm Community Edition
"""
将身份证正反面从原始图片中切分出来。
需要的参数有:
1.图片所在路径。
输出结果为:
切分后的身份证正反面图片。
"""
import os
import cv2
import numpy as np
def point_judge(center, bbox):
"""
    Order the corners of a rectangle.
    :param center: coordinates of the rectangle centre [x, y]
    :param bbox: rectangle corner coordinates [[x1, y1], [x2, y2], [x3, y3], [x4, y4]]
    :return: rectangle corners, in the order: bottom-left, bottom-right, top-left, top-right
"""
left = []
right = []
for i in range(4):
if bbox[i][0] > center[0]: # 只要是x坐标比中心点坐标大,一定是右边
right.append(bbox[i])
else:
left.append(bbox[i])
if right[0][1] > right[1][1]: # 如果y点坐标大,则是右上
right_down = right[1]
right_up = right[0]
else:
right_down = right[0]
right_up = right[1]
if left[0][1] > left[1][1]: # 如果y点坐标大,则是左上
left_down = left[1]
left_up = left[0]
else:
left_down = left[0]
left_up = left[1]
return left_down, right_down, left_up, right_up
def gray_and_fliter(img, image_name='1.jpg', save_path='./'):  # grayscale + filtering; the last two parameters are only used for debugging
    """
    Convert the image to grayscale and filter it.
    :param img: input RGB image
    :param image_name: input image name, used for testing
    :param save_path: directory where the filtered result is saved, used for testing
    :return: grayscale, filtered image
"""
# img = cv2.imread(image_path + image_name) # 读取图片
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # 转换为灰度图片
# cv2.imwrite(os.path.join(save_path, image_name + '_gray.jpg'), img_gray) # 保存,方便查看
img_blurred = cv2.filter2D(img_gray, -1,
kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32)) # 对图像进行滤波,是锐化操作
img_blurred = cv2.filter2D(img_blurred, -1, kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32))
# cv2.imwrite(os.path.join(save_path, img_name + '_blurred.jpg'), img_blurred) # 锐化, 这里的卷积核可以更改
return img_blurred
def gradient_and_binary(img_blurred, image_name='1.jpg', save_path='./'):  # binarize the grayscale image; the last two parameters are only used for debugging
    """
    Compute the gradient and binarize the image.
    :param img_blurred: filtered image
    :param image_name: image name, used for testing
    :param save_path: save directory, used for testing
    :return: binarized image
"""
gradX = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=1, dy=0)
gradY = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=0, dy=1)
img_gradient = cv2.subtract(gradX, gradY)
img_gradient = cv2.convertScaleAbs(img_gradient) # sobel算子,计算梯度, 也可以用canny算子替代
# 这里改进成自适应阈值,貌似没用
img_thresh = cv2.adaptiveThreshold(img_gradient, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, -3)
# cv2.imwrite(os.path.join(save_path, img_name + '_binary.jpg'), img_thresh) # 二值化 阈值未调整好
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
img_closed = cv2.morphologyEx(img_thresh, cv2.MORPH_CLOSE, kernel)
img_closed = cv2.morphologyEx(img_closed, cv2.MORPH_OPEN, kernel)
img_closed = cv2.erode(img_closed, None, iterations=9)
img_closed = cv2.dilate(img_closed, None, iterations=9) # 腐蚀膨胀
# 这里调整了kernel大小(减小),腐蚀膨胀次数后(增大),出错的概率大幅减小
return img_closed
def find_bbox(img, img_closed):  # locate the ID card front/back regions
    """
    Decide on and crop out the ID card front/back regions based on the binarized image.
    :param img: original RGB image
    :param img_closed: binarized image
    :return: ID card front/back regions
"""
(contours, _) = cv2.findContours(img_closed.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) # 求出框的个数
# 这里opencv如果版本不对(4.0或以上)会报错,只需把(contours, _)改成 (_, contours, _)
contours = sorted(contours, key=cv2.contourArea, reverse=True) # 按照面积大小排序
countours_res = []
for i in range(0, len(contours)):
area = cv2.contourArea(contours[i]) # 计算面积
if (area <= 0.4 * img.shape[0] * img.shape[1]) and (area >= 0.05 * img.shape[0] * img.shape[1]):
# 人为设定,身份证正反面框的大小不会超过整张图片大小的0.4,不会小于0.05(这个参数随便设置的)
rect = cv2.minAreaRect(contours[i]) # 最小外接矩,返回值有中心点坐标,矩形宽高,倾斜角度三个参数
box = cv2.boxPoints(rect)
left_down, right_down, left_up, right_up = point_judge([int(rect[0][0]), int(rect[0][1])], box)
src = np.float32([left_down, right_down, left_up, right_up]) # 这里注意必须对应
dst = np.float32([[0, 0], [int(max(rect[1][0], rect[1][1])), 0], [0, int(min(rect[1][0], rect[1][1]))],
[int(max(rect[1][0], rect[1][1])),
int(min(rect[1][0], rect[1][1]))]]) # rect中的宽高不清楚是个怎么机制,但是对于身份证,肯定是宽大于高,因此加个判定
m = cv2.getPerspectiveTransform(src, dst) # 得到投影变换矩阵
result = cv2.warpPerspective(img, m, (int(max(rect[1][0], rect[1][1])), int(min(rect[1][0], rect[1][1]))),
flags=cv2.INTER_CUBIC) # 投影变换
countours_res.append(result)
return countours_res # 返回身份证区域
def find_cut_line(img_closed_original):  # handle the case where front and back regions stick together: use the minimum-density row as the dividing line
    """
    Forcefully split the merged region according to a heuristic rule.
    :param img_closed_original: binarized image
    :return: processed binarized image
"""
img_closed = img_closed_original.copy()
img_closed = img_closed // 250
#print(img_closed.shape)
width_sum = img_closed.sum(axis=1) # 沿宽度方向求和,统计宽度方向白点个数
start_region_flag = 0
start_region_index = 0 # 身份证起始点高度值
end_region_index = 0 # 身份证结束点高度值
for i in range(img_closed_original.shape[0]): # 1000是原始图片高度值,当然, 这里也可以用 img_closed_original.shape[0]替代
if start_region_flag == 0 and width_sum[i] > 330:
start_region_flag = 1
start_region_index = i # 判定第一个白点个数大于330的是身份证区域的起始点
if width_sum[i] > 330:
end_region_index = i # 只要白点个数大于330,便认为是身份证区域,更新结束点
    # Height (row) within the card region that has the fewest white pixels; treat it as the boundary between front and back.
    # argsort is applied only to the slice of width_sum between the detected start and end, so the start height must be added back to the result.
min_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[0]
img_closed_original[min_line_position][:] = 0
for i in range(1, 11): # 参数可变,分割10个点
temp_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[i]
if abs(temp_line_position - min_line_position) < 30: # 限定范围,在最小点距离【-30, 30】的区域内
img_closed_original[temp_line_position][:] = 0 # 强制变为0
return img_closed_original
def cut_part_img(img, cut_percent):
"""
    Trim the ID card edges along both the width and the height directions.
    :param img: ID card region
    :param cut_percent: fraction of the size to trim away
    :return: trimmed ID card region
"""
height, width, _ = img.shape
height_num = int(height * cut_percent) # 需要裁剪的高度值
h_start = 0 + height_num // 2 # 左右等比例切分
h_end = height - height_num // 2 - 1
width_num = int(width * cut_percent) # 需要裁剪的宽度值
w_start = 0 + width_num // 2
w_end = width - width_num // 2 - 1
return img[h_start:h_end, w_start:w_end] # 返回裁剪后的图片
def preprocess_cut_one_img(img_path, img_name, save_path='./save_imgs/', problem_path='./problem_save/'):  # process a single image
    """
    Crop the ID card front/back regions out of a single image.
    :param img_path: directory containing the image
    :param img_name: image file name
    :param save_path: directory for results, used for testing
    :param problem_path: directory for intermediate results of failed images, used for testing
    :return: ID card front/back images
"""
img_path_name = os.path.join(img_path, img_name)
if not os.path.exists(img_path_name): # 判断图片是否存在
print('img {name} is not exits'.format(name=img_path_name))
return 1, [] # 图片不存在,直接返回,报错加一
img = cv2.imread(img_path_name) # 读取图片
img_blurred = gray_and_fliter(img, img_name) # 灰度化并滤波
img_t = cv2.filter2D(img, -1, kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32))
# 对图像进行锐化
img_binary = gradient_and_binary(img_blurred) # 二值化
res_bbox = find_bbox(img_t, img_binary) # 切分正反面
if len(res_bbox) != 2: # 异常处理
print('Error happened when cut img {name}, try exception cut program '.format(name=img_path_name))
# cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_blurred.jpg'), img_blurred)
# cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_binary.jpg'), img_binary)
# cv2.imwrite(os.path.join(problem_path, img_name), img) # 调试用,保存中间处理结果
img_binary = find_cut_line(img_binary) # 强制分割正反面
res_bbox = find_bbox(img_t, img_binary)
if len(res_bbox) != 2: # 纠正失败
print('Failed to cut img {name}, exception program end'.format(name=img_path_name))
return 1, None
else: # 纠正成功
print('Correctly cut img {name}, exception program end'.format(name=img_path_name))
return 0, res_bbox
else: # 裁剪过程正常
# cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_0.jpg'), cut_part_img(res_bbox[0], 0.0))
# cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_1.jpg'), cut_part_img(res_bbox[1], 0.0))
# cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_original.jpg'), img)
return 0, res_bbox
def process_img(img_path, save_path, problem_path):
"""
    Split every image in a directory.
    :param img_path: directory containing the images
    :param save_path: directory for results
    :param problem_path: directory for problem images
:return: None
"""
if not os.path.exists(img_path): # 判断图片路径是否存在
print('img path {name} is not exits, program break.'.format(name=img_path))
return
if not os.path.exists(save_path): # 保存路径不存在,则创建路径
os.makedirs(save_path)
if not os.path.exists(problem_path): # 保存路径不存在,则创建路径
os.makedirs(problem_path)
img_names = os.listdir(img_path)
error_count = 0
error_names = []
for img_name in img_names:
error_temp, res_bbox = preprocess_cut_one_img(img_path, img_name, save_path, problem_path)
error_count += error_temp
if error_temp == 0:
cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_0.jpg'), cut_part_img(res_bbox[0], 0.0))
cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_1.jpg'), cut_part_img(res_bbox[1], 0.0))
else:
error_names.append(img_name)
print('total error number is: ', error_count)
print('error images mame :')
for error_img_name in error_names:
print(error_img_name)
return
if __name__ == '__main__':
origin_img_path = './problem_imgs/'
cutted_save_path = './res_imgs/'
cut_problem_path = './temp_imgs/'
#process_img(img_path=origin_img_path, save_path=cutted_save_path, problem_path=cut_problem_path)
|
finance_ml/sampling/utils.py | BTETON/finance_ml | 446 | 12794839 | import pandas as pd
def get_ind_matrix(bar_idx, t1):
ind_m = pd.DataFrame(0, index=bar_idx,
columns=range(t1.shape[0]))
for i, (t0_, t1_) in enumerate(t1.iteritems()):
ind_m.loc[t0_:t1_, i] = 1
return ind_m
def get_avg_uniq(ind_m, c=None):
if c is None:
c = ind_m.sum(axis=1)
ind_m = ind_m.loc[c > 0]
c = c.loc[c > 0]
u = ind_m.div(c, axis=0)
avg_u = u[u > 0].mean()
avg_u = avg_u.fillna(0)
return avg_u
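if __name__ == '__main__':
    # Toy example (not from the library): three bars and two label events whose
    # end bars are given by t1. Note that get_ind_matrix() relies on
    # Series.iteritems(), so this needs a pandas version that still provides it.
    bar_idx = pd.date_range('2020-01-01', periods=3, freq='D')
    t1 = pd.Series(bar_idx[[1, 2]], index=bar_idx[[0, 1]])
    ind_m = get_ind_matrix(bar_idx, t1)
    print(ind_m)
    print(get_avg_uniq(ind_m))  # average uniqueness of each label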
|
atlas/foundations_contrib/src/test/helpers/test_lazy_redis.py | DeepLearnI/atlas | 296 | 12794846 | import unittest
from mock import Mock
from foundations_contrib.helpers.lazy_redis import LazyRedis
class TestLazyRedis(unittest.TestCase):
class MockObject(object):
def __init__(self):
self.value = 5
self.name = 'mock'
def setUp(self):
pass
def test_get_attr_returns_attribute_value(self):
lazy_redis = LazyRedis(self._callback)
self.assertEqual(lazy_redis.value, 5)
def test_get_attr_returns_attribute_name(self):
lazy_redis = LazyRedis(self._callback)
self.assertEqual(lazy_redis.name, 'mock')
def test_get_attr_raises_attribute_error(self):
lazy_redis = LazyRedis(self._callback)
with self.assertRaises(AttributeError) as context:
lazy_redis.redis
self.assertIn("'MockObject' object has no attribute 'redis'",
context.exception.args)
def test_get_attr_raises_attribute_error_different_attribute(self):
lazy_redis = LazyRedis(self._callback)
with self.assertRaises(AttributeError) as context:
lazy_redis.potato
self.assertIn("'MockObject' object has no attribute 'potato'",
context.exception.args)
def _callback(self):
return self.MockObject()
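# For reference, the behaviour exercised by these tests implies a lazy proxy
# along these lines (a sketch, not the actual foundations_contrib source):
#   class LazyRedis(object):
#       def __init__(self, connection_callback):
#           self._connection_callback = connection_callback
#       def __getattr__(self, name):
#           return getattr(self._connection_callback(), name)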
|
OnlineStudy/rbac/service/routers.py | NanRenTeam-9/MongoMicroCourse | 132 | 12794873 | from collections import OrderedDict
from django.utils.module_loading import import_string
from django.conf import settings
from django.urls.resolvers import URLResolver, URLPattern
import re
def check_url_exclude(url):
for regex in settings.AUTO_DISCOVER_EXCLUDE:
if re.match(regex, url):
return True
def recursive_url(pre_namespace, pre_url, urlpattern, url_order_dict):
"""
    Recursively discover urls.
    :param pre_namespace: parent namespace (alias)
    :param pre_url: url prefix
    :param urlpattern: url pattern list (routing table)
    :param url_order_dict: ordered url dict used to collect every route found during the recursion
:return:
"""
for item in urlpattern:
        if isinstance(item, URLPattern):  # a plain url pattern, not an include()
if not item.name:
continue
if pre_namespace:
name = '%s:%s' % (pre_namespace, item.name)
else:
name = item.name
url = pre_url + item.pattern.regex.pattern
            url = url.replace('^', '').replace('$', '')  # strip the regex anchors (^ and $)
if check_url_exclude(url):
continue
url_order_dict[name] = {'name': name, 'url': url}
        elif isinstance(item, URLResolver):  # an include(); recurse into it
if pre_namespace:
if item.namespace:
namespace = '%s:%s' % (pre_namespace, item.namespace)
else:
                    # namespace = item.namespace  # alternative way to write this
namespace = pre_namespace
else:
if item.namespace:
namespace = item.namespace
else:
namespace = None
# print(item.pattern.regex.pattern)
recursive_url(namespace, pre_url + item.pattern.regex.pattern, item.url_patterns, url_order_dict)
def get_all_url_dict():
url_order_dict = OrderedDict()
root = import_string(settings.ROOT_URLCONF)
recursive_url(None, '/', root.urlpatterns, url_order_dict)
return url_order_dict
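# Usage sketch (requires a configured Django project, e.g. inside a management
# command or a view; the import path below mirrors this file's location and is
# illustrative):
#   from rbac.service.routers import get_all_url_dict
#   for name, item in get_all_url_dict().items():
#       print(name, item['url'])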
|
baselines/neural_best_buddies/get_missing.py | iviazovetskyi/rewriting | 526 | 12794875 | import os
from netdissect import pidfile
from options.options import Options
from tqdm import tqdm
opt = Options().parse()
def get_imgs():
img_nums = sorted([int(f.strip().split(f'{base_name}_')[1].split('.')[0]) for f in os.listdir(opt.source)])
file_names = [f'{base_name}_{num}.png' for num in img_nums]
return img_nums, file_names
def get_imgnums(root):
base_name = os.path.basename(root)
img_nums = sorted([int(f.strip().split(f'{base_name}_')[1].split('.')[0]) for f in os.listdir(root)])
file_names = [f'{base_name}_{num}.png' for num in img_nums]
return list(zip(img_nums, file_names))[:10000]
def check_missing(src_root, corr_root):
dne = []
for imgnum, file_path in tqdm(get_imgnums(src_root)):
if not os.path.exists(os.path.join(corr_root, str(imgnum), 'BtoA.npy')):
dne.append(imgnum)
return dne
missing = check_missing(opt.source, opt.results_dir)
base_name = os.path.basename(opt.source)
def main():
import numpy as np
from models import vgg19_model
from algorithms import neural_best_buddies as NBBs
from util import util
from util import MLS
vgg19 = vgg19_model.define_Vgg19(opt)
img_nums, images = get_imgs()
for imgnum in tqdm(missing):
print(imgnum)
save_dir = os.path.join(opt.results_dir, str(imgnum))
if os.path.exists(os.path.join(save_dir, 'BtoA.npy')):
continue
try:
print('Working on', imgnum)
source_path = os.path.join(opt.source, f'{base_name}_{imgnum}.png')
A = util.read_image(source_path, opt.imageSize)
B = util.read_image(opt.target, opt.imageSize)
print(A.shape, B.shape)
nbbs = NBBs.sparse_semantic_correspondence(vgg19, opt.gpu_ids, opt.tau,
opt.border_size, save_dir,
opt.k_per_level, opt.k_final,
opt.fast)
points = nbbs.run(A, B)
mls = MLS.MLS(v_class=np.int32)
mls.run_MLS_in_folder(root_folder=save_dir)
except Exception as e:
print(e)
with open(os.path.join(save_dir, 'no_correspondence.txt'), 'w') as f:
f.write('')
if __name__ == "__main__":
main()
|
scripts/deepimpact/brute-force.py | d1shs0ap/pyserini | 451 | 12794942 | import argparse
import json
import os
from scipy.sparse import csr_matrix
from tqdm import tqdm
import numpy as np
from multiprocessing import Pool, Manager
def token_dict_to_sparse_vector(token_dict, token2id):
matrix_row, matrix_col, matrix_data = [], [], []
tokens = token_dict.keys()
col = []
data = []
for tok in tokens:
if tok in token2id:
col.append(token2id[tok])
data.append(token_dict[tok])
matrix_row.extend([0] * len(col))
matrix_col.extend(col)
matrix_data.extend(data)
vector = csr_matrix((matrix_data, (matrix_row, matrix_col)), shape=(1, len(token2id)))
return vector
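# Example (illustrative): with token_dict={'hello': 2.0, 'oov': 1.0} and
# token2id={'hello': 0, 'world': 1}, the function returns a 1x2 csr_matrix with
# 2.0 in column 0; tokens absent from token2id (here 'oov') are silently dropped.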
parser = argparse.ArgumentParser()
parser.add_argument('--corpus', type=str, help='path to corpus with vectors', required=True)
parser.add_argument('--topics', type=str, help='path to topics with vectors', required=True)
parser.add_argument('--tokens', type=str, help='path to token list', required=True)
parser.add_argument('--run', type=str, help='path to run file', required=True)
parser.add_argument('--threads', type=int, help='threads for hnsw', required=False, default=12)
args = parser.parse_args()
token2id = {}
with open(args.tokens) as tok_f:
for idx, line in enumerate(tok_f):
tok = line.rstrip()
token2id[tok] = idx
corpus = []
for file in sorted(os.listdir(args.corpus)):
file = os.path.join(args.corpus, file)
if file.endswith('json') or file.endswith('jsonl'):
print(f'Loading {file}')
with open(file, 'r') as f:
for idx, line in enumerate(tqdm(f.readlines())):
info = json.loads(line)
corpus.append(info)
ids = []
vectors = []
matrix_row, matrix_col, matrix_data = [], [], []
for i, d in enumerate(tqdm(corpus)):
weight_dict = d['vector']
tokens = weight_dict.keys()
col = [token2id[tok] for tok in tokens]
data = weight_dict.values()
matrix_row.extend([i] * len(weight_dict))
matrix_col.extend(col)
matrix_data.extend(data)
ids.append(d['id'])
vectors = csr_matrix((matrix_data, (matrix_row, matrix_col)), shape=(len(corpus), len(token2id)))
topic_ids = []
topic_vectors = []
with open(args.topics) as topic_f:
for line in topic_f:
info = json.loads(line)
topic_ids.append(info['id'])
topic_vectors.append(token_dict_to_sparse_vector(info['vector'], token2id))
vectors_T = vectors.T
manager = Manager()
results = manager.dict()
def run_search(idx):
global results
qid = topic_ids[idx]
t_vec = topic_vectors[idx]
scores = np.array(t_vec.dot(vectors_T).todense())[0]
top_idx = sorted(range(len(scores)), key=lambda x: scores[x], reverse=True)[:1000]
result = [(ids[x], scores[x]) for x in top_idx]
results[qid] = result
with Pool(args.threads) as p:
for _ in tqdm(p.imap_unordered(run_search, list(range(len(topic_ids)))), total=len(topic_ids)):
pass
with open(args.run, 'w') as f:
for qid in results:
for idx, item in enumerate(results[qid]):
did = item[0]
score = item[1]
f.write(f'{qid} Q0 {did} {idx+1} {score} bf\n')
|
notebooks/snippets/nbody/create_n.py | IsabelAverill/Scipy-2017---Numba | 149 | 12794944 | @njit
def create_n_random_particles(n, m, domain=1):
'''
Creates `n` particles with mass `m` with random coordinates
between 0 and `domain`
'''
parts = numpy.zeros((n), dtype=particle_dtype)
#attribute access only in @jitted function
for p in parts:
p.x = numpy.random.random() * domain
p.y = numpy.random.random() * domain
p.z = numpy.random.random() * domain
p.m = m
p.phi = 0
return parts
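# Note: this notebook snippet relies on `numpy`, `njit` (from numba) and a structured
# `particle_dtype` defined in earlier notebook cells. A minimal sketch of those assumed
# definitions (the exact dtype used in the notebook may differ):
#   import numpy
#   from numba import njit
#   particle_dtype = numpy.dtype({'names': ['x', 'y', 'z', 'm', 'phi'],
#                                 'formats': [numpy.double] * 5})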
|
A1014280203/6/6.py | saurabh896/python-1 | 3,976 | 12794947 | import nltk
import string
import os
# simply extend word like: it's => it is
def extend_word(text):
if text.find('\'') > 0:
old2new = dict()
words = text.split()
for word in words:
if word.find('\'') > 0:
parts = word.split('\'')
if parts[1] == 'm':
parts[1] = 'am'
elif parts[1] == 's':
parts[1] = 'is'
elif parts[1] == 're':
parts[1] = 'are'
elif parts[1] == 't':
parts[1] = 'not'
elif parts[1] == 've':
parts[1] = 'have'
elif parts[1] == 'll':
parts[1] = 'will'
elif parts[1] == 'd':
if words[words.index(word) + 1] == 'better':
parts[1] = 'had'
else:
parts[1] = 'would'
if parts[0].endswith('n'):
parts[0] = parts[0][:-1]
old2new[word] = ' '.join(parts)
_text = text
for old_word in old2new.keys():
_text = _text.replace(old_word, old2new[old_word])
return _text
def return_order_key(record):
return record[1]
def show_important_word(records):
# only this function was changed
items = sorted(records.items(), key=return_order_key, reverse=True)
# frequency of word
freq = 0
for item in items:
word, tag = nltk.pos_tag([item[0]])[0]
if tag.startswith('NN'):
print(word)
if item[1] < freq:
return
freq = item[1]
# no appropriate word found
if not freq:
print(items[0][0])
def process_file(filename):
with open(filename, 'r') as file:
article = file.read()
no_pun_text = article
_punctuation = string.punctuation.replace('\'', '')
    # delete punctuation except the apostrophe
for pun in _punctuation:
no_pun_text = no_pun_text.replace(pun, '')
complete_text = extend_word(no_pun_text)
records = dict()
for word in complete_text.lower().split():
records[word] = records.get(word, 0) + 1
print('='*30)
print('current file:', filename)
print('-'*20)
show_important_word(records)
def process_files(path='.'):
files = os.listdir(path)
for file in files:
if file.endswith('.txt'):
process_file(os.path.join(path, file))
process_files() |
tests/test_logic/test_tree/test_functions.py | cdhiraj40/wemake-python-styleguide | 1,931 | 12794962 | import pytest
from wemake_python_styleguide.logic.tree import functions
@pytest.mark.parametrize(('function_call', 'function_name'), [
# Simple builtin functions
('print("Hello world!")', 'print'),
('int("10")', 'int'),
('bool(1)', 'bool'),
('open("/tmp/file.txt", "r")', 'open'),
('str(10)', 'str'),
# Functions in modules
('datetime.timedelta(days=1)', 'datetime.timedelta'),
('cmath.sqrt(100)', 'cmath.sqrt'),
# Functions in (made up) objects
('dt.strftime("%H:%M")', 'dt.strftime'),
('obj.funct()', 'obj.funct'),
])
def test_given_function_called_no_split(
parse_ast_tree, function_call: str, function_name: str,
) -> None:
"""Test given_function_called without splitting the modules."""
tree = parse_ast_tree(function_call)
node = tree.body[0].value
called_function = functions.given_function_called(node, [function_name])
assert called_function == function_name
@pytest.mark.parametrize(('function_call', 'function_name'), [
# Simple builtin functions
('print("Hello world!")', 'print'),
('int("10")', 'int'),
('bool(1)', 'bool'),
('open("/tmp/file.txt", "r")', 'open'),
('str(10)', 'str'),
# Functions in modules
('datetime.timedelta(days=1)', 'timedelta'),
('cmath.sqrt(100)', 'sqrt'),
# Functions in (made up) objects
('dt.strftime("%H:%M")', 'strftime'),
('obj.funct()', 'funct'),
])
def test_given_function_called_with_split(
parse_ast_tree, function_call: str, function_name: str,
) -> None:
"""Test given_function_called splitting the modules."""
tree = parse_ast_tree(function_call)
node = tree.body[0].value
called_function = functions.given_function_called(
node,
[function_name],
split_modules=True,
)
assert called_function == function_name
|
microsoft_problems/problem_9.py | loftwah/Daily-Coding-Problem | 129 | 12794964 | """This problem was asked by Microsoft.
Using a read7() method that returns 7 characters from a file, implement readN(n) which reads n characters.
For example, given a file with the content “Hello world”, three successive read7() calls return “Hello w”, “orld”, and then “”.
""" |
weld-python/weld/grizzly/core/indexes/base.py | tustvold/weld | 2,912 | 12794966 | from abc import ABC
class Index(ABC):
"""
Base class for an index in Grizzly.
"""
pass
|
tests/pyconverter-test/cases/array_generics2.py | jaydeetay/pxt | 977 | 12794984 | obstacles: List[List[number]] = []
obstacles.removeAt(0).removeAt(0) |
tests/test_model_field_list.py | havron/wtforms-alchemy | 161 | 12795024 | import sqlalchemy as sa
from wtforms.fields import FormField
from wtforms_components import PassiveHiddenField
from tests import FormRelationsTestCase, MultiDict
from wtforms_alchemy import ModelFieldList, ModelForm
class ModelFieldListTestCase(FormRelationsTestCase):
def create_models(self):
class Event(self.base):
__tablename__ = 'event'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255), nullable=False)
class Location(self.base):
__tablename__ = 'location'
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255), nullable=True)
event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id))
event = sa.orm.relationship(Event, backref='locations')
self.Event = Event
self.Location = Location
def save(self, event=None, data=None):
if not data:
data = {
'name': u'Some event',
'locations-0-name': u'Some location',
'locations-0-description': u'Some description'
}
if not event:
event = self.Event()
self.session.add(event)
form = self.EventForm(MultiDict(data))
else:
form = self.EventForm(MultiDict(data), obj=event)
form.validate()
form.populate_obj(event)
self.session.commit()
return event
class TestReplaceStrategy(ModelFieldListTestCase):
def create_forms(self):
class LocationForm(ModelForm):
class Meta:
model = self.Location
class EventForm(ModelForm):
class Meta:
model = self.Event
locations = ModelFieldList(FormField(LocationForm))
self.LocationForm = LocationForm
self.EventForm = EventForm
def test_assigment_and_deletion(self):
self.save()
event = self.session.query(self.Event).first()
assert event.locations[0].name == u'Some location'
data = {
'name': u'Some event'
}
form = self.EventForm(MultiDict(data))
form.validate()
form.populate_obj(event)
self.session.commit()
event = self.session.query(self.Event).first()
assert event.locations == []
class TestUpdateStrategy(ModelFieldListTestCase):
def create_models(self):
class Event(self.base):
__tablename__ = 'event'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255), nullable=False)
class Location(self.base):
__tablename__ = 'location'
TYPES = (u'', u'football field', u'restaurant')
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255), nullable=True)
description = sa.Column(sa.Unicode(255), default=u'')
type = sa.Column(
sa.Unicode(255),
info={'choices': zip(TYPES, TYPES)},
default=u''
)
event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id))
event = sa.orm.relationship(Event, backref='locations')
def __repr__(self):
return 'Location(id=%r, name=%r)' % (self.id, self.name)
self.Event = Event
self.Location = Location
def create_forms(self):
class LocationForm(ModelForm):
class Meta:
model = self.Location
only = ['name', 'description', 'type']
id = PassiveHiddenField()
class EventForm(ModelForm):
class Meta:
model = self.Event
locations = ModelFieldList(
FormField(LocationForm),
population_strategy='update'
)
self.LocationForm = LocationForm
self.EventForm = EventForm
def test_with_none_as_formdata_for_existing_objects(self):
event = self.save()
form = self.EventForm(MultiDict(), obj=event)
assert form.locations[0].data['id']
def test_single_entry_update(self):
event = self.save()
location_id = event.locations[0].id
data = {
'name': u'Some event',
'locations-0-id': location_id,
'locations-0-name': u'Some other location'
}
self.save(event, data)
assert len(event.locations) == 1
assert event.locations[0].id == location_id
assert event.locations[0].name == u'Some other location'
def test_creates_new_objects_for_entries_with_unknown_identifiers(self):
event = self.save()
location_id = event.locations[0].id
data = {
'name': u'Some event',
'locations-0-id': 12,
'locations-0-name': u'Some other location'
}
self.save(event, data)
assert event.locations
assert event.locations[0].id != location_id
def test_replace_entry(self):
data = {
'name': u'Some event',
'locations-0-name': u'Some location',
'locations-0-description': u'Some description',
'locations-0-type': u'restaurant'
}
event = self.save(data=data)
location_id = event.locations[0].id
self.session.commit()
data = {
'name': u'Some event',
'locations-0-name': u'Some other location',
}
self.save(event, data)
location = event.locations[0]
assert location.id != location_id
assert location.name == u'Some other location'
assert location.description == u''
assert location.type == u''
assert len(event.locations) == 1
def test_replace_and_update(self):
data = {
'name': u'Some event',
'locations-0-name': u'Location 1',
'locations-0-description': u'Location 1 description',
'locations-1-name': u'Location 2',
'locations-1-description': u'Location 2 description',
}
event = self.save(data=data)
self.session.commit()
data = {
'name': u'Some event',
'locations-0-id': event.locations[1].id,
'locations-0-name': u'Location 2 updated',
'locations-0-description': u'Location 2 description updated',
'locations-1-name': u'Location 3',
}
self.save(event, data)
self.session.commit()
location = event.locations[0]
location2 = event.locations[1]
assert location.name == u'Location 2 updated'
assert location.description == u'Location 2 description updated'
assert len(event.locations) == 2
assert location2.name == u'Location 3'
assert location2.description == u''
def test_multiple_entries(self):
event = self.save()
location_id = event.locations[0].id
data = {
'name': u'Some event',
'locations-0-name': u'Some location',
'locations-1-id': str(location_id), # test coercing works
'locations-1-name': u'Some other location',
'locations-2-name': u'Third location',
'locations-3-id': 123,
'locations-3-name': u'Fourth location'
}
self.save(event, data)
assert len(event.locations) == 4
assert event.locations[0].id == location_id
assert event.locations[0].name == u'Some other location'
assert event.locations[1].name == u'Some location'
assert event.locations[2].name == u'Third location'
assert event.locations[3].name == u'Fourth location'
def test_delete_all_field_list_entries(self):
event = self.save()
data = {
'name': u'Some event'
}
self.save(event, data)
assert not event.locations
def test_update_and_remove(self):
location = self.Location(
name=u'Location #2'
)
event = self.Event(
name=u'Some event',
locations=[
self.Location(
name=u'Location #1'
),
location
]
)
self.session.add(event)
self.session.commit()
data = {
'locations-0-id': location.id,
'locations-0-name': u'Location',
}
self.save(event, data)
self.session.refresh(event)
assert len(event.locations) == 1
assert event.locations[0] == location
|
alipay/aop/api/domain/SsdataDataserviceDatapropertyBatchqueryModel.py | antopen/alipay-sdk-python-all | 213 | 12795081 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class SsdataDataserviceDatapropertyBatchqueryModel(object):
def __init__(self):
self._action = None
self._action_param = None
self._base = None
self._data_channel = None
self._visit_ac = None
self._visit_biz_line = None
self._visit_bu = None
@property
def action(self):
return self._action
@action.setter
def action(self, value):
self._action = value
@property
def action_param(self):
return self._action_param
@action_param.setter
def action_param(self, value):
self._action_param = value
@property
def base(self):
return self._base
@base.setter
def base(self, value):
self._base = value
@property
def data_channel(self):
return self._data_channel
@data_channel.setter
def data_channel(self, value):
self._data_channel = value
@property
def visit_ac(self):
return self._visit_ac
@visit_ac.setter
def visit_ac(self, value):
self._visit_ac = value
@property
def visit_biz_line(self):
return self._visit_biz_line
@visit_biz_line.setter
def visit_biz_line(self, value):
self._visit_biz_line = value
@property
def visit_bu(self):
return self._visit_bu
@visit_bu.setter
def visit_bu(self, value):
self._visit_bu = value
def to_alipay_dict(self):
params = dict()
if self.action:
if hasattr(self.action, 'to_alipay_dict'):
params['action'] = self.action.to_alipay_dict()
else:
params['action'] = self.action
if self.action_param:
if hasattr(self.action_param, 'to_alipay_dict'):
params['action_param'] = self.action_param.to_alipay_dict()
else:
params['action_param'] = self.action_param
if self.base:
if hasattr(self.base, 'to_alipay_dict'):
params['base'] = self.base.to_alipay_dict()
else:
params['base'] = self.base
if self.data_channel:
if hasattr(self.data_channel, 'to_alipay_dict'):
params['data_channel'] = self.data_channel.to_alipay_dict()
else:
params['data_channel'] = self.data_channel
if self.visit_ac:
if hasattr(self.visit_ac, 'to_alipay_dict'):
params['visit_ac'] = self.visit_ac.to_alipay_dict()
else:
params['visit_ac'] = self.visit_ac
if self.visit_biz_line:
if hasattr(self.visit_biz_line, 'to_alipay_dict'):
params['visit_biz_line'] = self.visit_biz_line.to_alipay_dict()
else:
params['visit_biz_line'] = self.visit_biz_line
if self.visit_bu:
if hasattr(self.visit_bu, 'to_alipay_dict'):
params['visit_bu'] = self.visit_bu.to_alipay_dict()
else:
params['visit_bu'] = self.visit_bu
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = SsdataDataserviceDatapropertyBatchqueryModel()
if 'action' in d:
o.action = d['action']
if 'action_param' in d:
o.action_param = d['action_param']
if 'base' in d:
o.base = d['base']
if 'data_channel' in d:
o.data_channel = d['data_channel']
if 'visit_ac' in d:
o.visit_ac = d['visit_ac']
if 'visit_biz_line' in d:
o.visit_biz_line = d['visit_biz_line']
if 'visit_bu' in d:
o.visit_bu = d['visit_bu']
return o
|
ajax_datatable/templatetags/ajax_datatable_tags.py | ivi3/django-ajax-datatable | 101 | 12795100 | from django import template
register = template.Library()
################################################################################
# Support for generic editing in the front-end
@register.filter
def model_verbose_name(model):
"""
Sample usage:
    {{model|model_verbose_name}}
"""
return model._meta.verbose_name
@register.filter
def model_verbose_name_plural(model):
"""
Sample usage:
    {{model|model_verbose_name_plural}}
"""
return model._meta.verbose_name_plural
@register.filter
def model_name(model):
"""
Sample usage:
{{model|model_name}}
"""
return model._meta.model_name
@register.filter
def app_label(model):
"""
Sample usage:
{{model|app_label}}
"""
return model._meta.app_label
@register.simple_tag(takes_context=True)
def testhasperm(context, model, action):
"""
    Returns True iff the user has the specified permission on the model.
For 'model', we accept either a Model class, or a string formatted as "app_label.model_name".
Sample usage:
{% testhasperm model 'view' as can_view_objects %}
{% if not can_view_objects %}
<h2>Sorry, you have no permission to view these objects</h2>
{% endif %}
"""
user = context['request'].user
if isinstance(model, str):
app_label, model_name = model.split('.')
else:
app_label = model._meta.app_label
model_name = model._meta.model_name
required_permission = '%s.%s_%s' % (app_label, action, model_name)
return user.is_authenticated and user.has_perm(required_permission)
@register.tag
def ifhasperm(parser, token):
"""
Check user permission over specified model.
(You can specify either a model or an object).
Sample usage:
{% ifhasperm model 'add' %}
<div style="color: #090">User can add objects</div>
{% else %}
<div style="color: #900">User cannot add objects</div>
{% endifhasperm %}
"""
# Separating the tag name from the parameters
try:
tag, model, action = token.contents.split()
except (ValueError, TypeError):
raise template.TemplateSyntaxError(
"'%s' tag takes three parameters" % tag)
default_states = ['ifhasperm', 'else']
end_tag = 'endifhasperm'
# Place to store the states and their values
states = {}
# Let's iterate over our context and find our tokens
while token.contents != end_tag:
current = token.contents
states[current.split()[0]] = parser.parse(default_states + [end_tag])
token = parser.next_token()
model_var = parser.compile_filter(model)
action_var = parser.compile_filter(action)
return CheckPermNode(states, model_var, action_var)
class CheckPermNode(template.Node):
def __init__(self, states, model_var, action_var):
self.states = states
self.model_var = model_var
self.action_var = action_var
def render(self, context):
# Resolving variables passed by the user
model = self.model_var.resolve(context)
action = self.action_var.resolve(context)
# Check user permission
if testhasperm(context, model, action):
html = self.states['ifhasperm'].render(context)
else:
html = self.states['else'].render(context) if 'else' in self.states else ''
return html
|
Machine Learning/TensorflowExamples/simple_gradient_descent.py | sarojjethva/Learning-Resources | 639 | 12795104 | """
Author: <NAME>
Github: github.com/yashbmewada
Program demonstrating simple line fitting using TensorFlow and the gradient descent algorithm.
This program trains a model to fit two values, the slope (m) and the y-intercept (b), in the line
equation y = mx + b. We provide a very small dataset of randomly generated points xs and ys
and train the TensorFlow model to adjust m and b until a straight line fits those points.
The fitted line can then be used to predict an unknown value Y for a given unknown X, based on the
learned values of m and b.
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2' # called in order to minimize the warnings about SSE4.1 instructions.
import tensorflow as tf
"""
Random X and Y points form the training data, i.e. the dataset (training only; no validation or test set).
"""
xs = [0.00,2.00,4.00,6.00,8.00,10.00,12.00,14.00] #features
ys = [-0.82,-0.90,-0.12,0.26,0.31,0.64,1.02,1.00] #labels (actual outputs)
"""
Initial values for m and b. These values will be adjusted to fit the dataset points above.
"""
m_initial = -0.50
b_initial = 1.00
"""
tf.Variable : allows us to create variables whose values can be adjusted in order to learn at each pass on the dataset.
"""
m = tf.Variable(m_initial)
b = tf.Variable(b_initial)
"""
To adjust and fit the line, we minimize the "error" between the predicted y (computed from m and b)
and the actual y (from "ys") for every given (x, y) pair, so that the line fits the data properly.
"""
error = 0.0
"""
We build an operation that accumulates the error while iterating over the X and Y values from the dataset [xs, ys].
Running the optimizer roughly 1000 times minimizes this error enough to give a respectable fit for the line.
"""
for x,y in zip(xs,ys):
predicted_y = m*x + b
    error += (y-predicted_y)**2 # squared difference between actual and predicted y, accumulated into the total 'cost' we minimize.
"""
Now, to train on the operations defined above, we use TensorFlow's Gradient Descent Optimizer and pass
the "error" to its minimize() function as a parameter.
While initializing the Gradient Descent optimizer, we define a learning_rate of 0.001.
The learning rate defines the magnitude, or "how big" a jump we take while minimizing the "cost" / "error".
Remember: too small a learning rate makes training very slow, while too big a learning rate may keep training
from ever finding an optimum solution. The best learning rate is found by trying different values; we take
0.001 here as it usually works in most cases.
"""
optimizer_op = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(error)
"""
TensorFlow uses a "session" to run the training steps defined above.
Before starting the session, it is advisable to initialize all variables (here to their specified initial values).
"""
init_op = tf.global_variables_initializer()
"""
All the calculations would now be done in a Session
"""
with tf.Session() as session:
session.run(init_op)
_ITERATIONS = 1000 #number of passes on the dataset
for iteration in range(_ITERATIONS):
session.run(optimizer_op) #calling our optimization operator to minimize error
slope, intercept = session.run((m,b)) #calling our adjusted values
print('slope: ', slope , 'Intercept: ', intercept) |
admin/client.py | stackriot/flocker | 2,690 | 12795128 | # Copyright 2015 ClusterHQ Inc. See LICENSE file for details.
"""
Run the client installation tests.
"""
import os
import shutil
import sys
import tempfile
from characteristic import attributes
import docker
from effect import TypeDispatcher, sync_performer, perform
from twisted.python.usage import Options, UsageError
from flocker.provision import PackageSource
from flocker.provision._effect import Sequence, perform_sequence
from flocker.provision._install import (
ensure_minimal_setup,
task_cli_pkg_install,
task_cli_pip_prereqs,
task_cli_pip_install,
cli_pip_test,
)
from flocker.provision._ssh import (
Run, Sudo, Put, Comment, perform_sudo, perform_put)
@attributes(['image', 'package_manager'])
class DockerImage(object):
"""Holder for Docker image information."""
DOCKER_IMAGES = {
'centos-7': DockerImage(image='centos:7', package_manager='yum'),
'debian-8': DockerImage(image='debian:8', package_manager='apt'),
'fedora-22': DockerImage(image='fedora:22', package_manager='dnf'),
'ubuntu-14.04': DockerImage(image='ubuntu:14.04', package_manager='apt'),
'ubuntu-16.04': DockerImage(image='ubuntu:16.04', package_manager='apt'),
}
# No distribution is officially supported using pip, but the code can
# test the pip instructions using any of the images.
PIP_DISTRIBUTIONS = DOCKER_IMAGES.keys()
# Some distributions have packages created for them.
# Although CentOS 7 is not a supported client distribution, the client
# packages get built, and can be tested.
PACKAGED_CLIENT_DISTRIBUTIONS = (
'centos-7',
'ubuntu-14.04',
'ubuntu-16.04',
)
class ScriptBuilder(TypeDispatcher):
"""
Convert an Effect sequence to a shell script.
The effects are those defined in flocker.provision._effect and
flocker.provision._ssh._model.
"""
def __init__(self, effects):
self.lines = [
'#!/bin/bash',
'set -ex'
]
TypeDispatcher.__init__(self, {
Run: self.perform_run,
Sudo: perform_sudo,
Put: perform_put,
Comment: self.perform_comment,
Sequence: perform_sequence
})
perform(self, effects)
# Add blank line to terminate script with a newline
self.lines.append('')
self._script = '\n'.join(self.lines)
@sync_performer
def perform_run(self, dispatcher, intent):
"""
For Run effects, add the command line.
"""
self.lines.append(intent.command)
@sync_performer
def perform_comment(self, dispatcher, intent):
"""
For Comment effects, prefix the comment with #
"""
self.lines.append('# ' + intent.comment)
def script(self):
"""
Return the generated shell script.
"""
return self._script
def make_script_file(directory, effects):
"""
Create a shell script file from a sequence of effects.
:param bytes directory: The directory in which to create the script.
:param Effect effects: An effect which contains the commands,
typically a Sequence containing multiple commands.
:return: The base filename of the script.
"""
builder = ScriptBuilder(effects)
fd, filename = tempfile.mkstemp(dir=directory, text=True)
os.write(fd, builder.script())
os.close(fd)
os.chmod(filename, 0555)
return os.path.basename(filename)
class DockerContainer:
"""
Run commands in a Docker container.
"""
def __init__(self, image):
# Getting Docker to work correctly on any client platform can
# be tricky. See
# http://doc-dev.clusterhq.com/gettinginvolved/client-testing.html
# for details.
params = docker.utils.kwargs_from_env(assert_hostname=False)
self.docker = docker.Client(version='1.16', **params)
self.image = image
@classmethod
def from_distribution(cls, distribution):
"""
Create a DockerContainer with a given distribution name.
"""
return cls(DOCKER_IMAGES[distribution].image)
def start(self):
"""
Start the Docker container.
"""
# On OS X, shared volumes must be in /Users, so use the home directory.
# See 'Mount a host directory as a data volume' at
# https://docs.docker.com/userguide/dockervolumes/
self.tmpdir = tempfile.mkdtemp(dir=os.path.expanduser('~'))
try:
self.docker.pull(self.image)
container = self.docker.create_container(
image=self.image, command='/bin/bash', tty=True,
volumes=['/mnt/script'],
)
self.container_id = container[u'Id']
self.docker.start(
self.container_id,
binds={
self.tmpdir: {'bind': '/mnt/script', 'ro': True},
}
)
except:
os.rmdir(self.tmpdir)
raise
def stop(self):
"""
Stop the Docker container.
"""
self.docker.stop(self.container_id)
self.docker.remove_container(self.container_id)
shutil.rmtree(self.tmpdir)
def execute(self, commands, out=sys.stdout):
"""
Execute a set of commands in the Docker container.
The set of commands provided to one call of ``execute`` will be
executed in a single session. This means commands will see the
environment created by previous commands.
The output of the commands is sent to the ``out`` file object,
which must have a ``write`` method.
:param Effect commands: An Effect containing the commands to run,
probably a Sequence of Effects, one for each command to run.
:param out: Where to send command output. Any object with a
``write`` method.
:return int: The exit status of the commands. If all commands
succeed, this will be zero. If any command fails, this will
be non-zero.
"""
script_file = make_script_file(self.tmpdir, commands)
script = '/mnt/script/{}'.format(script_file)
session = self.docker.exec_create(self.container_id, script)
session_id = session[u'Id']
for output in self.docker.exec_start(session, stream=True):
out.write(output)
return self.docker.exec_inspect(session_id)[u'ExitCode']
class RunOptions(Options):
description = "Run the client tests."
optParameters = [
['distribution', None, None,
'The target distribution. '
'One of {}. With --pip, one of {}'.format(
', '.join(PACKAGED_CLIENT_DISTRIBUTIONS),
', '.join(PIP_DISTRIBUTIONS))],
['branch', None, None, 'Branch to grab packages from'],
['flocker-version', None, None, 'Flocker version to install'],
['build-server', None, 'http://build.clusterhq.com/',
'Base URL of build server for package downloads'],
]
optFlags = [
['pip', None, 'Install using pip rather than packages.'],
]
synopsis = ('Usage: run-client-tests --distribution <distribution> '
'[--branch <branch>] [--flocker-version <version>] '
'[--build-server <url>] [--pip]')
def __init__(self, top_level):
"""
:param FilePath top_level: The top-level of the flocker repository.
"""
Options.__init__(self)
self.top_level = top_level
def postOptions(self):
if self['distribution'] is None:
raise UsageError("Distribution required.")
self['package_source'] = PackageSource(
version=self['flocker-version'],
branch=self['branch'],
build_server=self['build-server'],
)
def get_steps_pip(distribution, package_source=PackageSource()):
"""
Get commands to run for testing client pip installation.
:param bytes distribution: The distribution the node is running.
:param PackageSource package_source: The source from which to install the
package.
:return: An ``Effect`` to pass to a ``Dispatcher`` that supports
``Sequence``, ``Run``, ``Sudo``, ``Comment``, and ``Put``.
"""
if distribution not in PIP_DISTRIBUTIONS:
raise UsageError(
"Distribution %r not supported. Available distributions: %s"
% (distribution, ', '.join(PIP_DISTRIBUTIONS)))
package_manager = DOCKER_IMAGES[distribution].package_manager
virtualenv = 'flocker-client'
steps = [
ensure_minimal_setup(package_manager),
task_cli_pip_prereqs(package_manager),
task_cli_pip_install(virtualenv, package_source),
cli_pip_test(virtualenv, package_source),
]
return steps
def get_steps_pkg(distribution, package_source=PackageSource()):
"""
Get commands to run for testing client package installation.
:param bytes distribution: The distribution the node is running.
:param PackageSource package_source: The source from which to install the
package.
:return: An ``Effect`` to pass to a ``Dispatcher`` that supports
``Sequence``, ``Run``, ``Sudo``, ``Comment``, and ``Put``.
"""
if distribution not in PACKAGED_CLIENT_DISTRIBUTIONS:
raise UsageError(
"Distribution %r not supported. Available distributions: %s"
% (distribution, ', '.join(PACKAGED_CLIENT_DISTRIBUTIONS)))
package_manager = DOCKER_IMAGES[distribution].package_manager
steps = [
ensure_minimal_setup(package_manager),
task_cli_pkg_install(distribution, package_source),
]
return steps
def run_steps(container, steps, out=sys.stdout):
"""
Run a sequence of commands in a container.
:param DockerContainer container: Container in which to run the test.
:param Effect steps: Steps to to run the test.
:param file out: Stream to write output.
:return int: Exit status of steps.
"""
container.start()
try:
for commands in steps:
status = container.execute(commands, out)
if status != 0:
return status
finally:
container.stop()
return 0
def main(args, base_path, top_level):
"""
:param list args: The arguments passed to the script.
:param FilePath base_path: The executable being run.
:param FilePath top_level: The top-level of the Flocker repository.
"""
options = RunOptions(top_level=top_level)
try:
options.parseOptions(args)
except UsageError as e:
sys.exit("%s: %s\n" % (base_path.basename(), e))
distribution = options['distribution']
package_source = options['package_source']
if options['pip']:
get_steps = get_steps_pip
else:
get_steps = get_steps_pkg
steps = get_steps(distribution, package_source)
container = DockerContainer.from_distribution(distribution)
status = run_steps(container, steps)
sys.exit(status)
|
Notebooks/Visualization/DataReader.py | keuntaeklee/pytorch-PPUU | 159 | 12795171 | """A class with static methods which can be used to access the data about
experiments.
This includes reading logs to parse success cases, reading images, costs
and speed.
"""
import numpy as np
from glob import glob
import torch
import pandas
import re
import json
from functools import lru_cache
import imageio
EPISODES = 561
class DataReader:
"""Container class for the static data access methods"""
EXPERIMENTS_MAPPING_FILE = 'experiments_mapping.json'
@staticmethod
@lru_cache(maxsize=1)
def get_experiments_mapping():
"""Reads the experiments mapping from a json file
EXPERIMENTS_MAPPING_FILE
"""
with open(DataReader.EXPERIMENTS_MAPPING_FILE, 'r') as f:
x = json.load(f)
return x
@staticmethod
def get_images(experiment, seed, checkpoint, episode):
"""Get simulator images for a given model evaluation on a
given episode"""
path = DataReader.get_experiments_mapping()[experiment][0]
model_name = DataReader.get_experiments_mapping()[experiment][1]
image_paths = f'{path}/planning_results/videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/ego/*.png'
images = []
for image_path in sorted(glob(image_paths)):
with open(image_path, 'rb') as f:
images.append(f.read())
return images
@staticmethod
def get_gradients(experiment, seed, checkpoint, episode):
"""Get gradients for a given model evaluation on a given episode"""
path = DataReader.get_experiments_mapping()[experiment][0]
model_name = DataReader.get_experiments_mapping()[experiment][1]
gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png'
images = []
for image_path in sorted(glob(gradient_paths)):
with open(image_path, 'rb') as f:
images.append(f.read())
return images
@staticmethod
def get_last_gradient(experiment, seed, checkpoint, episode):
"""Get the last gradient for the model and episode
Returns:
(value, x, y) - tuple, where value is the max value of the
gradient, x, y are the location of this max
value in the gradient image.
"""
path = DataReader.get_experiments_mapping()[experiment][0]
model_name = DataReader.get_experiments_mapping()[experiment][1]
gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png'
images = sorted(glob(gradient_paths))
if len(images) == 0:
return (0, 0, 0)
image_path = sorted(glob(gradient_paths))[-1]
image = imageio.imread(image_path)
mx_index = np.argmax(image)
value = image.flatten()[mx_index]
middle_x = image.shape[0] / 2
middle_y = image.shape[1] / 2
x = mx_index // image.shape[1]
x -= middle_x
y = mx_index % image.shape[1]
y -= middle_y
if value == 0:
return (0, 0, 0)
else:
return (value, x, y)
@staticmethod
def get_evaluation_log_file(experiment, seed, step):
"""Retuns a path to the eval logs for given model"""
path = DataReader.get_experiments_mapping()[experiment]
regex = path[0] + 'planning_results/' + path[1] + \
f'-seed={seed}-novaluestep{step}' + '.model.log'
paths = glob(regex)
assert len(paths) == 1, \
f'paths for {regex} is not length of 1, and is equal to {paths}'
return paths[0]
@staticmethod
def get_training_log_file(experiment, seed):
"""Retuns a path to the eval logs for given model"""
path = DataReader.get_experiments_mapping()[experiment]
regex = path[0] + 'policy_networks/' + path[1] + \
f'-seed={seed}-novalue' + '.log'
paths = glob(regex)
assert len(paths) == 1, \
f'paths for {regex} is not length of 1, and is equal to {paths}'
return paths[0]
@staticmethod
@lru_cache(maxsize=100)
def find_option_values(option,
experiment=None,
seed=None,
checkpoint=None):
"""Returns possible values for selected option.
Depending on option, returns:
if option == 'seed' - returns all seeds for given experiment.
experiment has to passed.
if option == 'checkpoint' - returns all checkpoints for given
experiment and seed.
experiment and seed have to be
passed.
if option == 'episode' - returns all episodes for given
model
experiment, seed, and checkpoint have
to be passed.
"""
if option == 'seed':
path = DataReader.get_experiments_mapping()[experiment]
logs = glob(path[0] + 'planning_results/' + path[1] + '*.log')
regexp = r"seed=(\d+)-"
elif option == 'checkpoint':
path = DataReader.get_experiments_mapping()[experiment]
logs = glob(path[0] + 'planning_results/' +
path[1] + f'-seed={seed}' + '*.model.log')
regexp = r'-novaluestep(\d+)\.'
elif option == 'episode':
path = DataReader.get_experiments_mapping()[experiment]
logs = glob(path[0] +
'planning_results/videos_simulator/' +
path[1] +
f'-seed={seed}-novaluestep{checkpoint}.model/ep*')
regexp = r'model/ep(\d+)'
values = []
for log in logs:
m = re.search(regexp, log)
if m:
result = m.group(1)
values.append(int(result))
else:
print(f'{log} doesn\'t contain {option}')
# log files for each step are generated for seeds
values = list(set(values))
values.sort()
return values
@staticmethod
def get_success_rate(experiment, seed, step):
"""get the success rate for a given model"""
log_file = DataReader.get_evaluation_log_file(experiment, seed, step)
with open(log_file, 'r') as f:
last_line = f.readlines()[-1]
last_colon = last_line.rfind(':')
success_rate = float(last_line[(last_colon + 2):])
return success_rate
@staticmethod
def get_success_rates_for_experiment(experiment):
"""get success rate arrays for each seed for the given experiment
across all checkpoints.
The resulting shape of the np array is
(seeds, checkpoints), where seeds is the number of seeds,
        and checkpoints is the number of checkpoints.
"""
seeds = DataReader.find_option_values('seed', experiment)
result = {}
steps = []
min_length = 100
max_length = 0
for seed in seeds:
result[seed] = []
checkpoints = DataReader.find_option_values(
'checkpoint', experiment, seed)
if len(steps) < len(checkpoints):
steps = checkpoints
for checkpoint in checkpoints:
success = DataReader.get_success_rate(
experiment, seed, checkpoint)
result[seed].append(success)
min_length = min(min_length, len(result[seed]))
max_length = max(max_length, len(result[seed]))
if len(result) > 0:
result = np.stack([np.pad(np.array(result[seed]), (0, max_length - len(result[seed])), 'edge')
for seed in result])
steps = np.array(steps)
return steps, result
else:
return None, None
@staticmethod
def get_learning_curves_for_seed(experiment, seed):
"""Gets the training and validation total losses for a given experiment
and seed.
"""
path = DataReader.get_training_log_file(experiment, seed)
with open(path, 'r') as f:
lines = f.readlines()
regex = re.compile(".*step\s(\d+).*\s\[.*\π\:\s(.*)\].*\[.*\π\:\s(.*)\]")
steps = []
train_losses = []
validation_losses = []
for line in lines:
match = regex.match(line)
if match:
steps.append(int(match.group(1)))
train_losses.append(float(match.group(2)))
validation_losses.append(float(match.group(3)))
result = dict(
steps=steps,
train_losses=train_losses,
validation_losses=validation_losses,
)
return result
@staticmethod
def get_learning_curves_for_experiment(experiment):
seeds = DataReader.find_option_values('seed', experiment)
result = {}
steps = []
min_length = 100
max_length = 0
train = {}
validation = {}
for seed in seeds:
result[seed] = []
curves = DataReader.get_learning_curves_for_seed(experiment, seed)
for i, step in enumerate(curves['steps']):
train.setdefault(step, []).append(curves['train_losses'][i])
validation.setdefault(step, []).append(curves['validation_losses'][i])
train_means = []
train_stds = []
validation_means = []
validation_stds = []
for key in train:
train_means.append(float(np.mean(train[key])))
train_stds.append(float(np.std(train[key])))
validation_means.append(float(np.mean(validation[key])))
validation_stds.append(float(np.std(validation[key])))
result = dict(
steps=list(train.keys()),
train=(train_means, train_stds),
validation=(validation_means, validation_stds),
)
return result
@staticmethod
def get_episodes_with_outcome(experiment, seed, step, outcome):
"""Gets episodes with given outcome for a given model.
If outcome == 1, returns successful episodes,
if outcome == 0, returns failing episodes.
"""
path = DataReader.get_evaluation_log_file(experiment, seed, step)
with open(path, 'r') as f:
lines = f.readlines()
regex = re.compile(".*ep:\s+(\d+).*\|\ssuccess:\s+(\d).*")
result = []
for line in lines:
match = regex.match(line)
if match:
if int(match.group(2)) == outcome:
result.append(int(match.group(1)))
return result
@staticmethod
def get_episode_success_map(experiment, seed, step):
"""Gets a 0-1 array of shape (episodes) where episodes is
the number of episodes.
Ith value in the result is 0 if the ith episode failed,
and 1 otherwise.
"""
successes = DataReader.get_episodes_with_outcome(experiment,
seed,
step,
1)
successes = np.array(successes) - 1
result = np.zeros(EPISODES)
result[successes] = 1
return result
@staticmethod
def get_episodes_success_counts(experiment):
"""For a given experiment, for all episodes checks performance of all
the models with all possible seeds and checkpoints, and returns
an array of shape (episodes) where episodes is the number of episodes,
where Ith value is the number of models in this experiment that
succeeded in this episode.
"""
seeds = DataReader.find_option_values('seed', experiment)
result = np.zeros(EPISODES)
for seed in seeds:
checkpoints = DataReader.find_option_values(
'checkpoint', experiment, seed)
for checkpoint in checkpoints:
success = DataReader.get_episodes_with_outcome(experiment,
seed,
checkpoint,
1)
success = np.array(success)
success = success - 1
one_hot = np.zeros((len(success), EPISODES))
one_hot[np.arange(len(success)), success] = 1
one_hot = np.sum(one_hot, axis=0),
one_hot = np.squeeze(one_hot)
result += one_hot
return result
@staticmethod
def get_episode_speeds(experiment, seed, checkpoint, episode):
""" Returns an array of speeds for given model and given episode"""
return DataReader.get_model_speeds(experiment,
seed,
checkpoint)[episode - 1]
@staticmethod
def get_episode_costs(experiment, seed, checkpoint, episode):
""" Returns an array of data frames with all the costs for
given evaluation """
costs = DataReader.get_model_costs(experiment,
seed,
checkpoint)
if costs is not None:
return costs[episode - 1]
else:
return None
@staticmethod
@lru_cache(maxsize=10)
def get_model_costs(experiment, seed, checkpoint):
""" Returns an array of costs for given model for all episodes"""
path = DataReader.get_experiments_mapping()[experiment]
regex = path[0] + 'planning_results/' + path[1] + \
f'-seed={seed}-novaluestep{checkpoint}' + '.model.costs'
costs_paths = glob(regex)
if len(costs_paths) == 0:
print(
f'costs_paths for {regex} is {costs_paths} and it\'s length is not 1')
return None
else:
raw_costs = torch.load(costs_paths[0])
# list of DataFrame, one per episode
costs = [pandas.DataFrame(cost if type(cost) == type([]) else cost.tolist()) for cost in raw_costs]
return costs
@staticmethod
@lru_cache(maxsize=10)
def get_model_speeds(experiment, seed, checkpoint):
""" Returns an array of speeds for given model for all episodes"""
path = DataReader.get_experiments_mapping()[experiment]
regex = path[0] + 'planning_results/' + path[1] + \
f'-seed={seed}-novaluestep{checkpoint}' + '.model.states'
states_paths = glob(regex)
assert len(states_paths) == 1, \
f'states_paths for {regex} is {states_paths} and it\'s length is not 1'
states_path = states_paths[0]
states = torch.load(states_path)
result = []
for i in range(len(states)):
episode_states = states[i]
episode_states = list(map(lambda x: x[-1], episode_states))
episode_states = torch.stack(episode_states)
result.append(episode_states[:, 2:].norm(dim=1)) # is it correct
return result
@staticmethod
@lru_cache(maxsize=10)
def get_model_states(experiment, seed, checkpoint):
""" Returns an array of states for given model for all episodes"""
path = DataReader.get_experiments_mapping()[experiment]
regex = path[0] + 'planning_results/' + path[1] + \
f'-seed={seed}-novaluestep{checkpoint}' + '.model.states'
states_paths = glob(regex)
assert len(states_paths) == 1, \
f'states_paths for {regex} is {states_paths} and it\'s length is not 1'
states_path = states_paths[0]
states = torch.load(states_path)
result = []
for i in range(len(states)):
episode_states = states[i]
episode_states = list(map(lambda x: x[-1], episode_states))
episode_states = torch.stack(episode_states)
result.append(episode_states)
return result
|
src/genie/libs/parser/iosxe/tests/ShowLldpNeighborsDetail/cli/equal/golden_output_3_expected.py | balmasea/genieparser | 204 | 12795172 | expected_output = {
"interfaces": {
"GigabitEthernet1/0/32": {
"if_name": "GigabitEthernet1/0/32",
"port_id": {
"222": {
"neighbors": {
"not advertised": {
"neighbor_id": "not advertised",
"chassis_id": "FE80::EC22:9A75:BBC7:71AF",
"port_id": "222",
"port_description": "Description",
"system_name": "not advertised",
"system_description": '{"SN":"SN-NR","Owner":"OWNER"}',
"time_remaining": 92,
"management_address": "0000:0000:0000:0000:0000:ffff:7f00:0001",
"auto_negotiation": "not supported",
}
}
}
},
}
},
"total_entries": 1,
}
|
tests/contrib/backends/hbase/test_domain_cache.py | buildfail/frontera | 1,267 | 12795213 | <filename>tests/contrib/backends/hbase/test_domain_cache.py<gh_stars>1000+
# -*- coding: utf-8 -*-
from frontera.contrib.backends.hbase.domaincache import DomainCache
from happybase import Connection
import logging
import unittest
class TestDomainCache(unittest.TestCase):
def setUp(self):
logging.basicConfig(level=logging.DEBUG)
self.conn = Connection(host="hbase-docker")
if b'domain_metadata' not in self.conn.tables():
self.conn.create_table('domain_metadata', {
'm': {'max_versions': 1, 'block_cache_enabled': 1,}
})
t = self.conn.table('domain_metadata')
t.delete('d1')
t.delete('d2')
t.delete('d3')
t.delete('d4')
def test_domain_cache_both_generations(self):
dc = DomainCache(2, self.conn, 'domain_metadata')
dc['d1'] = {'domain': 1}
dc['d2'] = {'domain': 2}
# eviction should happen
dc['d3'] = {'domain': [3, 2, 1]}
dc['d4'] = {'domain': 4}
assert dc['d1'] == {'domain': 1}
assert dc['d2'] == {'domain': 2}
assert dc['d3'] == {'domain': [3, 2, 1]}
assert dc['d4'] == {'domain': 4}
def test_domain_cache_get_with_default(self):
dc = DomainCache(2, self.conn, 'domain_metadata')
dc['d1'] = {'domain': 1}
dc['d2'] = {'domain': 2}
dc['d3'] = {'domain': [3, 2, 1]}
dc['d4'] = {'domain': 4}
assert dc.get('d1', {}) == {'domain': 1}
assert dc.get('d3', {}) == {'domain': [3, 2, 1]}
def test_domain_cache_setdefault(self):
dc = DomainCache(2, self.conn, 'domain_metadata')
dc['d1'] = {'domain': 1}
dc['d2'] = {'domain': 2}
dc['d3'] = {'domain': [3, 2, 1]}
dc['d4'] = {'domain': 4}
assert dc.setdefault('d1', {}) == {'domain': 1}
assert dc.setdefault('d5', {'domain': 6}) == {'domain': 6}
dc.flush()
assert dc.setdefault('d3', {}) == {'domain': [3, 2, 1]}
def test_domain_cache_setdefault_with_second_gen_flush(self):
dc = DomainCache(2, self.conn, 'domain_metadata', batch_size=3)
dc['d1'] = {'domain': 1}
dc['d2'] = {'domain': 2}
dc['d3'] = {'domain': [3, 2, 1]}
dc['d4'] = {'domain': 4}
dc.setdefault('d1', {})['domain'] += 1
assert dc.setdefault('d1', {}) == {'domain': 2}
def test_empty_key(self):
dc = DomainCache(2, self.conn, 'domain_metadata')
with self.assertRaises(KeyError):
dc[''] = {'test':1}
def test_deletion(self):
dc = DomainCache(2, self.conn, 'domain_metadata')
with self.assertRaises(KeyError):
del dc['d1']
dc['d1'] = {'domain': 1}
dc['d2'] = {'domain': 2}
dc['d3'] = {'domain': [3, 2, 1]}
dc['d4'] = {'domain': 4}
del dc['d1'] # second gen
del dc['d3'] # first gen
dc.flush()
del dc['d4'] # hbase
def test_contains(self):
dc = DomainCache(2, self.conn, 'domain_metadata')
dc['d1'] = {'domain': 1}
dc['d2'] = {'domain': 2}
dc['d3'] = {'domain': [3, 2, 1]}
dc['d4'] = {'domain': 4}
assert 'd1' in dc # second gen
assert 'd3' in dc # first gen
dc.flush()
assert 'd4' in dc
def test_pop(self):
dc = DomainCache(2, self.conn, 'domain_metadata')
dc['d1'] = {'domain': 1}
dc['d2'] = {'domain': 2}
dc['d3'] = {'domain': [3, 2, 1]}
dc['d4'] = {'domain': 4}
assert dc.pop('d1') == {'domain': 1}
assert 'd1' not in dc
assert dc.pop('d3') == {'domain': [3, 2, 1]}
assert 'd3' not in dc
dc.flush()
assert dc.pop('d4') == {'domain': 4}
assert 'd4' not in dc |
examples/util/lookups.py | OptionMetrics/petl | 495 | 12795257 | from __future__ import division, print_function, absolute_import
# lookup()
##########
import petl as etl
table1 = [['foo', 'bar'],
['a', 1],
['b', 2],
['b', 3]]
lkp = etl.lookup(table1, 'foo', 'bar')
lkp['a']
lkp['b']
# if no valuespec argument is given, defaults to the whole
# row (as a tuple)
lkp = etl.lookup(table1, 'foo')
lkp['a']
lkp['b']
# compound keys are supported
table2 = [['foo', 'bar', 'baz'],
['a', 1, True],
['b', 2, False],
['b', 3, True],
['b', 3, False]]
lkp = etl.lookup(table2, ('foo', 'bar'), 'baz')
lkp[('a', 1)]
lkp[('b', 2)]
lkp[('b', 3)]
# data can be loaded into an existing dictionary-like
# object, including persistent dictionaries created via the
# shelve module
import shelve
lkp = shelve.open('example.dat', flag='n')
lkp = etl.lookup(table1, 'foo', 'bar', lkp)
lkp.close()
lkp = shelve.open('example.dat', flag='r')
lkp['a']
lkp['b']
# lookupone()
#############
import petl as etl
table1 = [['foo', 'bar'],
['a', 1],
['b', 2],
['b', 3]]
# if the specified key is not unique and strict=False (default),
# the first value wins
lkp = etl.lookupone(table1, 'foo', 'bar')
lkp['a']
lkp['b']
# if the specified key is not unique and strict=True, will raise
# DuplicateKeyError
try:
lkp = etl.lookupone(table1, 'foo', strict=True)
except etl.errors.DuplicateKeyError as e:
print(e)
# compound keys are supported
table2 = [['foo', 'bar', 'baz'],
['a', 1, True],
['b', 2, False],
['b', 3, True],
['b', 3, False]]
lkp = etl.lookupone(table2, ('foo', 'bar'), 'baz')
lkp[('a', 1)]
lkp[('b', 2)]
lkp[('b', 3)]
# data can be loaded into an existing dictionary-like
# object, including persistent dictionaries created via the
# shelve module
import shelve
lkp = shelve.open('example.dat', flag='n')
lkp = etl.lookupone(table1, 'foo', 'bar', lkp)
lkp.close()
lkp = shelve.open('example.dat', flag='r')
lkp['a']
lkp['b']
# dictlookup()
##############
import petl as etl
table1 = [['foo', 'bar'],
['a', 1],
['b', 2],
['b', 3]]
lkp = etl.dictlookup(table1, 'foo')
lkp['a']
lkp['b']
# compound keys are supported
table2 = [['foo', 'bar', 'baz'],
['a', 1, True],
['b', 2, False],
['b', 3, True],
['b', 3, False]]
lkp = etl.dictlookup(table2, ('foo', 'bar'))
lkp[('a', 1)]
lkp[('b', 2)]
lkp[('b', 3)]
# data can be loaded into an existing dictionary-like
# object, including persistent dictionaries created via the
# shelve module
import shelve
lkp = shelve.open('example.dat', flag='n')
lkp = etl.dictlookup(table1, 'foo', lkp)
lkp.close()
lkp = shelve.open('example.dat', flag='r')
lkp['a']
lkp['b']
# dictlookupone()
#################
import petl as etl
table1 = [['foo', 'bar'],
['a', 1],
['b', 2],
['b', 3]]
# if the specified key is not unique and strict=False (default),
# the first value wins
lkp = etl.dictlookupone(table1, 'foo')
lkp['a']
lkp['b']
# if the specified key is not unique and strict=True, will raise
# DuplicateKeyError
try:
lkp = etl.dictlookupone(table1, 'foo', strict=True)
except etl.errors.DuplicateKeyError as e:
print(e)
# compound keys are supported
table2 = [['foo', 'bar', 'baz'],
['a', 1, True],
['b', 2, False],
['b', 3, True],
['b', 3, False]]
lkp = etl.dictlookupone(table2, ('foo', 'bar'))
lkp[('a', 1)]
lkp[('b', 2)]
lkp[('b', 3)]
# data can be loaded into an existing dictionary-like
# object, including persistent dictionaries created via the
# shelve module
import shelve
lkp = shelve.open('example.dat', flag='n')
lkp = etl.dictlookupone(table1, 'foo', lkp)
lkp.close()
lkp = shelve.open('example.dat', flag='r')
lkp['a']
lkp['b']
|