{
"source": "jeromegit/prettier_html_table",
"score": 2
}
|
#### File: prettier_html_table/prettier_html_table/prettier_html_table.py
```python
DEFAULT_THEME = 'blue_dark'
DEFAULT_FONT_FAMILY = 'Arial, Helvetica, sans-serif'
DEFAULT_FONT_SIZE = 'medium'
DEFAULT_TEXT_ALIGN = 'left'
class ThemeColors:
def __init__(self, header_fg_color, header_bg_color, border, odd_bg_color, even_bg_color):
self.header_fg_color = header_fg_color
self.header_bg_color = header_bg_color
self.border = border
self.odd_bg_color = odd_bg_color
self.even_bg_color = even_bg_color
THEME_COLORS = { # header_fg_color header_bg_color border odd_bg_color even_bg_color
'blue_dark': ThemeColors('#FFFFFF', '#305496', '#305496', '#D9E1F2', '#FFFFFF'),
'blue_light': ThemeColors('#305496', '#FFFFFF', '#305496', '#D9E1F2', '#FFFFFF'),
'green_dark': ThemeColors('#FFFFFF', '#548235', '#548235', '#E2EFDA', '#FFFFFF'),
'green_light': ThemeColors('#548235', '#FFFFFF', '#548235', '#E2EFDA', '#FFFFFF'),
'grey_dark': ThemeColors('#FFFFFF', '#808080', '#808080', '#EDEDED', '#FFFFFF'),
'grey_light': ThemeColors('#808080', '#FFFFFF', '#808080', '#EDEDED', '#FFFFFF'),
'orange_dark': ThemeColors('#FFFFFF', '#C65911', '#C65911', '#FCE4D6', '#FFFFFF'),
'orange_light': ThemeColors('#C65911', '#FFFFFF', '#C65911', '#FCE4D6', '#FFFFFF'),
'red_dark': ThemeColors('#FFFFFF', '#823535', '#823535', '#efdada', '#FFFFFF'),
'red_light': ThemeColors('#823535', '#FFFFFF', '#823535', '#efdada', '#FFFFFF'),
'yellow_dark': ThemeColors('#FFFFFF', '#BF8F00', '#BF8F00', '#FFF2CC', '#FFFFFF'),
'yellow_light': ThemeColors('#BF8F00', '#FFFFFF', '#BF8F00', '#FFF2CC', '#FFFFFF'),
}
def create_html_start():
return '''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html lang="en" xmlns="http://www.w3.org/1999/xhtml"
xmlns:v="urn:schemas-microsoft-com:vml" xmlns:o="urn:schemas-microsoft-com:office:office">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
'''
def create_css(theme, font_family, font_size, text_align):
theme = theme.lower()
if theme in THEME_COLORS:
tc = THEME_COLORS[theme]
else:
raise ValueError(f"theme:{theme} is an invalid theme. It should be on of these themes:{THEME_COLORS.keys()}")
return '''
<STYLE TYPE="text/css">
body {
font-family: %s;
font-size: %s;
}
table {
border: 0px;
border-collapse: collapse;
}
td, th {
font-family: %s;
font-size: %s;
text-align: %s;
padding: 0px 5px 0px 5px;
white-space: nowrap;
}
th {
border-bottom: 2px solid %s;
}
thead, tfoot {
color: %s;
background: %s;
font-weight: bold;
text-align: center;
}
tr.odd { background: %s; }
tr.even { background: %s; }
</STYLE>
</HEAD>
''' % (font_family, font_size, font_family, font_size, text_align, tc.border,
tc.header_fg_color, tc.header_bg_color,
tc.odd_bg_color, tc.even_bg_color)
def create_row(row_data, is_header, row_number=0):
if is_header:
element = 'TH'
odd_or_even_class = ''
else:
element = 'TD'
odd_or_even = 'odd' if row_number % 2 else 'even'
odd_or_even_class = f'CLASS="{odd_or_even}"'
return f'<TR {odd_or_even_class}>' + " ".join(map(lambda v: f'<{element}>{v}</{element}>', row_data)) + "</TR>"
def create_table(data,
header=None, footer=None,
theme=DEFAULT_THEME,
font_family=DEFAULT_FONT_FAMILY, font_size=DEFAULT_FONT_SIZE,
text_align=DEFAULT_TEXT_ALIGN,
add_line_number=None
):
if type(data) is list and all(type(l) is list for l in data):
if header:
header_data = header
body_data = data
else:
header_data = data[0]
body_data = data[1:]
elif type(data).__name__ == 'DataFrame':
if header:
header_data = header
else:
header_data = data.columns.to_list()
body_data = data.values.tolist()
else:
raise Exception(f"data:{data} must either be a list of lists or a pandas.DataFrame")
body = '<BODY><TABLE>'
body = body + "<THEAD>" + create_row(header_data, True) + "</THEAD>"
body = body + "<TBODY>"
r = 1
for row in body_data:
body = body + create_row(row, False, r)
r += 1
body = body + "<TBODY>"
body = body + "</TABLE>"
html = create_html_start() + create_css(theme, font_family, font_size, text_align) + body
return html
```
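A minimal usage sketch (not from the original repository), assuming `create_table` is importable from the package; the table data and output path are illustrative:
```python
# Hypothetical usage: build an HTML table from a list of lists and write it to disk.
from prettier_html_table import create_table  # import path assumed from the package layout

data = [
    ["name", "qty", "price"],  # first row becomes the header when header=None
    ["apple", 3, 1.25],
    ["pear", 5, 0.99],
]
html = create_table(data, theme="green_dark", font_size="small")
with open("report.html", "w") as fh:  # illustrative output path
    fh.write(html)
```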
|
{
"source": "jeromegit/python-tabulate",
"score": 3
}
|
#### File: python-tabulate/test/test_internal.py
```python
from __future__ import print_function
from __future__ import unicode_literals
import tabulate as T
from common import assert_equal, skip, rows_to_pipe_table_str, cols_to_pipe_str
def test_multiline_width():
"Internal: _multiline_width()"
multiline_string = "\n".join(["foo", "barbaz", "spam"])
assert_equal(T._multiline_width(multiline_string), 6)
oneline_string = "12345"
assert_equal(T._multiline_width(oneline_string), len(oneline_string))
def test_align_column_decimal():
"Internal: _align_column(..., 'decimal')"
column = ["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"]
output = T._align_column(column, "decimal")
expected = [
" 12.345 ",
"-1234.5 ",
" 1.23 ",
" 1234.5 ",
" 1e+234 ",
" 1.0e234",
]
assert_equal(output, expected)
def test_align_column_none():
"Internal: _align_column(..., None)"
column = ["123.4", "56.7890"]
output = T._align_column(column, None)
expected = ["123.4", "56.7890"]
assert_equal(output, expected)
def test_align_column_multiline():
"Internal: _align_column(..., is_multiline=True)"
column = ["1", "123", "12345\n6"]
output = T._align_column(column, "center", is_multiline=True)
expected = [" 1 ", " 123 ", "12345" + "\n" + " 6 "]
assert_equal(output, expected)
def test_wrap_text_to_colwidths():
"Internal: Test _wrap_text_to_colwidths to show it will wrap text based on colwidths"
rows = [
["mini", "medium", "decently long", "wrap will be ignored"],
[
"small",
"JustOneWordThatIsWayTooLong",
"this is unreasonably long for a single cell length",
"also ignored here",
],
]
widths = [10, 10, 20, None]
expected = [
["mini", "medium", "decently long", "wrap will be ignored"],
[
"small",
"JustOneWor\ndThatIsWay\nTooLong",
"this is unreasonably\nlong for a single\ncell length",
"also ignored here",
],
]
result = T._wrap_text_to_colwidths(rows, widths)
assert_equal(result, expected)
def test_wrap_text_wide_chars():
"Internal: Wrap wide characters based on column width"
try:
import wcwidth # noqa
except ImportError:
skip("test_wrap_text_wide_chars is skipped")
rows = [["청자청자청자청자청자", "약간 감싸면 더 잘 보일 수있는 다소 긴 설명입니다"]]
widths = [5, 20]
expected = [["청자\n청자\n청자\n청자\n청자", "약간 감싸면 더 잘\n보일 수있는 다소 긴\n설명입니다"]]
result = T._wrap_text_to_colwidths(rows, widths)
assert_equal(result, expected)
def test_wrap_text_to_numbers():
"""Internal: Test _wrap_text_to_colwidths force ignores numbers by
default so as not to break alignment behaviors"""
rows = [
["first number", 123.456789, "123.456789"],
["second number", "987654.123", "987654.123"],
]
widths = [6, 6, 6]
expected = [
["first\nnumber", 123.456789, "123.45\n6789"],
["second\nnumber", "987654.123", "987654\n.123"],
]
result = T._wrap_text_to_colwidths(rows, widths, numparses=[True, True, False])
assert_equal(result, expected)
def test_wrap_text_to_colwidths_single_ansi_colors_full_cell():
"""Internal: autowrapped text can retain a single ANSI colors
when it is at the beginning and end of full cell"""
data = [
[
(
"\033[31mThis is a rather long description that might"
" look better if it is wrapped a bit\033[0m"
)
]
]
result = T._wrap_text_to_colwidths(data, [30])
expected = [
[
"\n".join(
[
"\033[31mThis is a rather long\033[0m",
"\033[31mdescription that might look\033[0m",
"\033[31mbetter if it is wrapped a bit\033[0m",
]
)
]
]
assert_equal(expected, result)
def test_wrap_text_to_colwidths_colors_wide_char():
"""Internal: autowrapped text can retain a ANSI colors with wide chars"""
try:
import wcwidth # noqa
except ImportError:
skip("test_wrap_text_to_colwidths_colors_wide_char is skipped")
data = [[("\033[31m약간 감싸면 더 잘 보일 수있는 다소 긴" " 설명입니다 설명입니다 설명입니다 설명입니다 설명\033[0m")]]
result = T._wrap_text_to_colwidths(data, [30])
expected = [
[
"\n".join(
[
"\033[31m약간 감싸면 더 잘 보일 수있는\033[0m",
"\033[31m다소 긴 설명입니다 설명입니다\033[0m",
"\033[31m설명입니다 설명입니다 설명\033[0m",
]
)
]
]
assert_equal(expected, result)
def test_wrap_text_to_colwidths_multi_ansi_colors_full_cell():
"""Internal: autowrapped text can retain multiple ANSI colors
when they are at the beginning and end of full cell
(e.g. text and background colors)"""
data = [
[
(
"\033[31m\033[43mThis is a rather long description that"
" might look better if it is wrapped a bit\033[0m"
)
]
]
result = T._wrap_text_to_colwidths(data, [30])
expected = [
[
"\n".join(
[
"\033[31m\033[43mThis is a rather long\033[0m",
"\033[31m\033[43mdescription that might look\033[0m",
"\033[31m\033[43mbetter if it is wrapped a bit\033[0m",
]
)
]
]
assert_equal(expected, result)
def test_wrap_text_to_colwidths_multi_ansi_colors_in_subset():
"""Internal: autowrapped text can retain multiple ANSI colors
when they are around subsets of the cell"""
data = [
[
(
"This is a rather \033[31mlong description\033[0m that"
" might look better \033[93mif it is wrapped\033[0m a bit"
)
]
]
result = T._wrap_text_to_colwidths(data, [30])
expected = [
[
"\n".join(
[
"This is a rather \033[31mlong\033[0m",
"\033[31mdescription\033[0m that might look",
"better \033[93mif it is wrapped\033[0m a bit",
]
)
]
]
assert_equal(expected, result)
def test__remove_separating_lines():
with_rows = [
[0, "a"],
[1, "b"],
T.SEPARATING_LINE,
[2, "c"],
T.SEPARATING_LINE,
[3, "c"],
T.SEPARATING_LINE,
]
result, sep_lines = T._remove_separating_lines(with_rows)
expected = rows_to_pipe_table_str([[0, "a"], [1, "b"], [2, "c"], [3, "c"]])
assert_equal(expected, rows_to_pipe_table_str(result))
assert_equal("2|4|6", cols_to_pipe_str(sep_lines))
def test__reinsert_separating_lines():
with_rows = [
[0, "a"],
[1, "b"],
T.SEPARATING_LINE,
[2, "c"],
T.SEPARATING_LINE,
[3, "c"],
T.SEPARATING_LINE,
]
sans_rows, sep_lines = T._remove_separating_lines(with_rows)
T._reinsert_separating_lines(sans_rows, sep_lines)
expected = rows_to_pipe_table_str(with_rows)
assert_equal(expected, rows_to_pipe_table_str(sans_rows))
```
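For context, a hedged sketch of the public API that these internal helpers back; `SEPARATING_LINE` and `maxcolwidths` are the names used in the tests above, and their availability depends on the installed tabulate version:
```python
# Illustrative use of the public API exercised by the internals tested above.
from tabulate import tabulate, SEPARATING_LINE

rows = [
    ["spam", 41.9999],
    ["eggs", 451.0],
    SEPARATING_LINE,  # handled by _remove/_reinsert_separating_lines internally
    ["total", 492.9999],
]
print(tabulate(rows, headers=["item", "qty"]))

# maxcolwidths drives the _wrap_text_to_colwidths behaviour tested above.
print(tabulate([["a", "this is a rather long description"]],
               maxcolwidths=[None, 12], tablefmt="grid"))
```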
|
{
"source": "jerome-habana/pytorch-lightning",
"score": 2
}
|
#### File: tests/plugins/test_double_plugin.py
```python
import pickle
from unittest.mock import MagicMock
import pytest
import torch
from torch.utils.data import DataLoader, Dataset
from pytorch_lightning import Trainer
from pytorch_lightning.plugins import DoublePrecisionPlugin
from tests.helpers.boring_model import BoringModel, RandomDataset
from tests.helpers.runif import RunIf
class RandomFloatIntDataset(Dataset):
def __init__(self, size, length):
self.len = length
self.float_data = torch.randn(length, size)
self.int_data = torch.randint(10, (length, 1))
def __getitem__(self, index):
return self.float_data[index], self.int_data[index]
def __len__(self):
return self.len
class DoublePrecisionBoringModel(BoringModel):
def training_step(self, batch, batch_idx):
float_data, int_data = batch
assert torch.tensor([0.0]).dtype == torch.float64
assert torch.tensor([0.0], dtype=torch.float16).dtype == torch.float16
assert float_data.dtype == torch.float64
output = self(float_data)
loss = self.loss(batch, output)
return {"loss": loss}
def training_epoch_end(self, outputs) -> None:
assert torch.tensor([0.0]).dtype == torch.float32
return super().training_epoch_end(outputs)
def validation_step(self, batch, batch_idx):
assert batch.dtype == torch.float64
assert torch.tensor([0.0]).dtype == torch.float64
assert torch.tensor([0.0], dtype=torch.float16).dtype == torch.float16
output = self(batch)
loss = self.loss(batch, output)
return {"x": loss}
def test_step(self, batch, batch_idx):
assert batch.dtype == torch.float64
assert torch.tensor([0.0]).dtype == torch.float64
assert torch.tensor([0.0], dtype=torch.float16).dtype == torch.float16
output = self(batch)
loss = self.loss(batch, output)
return {"y": loss}
def predict_step(self, batch, batch_idx, dataloader_idx=0):
assert batch.dtype == torch.float64
assert torch.tensor([0.0]).dtype == torch.float64
assert torch.tensor([0.0], dtype=torch.float16).dtype == torch.float16
return self(batch)
def on_fit_start(self):
assert self.layer.weight.dtype == torch.float64
def on_after_backward(self):
assert self.layer.weight.grad.dtype == torch.float64
def train_dataloader(self):
dataset = RandomFloatIntDataset(32, 64)
assert dataset.float_data.dtype == torch.float32 # Don't start with double data
return DataLoader(dataset)
def predict_dataloader(self):
return DataLoader(RandomDataset(32, 64))
class DoublePrecisionBoringModelNoForward(BoringModel):
def training_step(self, batch, batch_idx):
assert batch.dtype == torch.float64
output = self.layer(batch)
assert output.dtype == torch.float64
loss = self.loss(batch, output)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
assert batch.dtype == torch.float64
output = self.layer(batch)
assert output.dtype == torch.float64
loss = self.loss(batch, output)
return {"x": loss}
def test_step(self, batch, batch_idx):
assert batch.dtype == torch.float64
output = self.layer(batch)
assert output.dtype == torch.float64
loss = self.loss(batch, output)
return {"y": loss}
def predict_step(self, batch, batch_idx, dataloader_idx=0):
assert batch.dtype == torch.float64
output = self.layer(batch)
assert output.dtype == torch.float64
return output
def predict_dataloader(self):
return DataLoader(RandomDataset(32, 64))
class DoublePrecisionBoringModelComplexBuffer(BoringModel):
def __init__(self):
super().__init__()
self.register_buffer("complex_buffer", torch.complex(torch.rand(10), torch.rand(10)), False)
def on_fit_start(self):
assert self.layer.weight.dtype == torch.float64
assert self.complex_buffer.dtype == torch.complex64
@pytest.mark.parametrize(
"boring_model",
[
DoublePrecisionBoringModel,
DoublePrecisionBoringModelNoForward,
DoublePrecisionBoringModelComplexBuffer,
],
)
def test_double_precision(tmpdir, boring_model):
model = boring_model()
trainer = Trainer(max_epochs=2, default_root_dir=tmpdir, fast_dev_run=2, precision=64, log_every_n_steps=1)
trainer.fit(model)
trainer.test(model)
trainer.predict(model)
@RunIf(min_gpus=2)
def test_double_precision_ddp(tmpdir):
model = DoublePrecisionBoringModel()
trainer = Trainer(
max_epochs=1,
default_root_dir=tmpdir,
strategy="ddp_spawn",
accelerator="gpu",
devices=2,
fast_dev_run=2,
precision=64,
log_every_n_steps=1,
)
trainer.fit(model)
trainer.validate(model)
def test_double_precision_pickle(tmpdir):
model = BoringModel()
plugin = DoublePrecisionPlugin()
model, _, __ = plugin.connect(model, MagicMock(), MagicMock())
pickle.dumps(model)
```
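A minimal hedged sketch of the feature these tests exercise: passing `precision=64` to the `Trainer` so parameters and batches are cast to `torch.float64` (the `BoringModel` helper is the one imported above):
```python
# Sketch: run a LightningModule in double precision, as the tests above verify.
from pytorch_lightning import Trainer
from tests.helpers.boring_model import BoringModel  # test helper used above

model = BoringModel()
trainer = Trainer(max_epochs=1, fast_dev_run=2, precision=64)
trainer.fit(model)  # weights, buffers and batches should now be torch.float64
```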
|
{
"source": "JeromeHXP/ondilo",
"score": 3
}
|
#### File: src/ondilo/ondilo.py
```python
from typing import Optional, Union, Callable, Dict
from requests import Response
from requests_oauthlib import OAuth2Session
from oauthlib.oauth2 import TokenExpiredError
API_HOST = "https://interop.ondilo.com"
API_URL = API_HOST + "/api/customer/v1"
ENDPOINT_TOKEN = "/oauth2/token"
ENDPOINT_AUTHORIZE = "/oauth2/authorize"
DEFAULT_CLIENT_ID = "customer_api"
DEFAULT_CLIENT_SECRET = ""
DEFAULT_SCOPE = "api"
class OndiloError(Exception):
pass
class Ondilo:
def __init__(
self,
token: Optional[Dict[str, str]] = None,
client_id: str = DEFAULT_CLIENT_ID,
client_secret: str = DEFAULT_CLIENT_SECRET,
redirect_uri: Optional[str] = None,
token_updater: Optional[Callable[[str], None]] = None,
):
self.host = API_HOST
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
self.token_updater = token_updater
self.scope = DEFAULT_SCOPE
extra = {"client_id": self.client_id, "client_secret": self.client_secret}
self._oauth = OAuth2Session(
auto_refresh_kwargs=extra,
redirect_uri=redirect_uri,
client_id=client_id,
token=token,
token_updater=token_updater,
scope=self.scope,
)
def refresh_tokens(self) -> Dict[str, Union[str, int]]:
"""Refresh and return new tokens."""
token = self._oauth.refresh_token(f"{self.host}{ENDPOINT_TOKEN}")
if self.token_updater is not None:
self.token_updater(token)
return token
def request_token(
self,
authorization_response: Optional[str] = None,
code: Optional[str] = None,
) -> Dict[str, str]:
"""
Generic method for fetching an access token.
:param authorization_response: Authorization response URL, the callback
URL of the request back to you.
:param code: Authorization code
:return: A token dict
"""
return self._oauth.fetch_token(
f"{self.host}{ENDPOINT_TOKEN}",
authorization_response=authorization_response,
code=code,
include_client_id=True,
)
def get_authurl(self):
"""Get the URL needed for the authorization code grant flow."""
authorization_url, _ = self._oauth.authorization_url(
f"{self.host}{ENDPOINT_AUTHORIZE}"
)
return authorization_url
def request(self, method: str, path: str, **kwargs) -> Response:
"""Make a request.
We don't use the built-in token refresh mechanism of OAuth2 session because
we want to allow overriding the token refresh logic.
"""
url = f"{API_URL}{path}"
try:
return getattr(self._oauth, method)(url, **kwargs)
except TokenExpiredError:
self._oauth.token = self.refresh_tokens()
return getattr(self._oauth, method)(url, **kwargs)
def get_pools(self):
req = self.request("get", "/pools")
if req.status_code != 200:
raise OndiloError
return req.json()
def get_ICO_details(self, poolId):
req = self.request("get", "/pools/" + str(poolId) + "/device")
if req.status_code != 200:
raise OndiloError
return req.json()
def get_last_pool_measures(self, poolId):
req = self.request("get", "/pools/" + str(poolId) + "/lastmeasures")
if req.status_code != 200:
raise OndiloError
return req.json()
def get_pool_recommendations(self, poolId):
req = self.request("get", "/pools/" + str(poolId) + "/recommendations")
if req.status_code != 200:
raise OndiloError
return req.json()
def validate_pool_recommendation(self, poolId, recommendationId):
req = self.request("put", "/pools/" + str(poolId) + "/recommendations/" + str(recommendationId))
if req.status_code != 200:
raise OndiloError
return req.json()
def get_user_units(self):
req = self.request("get", "/user/units")
if req.status_code != 200:
raise OndiloError
return req.json()
def get_user_info(self):
req = self.request("get", "/user/info")
if req.status_code != 200:
raise OndiloError
return req.json()
def get_pool_config(self, poolId):
req = self.request("get", "/pools/" + str(poolId) + "/configuration")
if req.status_code != 200:
raise OndiloError
return req.json()
def get_pool_shares(self, poolId):
req = self.request("get", "/pools/" + str(poolId) + "/shares")
if req.status_code != 200:
raise OndiloError
return req.json()
def get_pool_histo(self, poolId, measure, period):
req = self.request("get", "/pools/" + str(poolId) + "/measures?type=" + measure + "&period=" + period)
if req.status_code != 200:
raise OndiloError
return req.json()
```
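A hedged end-to-end sketch of the client: the redirect URI, callback URL, and the `"id"` field of a pool are placeholders or assumptions, not verified against the Ondilo API documentation:
```python
# Sketch of the OAuth flow and a data call (all values are placeholders).
from ondilo import Ondilo  # assuming the package exposes Ondilo at the top level

client = Ondilo(redirect_uri="https://example.com/callback")
print("Authorize here:", client.get_authurl())

# After the user grants access, pass the full callback URL back in:
client.request_token(authorization_response="https://example.com/callback?code=...&state=...")

pools = client.get_pools()
print(pools)
# e.g. client.get_last_pool_measures(pools[0]["id"])  # "id" field assumed
```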
|
{
"source": "JeromeIllgner/bioInspired",
"score": 4
}
|
#### File: bioInspired/network/ANN.py
```python
import numpy as np
from typing import List
from network.functions import null, sigmoid, gaussian, identity, mean_squared_error as mse
from network.my_types import Layer, Vector, Data
# Possible options for activation functions
funcs = [null, sigmoid, np.tanh, np.cos, gaussian]
# NumPy-vectorised helper: applies a vector of activation functions element-wise to a vector of values.
apply = np.vectorize(lambda f, x: f(x))
class ANN:
"""
An implementation of an Artificial Neural Network using weight matrices in a simple
feed forward architecture.
:param shape: The shape of the network given by neurons per layer
:type shape: List[int]
:param seed: An optional random seed to fix initial conditions
:type seed: int
:param output_activation: Allows a non-identity activation function for the output layer
:type output_activation: bool
:ivar shape: Instance variable to store the network shape
:type shape: List[int]
:ivar layers: A list of Layers storing information about the network
:type layers: List[Layer]
"""
def __init__(self, shape: List[int], seed: int = None, output_activation: bool = False) -> None:
self.shape = np.array(shape)
self.layers = []
# Set to known seed if required
if seed:
np.random.seed(seed)
# Initialise weights, activation function and bias for each node
for layer in range(len(shape) - 1):
weights = np.random.randn(shape[layer+1], shape[layer])
activation = np.random.choice(funcs, shape[layer+1])
bias = np.random.randn(shape[layer+1])
self.layers.append(Layer(weights, activation, bias))
# Set output activation to the identity to allow for all values
if not output_activation:
self.layers[-1].activation = np.array([identity for _ in range(shape[-1])])
def predict_evaluate(self, input_vectors: Data, ground_truth: Data, error_function=mse) -> float:
"""
Performs the prediction on the sample of input vectors and compares the results against the ground truth using
the error function (MSE by default). The list of input vectors and output vectors should have the same length.
The function can also compare a single input vector against the expected output vector.
:param input_vectors: Single or list of input vectors to base predictions on
:type input_vectors: Data
:param ground_truth: Single or list of ground truths to compare against predictions. Should have same length as
input_vectors
:type ground_truth: Data
:param error_function: A function to calculate the error on two equal length lists of input vectors and ground
truths and returns a float.
:type error_function: Data, Data -> float
:return: Returns a floating point error score based on the error function.
"""
if len(input_vectors) != len(ground_truth):
raise ValueError(f"Number of test samples ({len(input_vectors)}) does not match ground "
f"truth ({len(ground_truth)})")
# Make inputs conform
if type(ground_truth[0]) is float:
ground_truth = [ground_truth]
# Perform predictions and calculate error
predictions = self.predict(input_vectors)
return error_function(predictions, ground_truth)
def predict(self, x: Data) -> np.ndarray:
"""
The prediction function of the network, which takes a list of input vectors and returns the corresponding output vectors.
:param x: The input vector
:type x: Data
:return: List of output vectors
:rtype: List[Vector]
"""
# Check if it's a single input vector or many
if type(x[0]) is float:
x = [x]
# Check if vector has correct input shape and perform predictions
predictions = []
for input_vector in x:
if len(input_vector) == self.shape[0]:
predictions.append(self._predict_one(input_vector))
else:
raise ValueError(f"Size of input vector ({len(input_vector)}) does not match "
f"size of input layer ({self.shape[0]})")
return np.array(predictions)
def _predict_one(self, x: Vector) -> np.ndarray:
"""
Performs a single model prediction by propagating the input values forward through the network using matrix
multiplication, followed by applying the activation function to the sum of the result and the bias.
:param x: Single input vector
:type x: Vector
:return: Single output vector
:rtype: Vector
"""
layer_values = np.array(x)
# For each layer, apply the activation function to the sum of the weighted inputs and the bias.
# Weight summation uses matrix multiplication for speed and brevity.
for layer in self.layers:
layer_values = apply(layer.activation, layer.weights @ layer_values + layer.bias)
return np.array(layer_values)
@property
def size(self):
return sum(self.shape[1:]), self.shape[1:] * self.shape[:-1]
@property
def weights(self):
return np.array([layer.weights for layer in self.layers])
@weights.setter
def weights(self, new_weights):
new_weights = np.array(new_weights)
if new_weights.shape == self.weights.shape:
for layer_weights, layer in zip(new_weights, self.layers):
layer.weights = layer_weights
@property
def activation_funcs(self):
return np.array([layer.activation for layer in self.layers])
@activation_funcs.setter
def activation_funcs(self, new_funcs):
new_funcs = np.array(new_funcs)
if new_funcs.shape == self.activation_funcs.shape:
for layer_funcs, layer in zip(new_funcs, self.layers):
layer.activation = layer_funcs
@property
def biases(self):
return np.array([layer.bias for layer in self.layers])
@biases.setter
def biases(self, new_biases):
new_biases = np.array(new_biases)
if new_biases.shape == self.biases.shape:
for layer_bias, layer in zip(new_biases, self.layers):
layer.bias = layer_bias
```
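A short hedged sketch of constructing and querying the network; the shape, seed, inputs, and target are illustrative, and the outputs depend on the randomly chosen activation functions and weights:
```python
# Sketch: a 2-4-1 network evaluated on two illustrative input vectors.
from network.ANN import ANN

net = ANN([2, 4, 1], seed=42)
outputs = net.predict([[0.1, 0.9], [0.5, 0.5]])       # array of shape (2, 1)
error = net.predict_evaluate([[0.1, 0.9]], [[0.3]])   # MSE against a made-up target
print(outputs, error)
```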
#### File: bioInspired/network/__main__.py
```python
import os
from os import path
import click
import pandas
import numpy as np
from network.ANN import ANN, funcs
from network.PSO import PSO
def train_ann(ann: ANN, data):
shape = ann.shape
bias_and_activation = sum(shape[1:])
weights = sum([shape[layer] * shape[layer+1] for layer in range(len(shape) - 1)])
dimensions = 2 * bias_and_activation + weights
pso = PSO(dimensions, wrap_eval(ann, data))
best_per_epoch = pso.optimise()
encode_ann(ann, best_per_epoch[-1][0])
return best_per_epoch
def wrap_eval(ann: ANN, data: (np.ndarray, np.ndarray)):
def wrapped(dimensions):
x, y = data
encode_ann(ann, dimensions)
return ann.predict_evaluate(x, y)
return wrapped
def encode_ann(ann: ANN, dimensions: np.ndarray):
# Get sizes
ab_size, weight_size = ann.size
# Get cumulative sums for indexing
cumulative_nodes = np.cumsum(ann.shape[1:])
cumulative_weights = np.cumsum(weight_size)
# Reshape weights
weights = [dimensions[start:end] for start, end in zip(np.insert(cumulative_weights[:-1], 0, 0), cumulative_weights)]
weights = [weight.reshape(height, width) for weight, height, width in zip(weights, ann.shape[1:], ann.shape[:-1])]  # (next_layer, prev_layer), matching the ANN weight layout
# Reshape biases
bias = dimensions[-ab_size:]
bias = [bias[start:end] for start, end in zip(np.insert(cumulative_nodes[:-1], 0, 0), cumulative_nodes)]
# Reshape activations
activations = dimensions[-2 * ab_size:-ab_size]
activations = [activations[start:end] for start, end in zip(np.insert(cumulative_nodes[:-1], 0, 0), cumulative_nodes)]
activations = [np.clip(np.floor((activation+1)/2 * len(funcs)), 0, len(funcs)-1) for activation in activations]
activations = [[funcs[int(func)] for func in func_list] for func_list in activations]
# Update Values
ann.weights = weights
ann.biases = bias
ann.activation_funcs = activations
@click.command()
@click.argument('data_folder')
def network(data_folder):
data = load_data(data_folder)
ann = ANN([1, 5, 1])
result = train_ann(ann, data["1in_linear"])
print(result)
print(ann.predict([0.5]))
def load_data(folder: str):
data = {}
with click.progressbar(os.listdir(folder)) as bar:
for file in bar:
no_inputs = int(file[0])
frame = pandas.read_table(path.join(folder, file), delim_whitespace=True, header=None).to_numpy()
file = file[:-4]
data[file] = frame[:, :no_inputs], frame[:, no_inputs:]
return data
if __name__ == "__main__":
network()
```
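The same steps the `network` command performs, as a hedged sketch; the data folder and the `1in_linear` file name are assumptions inferred from `load_data` and `network` above:
```python
# Roughly equivalent to `python -m network <data_folder>` (paths are illustrative).
from network.__main__ import load_data, train_ann
from network.ANN import ANN

data = load_data("data/")  # expects whitespace-delimited files such as 1in_linear.txt
ann = ANN([1, 5, 1])
best_per_epoch = train_ann(ann, data["1in_linear"])
print(best_per_epoch[-1], ann.predict([0.5]))
```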
|
{
"source": "JeromeIllgner/Lux-Design-2021",
"score": 3
}
|
#### File: python/simple/agent.py
```python
import math, sys
from typing import List
from lux.game import Game
from lux.game_map import Cell, RESOURCE_TYPES
from lux.constants import Constants
from lux.game_constants import GAME_CONSTANTS
from lux import annotate
DIRECTIONS = Constants.DIRECTIONS
game_state = None
def setup_game(observation):
## Don't Edit
global game_state
if observation["step"] == 0:
game_state = Game()
game_state._initialize(observation["updates"])
game_state._update(observation["updates"][2:])
game_state.id = observation.player
else:
game_state._update(observation["updates"])
def days_until_night(observation):
day = 30 - observation["step"] % 40
return max(day, 0)
def get_resource_tiles(map):
resource_tiles = []
for y in range(map.height):
for x in range(map.width):
cell = map.get_cell(x,y)
if cell.has_resource():
resource_tiles.append(cell)
return resource_tiles
def get_closest_available_resource_tile(unit, resource_tiles, has_researched_coal, has_researched_uranium):
min_distance = math.inf
closest_resource_tile = None
for tile in resource_tiles:
if tile.resource.type == Constants.RESOURCE_TYPES.COAL and not has_researched_coal:
continue
if tile.resource.type == Constants.RESOURCE_TYPES.URANIUM and not has_researched_uranium:
continue
distance = tile.pos.distance_to(unit.pos)
if distance < min_distance:
min_distance = distance
closest_resource_tile = tile
return closest_resource_tile
def get_nearest_city(unit, cities):
nearest_city_tile = None
min_distance = math.inf
for key, city in cities.items():
for city_tile in city.citytiles:
distance = city_tile.pos.distance_to(unit.pos)
if distance < min_distance:
min_distance = distance
nearest_city_tile = city_tile
return nearest_city_tile
def agent(observation, configuration):
global game_state
setup_game(observation)
actions = []
player = game_state.players[observation.player]
opponent = game_state.players[(observation.player + 1) % 2]
resource_tiles: List[Cell] = get_resource_tiles(game_state.map)
for unit in player.units:
if unit.is_worker() and unit.can_act():
if unit.get_cargo_space_left() > 0:
closest_resource_tile = get_closest_available_resource_tile(unit, resource_tiles, player.researched_coal(), player.researched_uranium())
if closest_resource_tile is not None:
actions.append(unit.move(unit.pos.direction_to(closest_resource_tile.pos)))
else:
nearest_city_tile = get_nearest_city(unit, player.cities)
if nearest_city_tile is not None:
move_dir = unit.pos.direction_to(nearest_city_tile.pos)
actions.append(unit.move(move_dir))
# you can add debug annotations using the functions in the annotate object
# actions.append(annotate.circle(0, 0))
return actions
```
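A small worked example of the arithmetic in `days_until_night`: the 40-turn cycle with 30 day turns comes from the game constants, and the sample steps are illustrative:
```python
# days_until_night: each 40-turn cycle starts with 30 day turns, then 10 night turns.
for step in (0, 25, 29, 30, 35, 40):
    print(step, max(30 - step % 40, 0))
# step 25 -> 5 day turns left; steps 30-39 -> 0 (night); step 40 starts a new cycle -> 30
```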
|
{
"source": "JeromeIllgner/SemanticInpainting",
"score": 3
}
|
#### File: SemanticInpainting/Models/BescosERFNet.py
```python
import torch
import torch.nn as nn
class BescosERFNet(nn.Module):
def __init__(self, input_nc, output_nc, ngf):
super().__init__()  # nn.Module must be initialised before sub-modules are assigned
self.conv1 = nn.Conv2d(input_nc, ngf * 2, 4, stride=2, padding=1)
self.conv2 = nn.Conv2d(ngf * 2, ngf * 4, 4, stride=2, padding=1)
self.conv3 = nn.Conv2d(ngf * 4, ngf * 8, 4, stride=2, padding=1)
self.conv4 = nn.Conv2d(ngf * 8, ngf * 8, 4, stride=2, padding=1)
self.conv5 = nn.Conv2d(ngf * 8, ngf * 8, 4, stride=2, padding=1)
self.conv6 = nn.Conv2d(ngf * 8, ngf * 8, 4, stride=2, padding=1)
self.conv7 = nn.Conv2d(ngf * 8, ngf * 8, 4, stride=2, padding=1)
self.deconv1 = nn.Conv2d(ngf * 8, ngf * 8, 4, stride=2, padding=1)
self.deconv2 = nn.Conv2d(ngf * 8, ngf * 8, 4, stride=2, padding=1)
self.deconv3 = nn.Conv2d(ngf * 8, ngf * 8, 4, stride=2, padding=1)
self.deconv4 = nn.Conv2d(ngf * 8, ngf * 4, 4, stride=2, padding=1)
self.deconv5 = nn.Conv2d(ngf * 4, ngf * 2, 4, stride=2, padding=1)
self.deconv6 = nn.Conv2d(ngf * 2, ngf, 4, stride=2, padding=1)
self.deconv7 = nn.Conv2d(ngf, output_nc, 4, stride=2, padding=1)
self.o1 = nn.Tanh()
```
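The class only registers layers and does not define `forward`, so a hedged sketch can only instantiate it and run a single stage by hand; the import path and input size are assumptions:
```python
# Sketch only: call an encoder stage directly, since no forward() is defined.
import torch
from Models.BescosERFNet import BescosERFNet  # import path assumed from the repo layout

net = BescosERFNet(input_nc=3, output_nc=3, ngf=64)
x = torch.randn(1, 3, 256, 256)  # illustrative input
h = net.conv1(x)                 # stride-2 convolution halves the spatial resolution
print(h.shape)                   # torch.Size([1, 128, 128, 128]) with ngf=64
```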
|
{
"source": "JeromeJGuay/viking_ADCP_processing",
"score": 2
}
|
#### File: magtogoek/adcp/loader.py
```python
import logging
import sys
import typing as tp
from pathlib import Path
import numpy as np
import pandas as pd
import xarray as xr
from magtogoek.adcp.rti_reader import RtiReader
from magtogoek.adcp.rti_reader import l as rti_log
from magtogoek.adcp.tools import dday_to_datetime64
from magtogoek.utils import Logger, get_files_from_expression
from pycurrents.adcp import rdiraw, transform
from pycurrents.adcp.rdiraw import Bunch, Multiread, rawfile
# This is to prevent pycurrents from printing warnings.
logging.getLogger(rdiraw.__name__).setLevel("CRITICAL")
RDI_SONAR = ["wh", "sv", "os", "sw_pd0"]
RTI_SONAR = ["sw"]
VEL_FILL_VALUE = -32768.0
l = Logger(level=0)
class FilesFormatError(Exception):
pass
class InvalidSonarError(Exception):
pass
def load_adcp_binary(
filenames: tp.Union[str, tp.List[str]],
sonar: str,
yearbase: int,
orientation: str = None,
leading_index: int = None,
trailing_index: int = None,
sensor_depth: float = None,
bad_pressure: bool = False,
start_time: str = None,
time_step: float = None,
) -> xr.Dataset:
"""Load RDI and RTI adcp data.
Return a dataset with the ADCP data loaded. For RDI FIXME pycurrents...
Notes:
-----
The adcp orientation is taken from the first profile of the first file
if no orientation is given.
Parameters
----------
filenames:
Path/to/files
sonar:
Type of sonar (`os`, `wh`, `sv`, `sw`, `sw_pd0`)
yearbase:
year that the sampling began.
orientation:
Adcp orientation. Either `up` or `down`. Will overwrite the value
of the binary file.
leading_index:
Number of ensemble to cut from the start.
trailing_index:
Number of ensemble to cut from the end.
sensor_depth:
If provided, the adcp depth (meter) will be adjusted so that its median equal `sensor_depth`.
bad_pressure:
If True, XducerDepth is set to 0 or to `sensor_depth` if provided.
start_time:
Format 'YYYY-MM-DDThh:mm:ss.ssss'.
If provided, a new time coordinate vector, starting at `start_time`, is used instead of the
one found in the raw adcp file.
Use the parameter `time_step` to use a different time step than the one found in the raw adcp file.
time_step:
Time step in seconds. Only use if a `start_time` value is provided.
Returns
-------
Dataset with the loaded adcp data
"""
l.reset()
l.section("Loading adcp data", t=True)
filenames = get_files_from_expression(filenames)
if sonar == "sv":
l.warning(
"""(from pycurrents)
- The SV support is under development. Missing features:
- The 0x7000-0x7004 IDs are not being parsed and stored.
- See the pycurrents.adcp.rdiraw module for more information"""
)
# ------------------------ #
# Reading the data file(s) #
# ------------------------ #
if sonar in RTI_SONAR:
l.log(_print_filenames("RTI ENS", filenames))
data = RtiReader(filenames=filenames).read(
start_index=leading_index, stop_index=trailing_index
)
l.logbook += rti_log.logbook
elif sonar in RDI_SONAR:
if sonar == "sw_pd0":
sonar = "wh"
l.log(_print_filenames("RTI pd0", filenames))
else:
l.log(_print_filenames("RDI pd0", filenames))
if trailing_index:
trailing_index *= -1
try:
data = Multiread(fnames=filenames, sonar=sonar, yearbase=yearbase).read(
start=leading_index, stop=trailing_index
)
if not data:
raise ValueError(
"The sum of the trim values is greater than the number of ensemble."
)
data.vel = np.asarray(data.vel)
if "vbvel" in data:
data.vbvel = np.asarray(data.vbvel)
if "bt_vel" in data:
data.bt_vel = np.asarray(data.bt_vel)
except RuntimeError:
print(
f"ERROR: The input_files are not in a RDI pd0 format. RDI sonar : {RDI_SONAR}"
)
sys.exit()
# Reading the files FixedLeaders to check for invalid config.
# noinspection PyTupleAssignmentBalance
data.sysconfig["up"], invalid_config_count = check_pd0_fixed_leader(
filenames=filenames,
sonar=sonar,
yearbase=yearbase,
leading_index=leading_index,
trailing_index=trailing_index,
)
if invalid_config_count:
l.warning(
f"Invalid configuration, msb=`11111111` and lsb=`11111111`, found in the SysCfg of {invalid_config_count} FixedLeader. "
)
else:
raise InvalidSonarError(
f"{sonar} is not a valid. Valid sonar: `os`, `wh`, `sv`, `sw`, `sw_pd0` "
)
# -------------------- #
# Compares orientation #
# -------------------- #
sysconfig_orientation = "up" if data.sysconfig["up"] else "down"
if not orientation:
orientation = sysconfig_orientation
else:
if orientation != sysconfig_orientation:
l.warning(
f"The user input adcp orientation is {orientation} but the one found in the file was {sysconfig_orientation}."
)
l.log(f"Adcp orientation is set to {orientation}.")
# ---------------------------- #
# Convert `dday` to datetime64 #
# ---------------------------- #
time, time_string, bad_dday = _get_time(data.dday, yearbase, start_time, time_step)
# ----------------------------------------------------------- #
# Convert depth relative to the ADCP to depth below surface #
# ----------------------------------------------------------- #
if bad_pressure:
l.log("XducerDepth were discarded by the user.")
if sensor_depth:
data.XducerDepth[:] = sensor_depth
l.log(f"XducerDepth set to {sensor_depth} m.")
else:
data.XducerDepth[:] = 0
l.log("XducerDepth set to 0 m.")
average_xducer_depth = np.round(np.median(data.XducerDepth), 3)
l.log(f"Sensor depth (XducerDepth) in raw file : {average_xducer_depth} m")
xducer_depth = data.XducerDepth
depth_difference = 0
if sensor_depth:
depth_difference = round(average_xducer_depth - sensor_depth, 3)
if abs(depth_difference) > 0:
l.log(
[
f"The difference between the raw file sensor depth and the user"
f"provided `sensor_depth` ({sensor_depth} m) is {depth_difference} m",
]
)
l.log(
f"{-depth_difference} m was added to the depths measured by the instrument."
)
average_xducer_depth = sensor_depth
xducer_depth -= depth_difference
if sonar == "os":
depth = data.dep
else:
if orientation == "down":
depth = average_xducer_depth + data.dep
else:
depth = average_xducer_depth - data.dep
if (depth < 0).all():
l.warning("Bin depths are all negative, ADCP orientation is probably wrong.")
# ------------------------ #
# Initializing the dataset #
# ------------------------ #
dataset = xr.Dataset(coords={"depth": depth, "time": time})
# --------------------------------------- #
# Dealing with the coordinates system #
# --------------------------------------- #
original_coordsystem = data.trans["coordsystem"]
if original_coordsystem != "earth":
l.log(f"The velocity data are in {data.trans['coordsystem']} coordinate")
coordsystem2earth(data=data, orientation=orientation)
if data.trans["coordsystem"] == "xyz":
l.warning("Roll, Pitch or Heading seems to be missing from the data file.")
l.log(f"The velocity data were transformed to {data.trans['coordsystem']}")
# --------------------------- #
# Loading the transducer data #
# --------------------------- #
data.vel[data.vel == VEL_FILL_VALUE] = np.nan
# WATER VELOCITIES
dataset["u"] = (["depth", "time"], data.vel[:, :, 0].T)
dataset["v"] = (["depth", "time"], data.vel[:, :, 1].T)
dataset["w"] = (["depth", "time"], data.vel[:, :, 2].T)
dataset["e"] = (["depth", "time"], data.vel[:, :, 3].T)
l.log("Velocity data loaded")
if sonar == "sv":
data.vbvel[data.vbvel == VEL_FILL_VALUE] = np.nan
dataset["vb_vel"] = (["depth", "time"], data.vbvel.T)
dataset["vb_corr"] = (["depth", "time"], np.asarray(data.VBCorrelation.T))
dataset["vb_amp"] = (["depth", "time"], np.asarray(data.VBIntensity.T))
if "VBPercentGood" in data:
dataset["vb_pg"] = (["depth", "time"], np.asarray(data.VBPercentGood.T))
l.log("Data from the Sentinel V fifth beam loaded.")
# BOTTOM VELOCITIES
if "bt_vel" in data:
if (data.bt_vel == 0).all():
l.log(
"Bottom track values were all `0`, therefore they were dropped from the output."
)
elif not np.isfinite(data.bt_vel).all():
l.log(
"Bottom track values were all `nan`, therefore they were dropped from the output."
)
else:
data.bt_vel[data.bt_vel == VEL_FILL_VALUE] = np.nan
dataset["bt_u"] = (["time"], data.bt_vel[:, 0])
dataset["bt_v"] = (["time"], data.bt_vel[:, 1])
dataset["bt_w"] = (["time"], data.bt_vel[:, 2])
dataset["bt_e"] = (["time"], data.bt_vel[:, 3])
l.log("Bottom track data loaded")
# BOTTOM DEPTH
if "bt_depth" in data:
if (data.bt_depth == 0).all():
l.log(
"Bottom depth values were all `0`, therefore they were dropped from the output."
)
elif not np.isfinite(data.bt_depth).all():
l.log(
"Bottom depth values were all `nan`, therefore they were dropped from the output."
)
else:
dataset["bt_depth"] = (
["time"],
np.asarray(np.nanmean(data.bt_depth, axis=-1)),
)
if orientation == "up":
l.log(
"In a `up` orientation, bottom_depth corresponds to the water height above adcp, thus should "
"correspond to the xducer_depth measurements and bottom velocities correspond to the water surface "
"velocity. "
)
l.log(
f"The averaged xducer_depth computed from the bottom tracking is {np.median(data.bt_depth)}."
)
elif sensor_depth:
if abs(depth_difference) > 0 and sonar == "os":
dataset["bt_depth"] -= depth_difference
else:
dataset["bt_depth"] += xducer_depth
l.log("Bottom depth data loaded")
# QUALITY
if "pg" in data:
if original_coordsystem == "beam":
dataset["pg"] = (["depth", "time"], np.asarray(np.mean(data.pg, axis=2).T))
l.log(
"Percent good was computed by averaging each beam PercentGood. The raw data were in beam coordinate."
)
else:
dataset["pg"] = (["depth", "time"], np.asarray(data.pg4.T))
else:
l.warning("Percent good was not retrieve from the dataset.")
if "cor1" in data:
for i in range(1, 5):
dataset[f"corr{i}"] = (["depth", "time"], np.asarray(data[f"cor{i}"].T))
if "amp1" in data:
for i in range(1, 5):
dataset[f"amp{i}"] = (["depth", "time"], np.asarray(data[f"amp{i}"].T))
# ------------------ #
# Loading depth data #
# ------------------ #
# For `wh`, `sv` and `sw`, XducerDepth varies over time but is constant for `os`.
if sonar != "os":
dataset["xducer_depth"] = (["time"], np.asarray(xducer_depth))
# --------------------------- #
# Loading the navigation data #
# --------------------------- #
if "rawnav" in data:
dataset["lon"] = (
["time"],
np.array(data["rawnav"]["Lon1_BAM4"] * 180.0 / 2 ** 31),
)
dataset["lat"] = (
["time"],
np.array(data["rawnav"]["Lat1_BAM4"] * 180.0 / 2 ** 31),
)
l.log("Navigation (GPS) data loaded.")
# -------------------------------------------- #
# Quick checkup and loading of other variables #
# -------------------------------------------- #
# For `wh`, `sv` and `sw` the pressure is added if available.
if "Pressure" in data.VL.dtype.names:
if not (data.VL["Pressure"] == 0).all():
dataset["pres"] = (
["time"],
data.VL["Pressure"] / 1000,
) # decapascal to decibar
else:
l.log("Pressure data unavailable")
if "heading" in data:
if (data.heading == 0).all() or (np.diff(data.heading) == 0).all():
l.warning("Heading data are either all 0, or not varying.")
else:
dataset["heading"] = (["time"], np.asarray(data.heading))
if "roll" in data:
if (data.roll == 0).all() or (np.diff(data.roll) == 0).all():
l.warning("Roll data are either all 0, or not varying.")
else:
dataset["roll_"] = (["time"], np.asarray(data.roll))
if "pitch" in data:
if (data.pitch == 0).all() or (np.diff(data.pitch) == 0).all():
l.warning("Pitch data are either all 0, or not varying.")
else:
dataset["pitch"] = (["time"], np.asarray(data.pitch))
if "temperature" in data:
if (data.temperature == 0).all() or (np.diff(data.temperature) == 0).all():
l.warning("Temperature data are either all 0, or not varying.")
else:
dataset["temperature"] = (["time"], np.asarray(data.temperature))
# ------------------------------- #
# Load time string or dday if bad #
# ------------------------------- #
if bad_dday:
dataset["dday"] = (["time"], np.asarray(data.dday))
else:
dataset["time_string"] = (["time"], time_string)
if orientation == "up":
dataset = dataset.sortby("depth")
# -------------- #
# Add attributes #
# -------------- #
sonar_names = dict(
wh="WorkHorse", sv="Sentinel V", os="Ocean Surveyor", sw="SeaWATCH"
)
dataset.attrs["sonar"] = sonar_names[sonar]
dataset.attrs["manufacturer"] = (
"Teledyne RD Instruments Inc."
if sonar in ["wh", "sv", "os"]
else "Rowe Technologies Inc. (RTI)"
)
if "xducer_depth" not in dataset:
dataset.attrs["xducer_depth"] = round(average_xducer_depth, 2)
dataset.attrs["coord_system"] = data.trans["coordsystem"]
dataset.attrs["beam_angle"] = data.sysconfig["angle"]
dataset.attrs["frequency_Hz"] = data.sysconfig["kHz"] * 1000 # kHz to hz
dataset.attrs["bin_size_m"] = data.CellSize
dataset.attrs["ping_per_ensemble"] = data.NPings
dataset.attrs["ping_type"] = data.pingtype
dataset.attrs["blank_m"] = data.Blank # cm to m
dataset.attrs["bin1dist_m"] = data.Bin1Dist
dataset.attrs["firmware_version"] = ".".join(
list(str(data.FL["FWV"])) + list(str(data.FL["FWR"]))
)
dataset.attrs["transmit_pulse_length_m"] = data.FL["Pulse"] / 100 # cm to m
dataset.attrs["delta_t_sec"] = np.round(
np.mean((np.diff(dataset.time).astype("timedelta64[s]"))).astype(float), 2
)
dataset.attrs["sampling_interval"] = str(dataset.attrs["delta_t_sec"]) + " seconds"
dataset.attrs["beam_pattern"] = "convex" if data.sysconfig["convex"] else "concave"
dataset.attrs["janus"] = "5-Beam" if sonar == "sv" else "4-Beam"
dataset.attrs["magnetic_declination"] = None
if "FL" in data:
if "EV" in data.FL:
if data.FL["EV"] != 0:
dataset.attrs["magnetic_declination"] = data.FL["EV"] / 100
dataset.attrs["magnetic_declination_units"] = "degree east"
dataset.attrs["orientation"] = orientation
dataset.attrs["serial_number"] = (
data.SerialNumber if "SerialNumber" in data else None
)
l.log(f"File(s) loaded with {l.w_count} warnings")
dataset.attrs["logbook"] = l.logbook
return dataset
def coordsystem2earth(data: Bunch, orientation: str):
"""Transforms beam and xyz coordinates to enu coordinates
NOTE: not properly tested. But it should work.
Replace the values of data.vel, data.bt_vel with East, North and Up velocities
and the velocity error for 4 beams ADCP. UHDAS transform functions are used to
transform for beam coordinates and xyz to east-north-up (enu). These function
can use a three-beam solution by faking a fourth beam.
Also changes the value of `coordsystem` in data.trans.
beam coordinates : Velocity measured along beam axis.
xyz coordinates : Velocity in a cartesian coordinate system in the ADCP frame of reference.
enu coordinates : East North Up measured using the heading, pitch, roll of the ADCP.
Parameters
----------
data:
pycurrents.adcp.rdiraw.Bunche object containing: vel[time, depth, beams], bt_vel[time, beams],
heading, roll, pitch sysconfig.convex, sysconfig.angle and trans.coordsystem.
orientation:
adcp orientation. Either `up` or `down`.
Notes
-----
Move the prints outside
"""
if data.trans.coordsystem not in ["beam", "xyz"]:
l.log(
f"Coordsystem value of {data.sysconfig.coordsystem} not recognized. Conversion to enu not available."
)
beam_pattern = "convex" if data.sysconfig["convex"] else "concave"
xyze, bt_xyze = data.vel, data.bt_vel
if data.trans.coordsystem == "beam":
if data.sysconfig.angle:
trans = transform.Transform(
angle=data.sysconfig.angle, geometry=beam_pattern
)
xyze = trans.beam_to_xyz(data.vel)
bt_xyze = trans.beam_to_xyz(data.bt_vel)
else:
l.log("Beam angle missing. Could not convert from beam coordinate.")
if (data.heading == 0).all() or (data.roll == 0).all() or (data.pitch == 0).all():
data.trans["coordsystem"] = "xyz"
for i in range(4):
data.vel[:, :, i] = np.round(xyze[:, :, i], decimals=3)
data.bt_vel[:, i] = np.round(bt_xyze[:, i], decimals=3)
else:
enu = transform.rdi_xyz_enu(
xyze, data.heading, data.pitch, data.roll, orientation=orientation,
)
bt_enu = transform.rdi_xyz_enu(
bt_xyze, data.heading, data.pitch, data.roll, orientation=orientation,
)
data.trans["coordsystem"] = "earth"
for i in range(4):
data.vel[:, :, i] = np.round(enu[:, :, i], decimals=3)
data.bt_vel[:, i] = np.round(bt_enu[:, i], decimals=3)
def check_pd0_fixed_leader(
filenames: tp.Union[str, tp.List[str]],
sonar: str,
yearbase: int,
leading_index: int = None,
trailing_index: int = None,
) -> tp.Tuple[bool, int]:
"""Read Teledyne RDI binary FixedLeader.
Returns the most common orientation and flag for an invalid config.
Invalid config -> msb=`11111111` and lsb=`11111111`
Using: rawfile().read() to get the FixedLeader for all pings.
Parameters
----------
filenames :
File(s) to read.
sonar :
sonar type passed to pycurrents.Multiread.
('nb', 'bb', 'wh', 'sv', or 'os')
yearbase :
start year of the sampling.
leading_index :
trailing_index :
Returns
-------
upward_looking :
True if the adcp is looking upward
invalid_config_count :
number of invalid configuration in the fixed leaders.
Notes:
------
Although the fixed_leader is supposed to be fixed, changes do occur
in the fixed_leader of some pings. A check of some of
the fixed_leader parameters is done in the processing.
"""
# uses rawfile() to get the FixedLeader.
if isinstance(filenames, list):
fixed_leader = np.concatenate(
[
rawfile(fname=fname, sonar=sonar, yearbase=yearbase)
.read(varlist=["FixedLeader"])
.raw.FixedLeader
for fname in filenames
]
)
else:
fixed_leader = (
rawfile(fname=filenames, sonar=sonar, yearbase=yearbase)
.read(varlist=["FixedLeader"])
.raw.FixedLeader
)
bad_config_value = 2 ** 16 - 1
_up = int('10000000', 2)
orientations = fixed_leader["SysCfg"][leading_index:trailing_index] & _up
upward_looking = np.mean(orientations) > 63
invalid_config_count = np.sum(
(fixed_leader["SysCfg"][leading_index:trailing_index] == bad_config_value)
)
return upward_looking, invalid_config_count
def _print_filenames(file_type: str, filenames: tp.List) -> str:
"""Format a string of filenames for prints
`file_type` files :
|-filename1
:
|-filenameN
"""
return (
file_type
+ " files : \n |-"
+ "\n |-".join([p.name for p in list(map(Path, filenames))])
)
def _get_time(
dday: np.ndarray, yearbase: int, start_time: str = None, time_step: float = None
) -> tp.Tuple[np.ndarray, np.ndarray, bool]:
"""
Parameters
----------
yearbase :
Year that the sampling started
start_time :
Format: 'YYYY-MM-DDThh:mm:ss.ssss'.
time_step :
Time step in seconds.
"""
bad_dday = False
if start_time is None:
if (dday < 0).any():
bad_dday = True
l.warning(
"The `dday` (time in fraction of days) vector in the adcp file had negative values."
)
elif (np.diff(dday) < 0).any():
bad_dday = True
l.warning(
"The `dday` (time in fraction of days) was no monotonically increasing."
)
if bad_dday is True:
l.log('`dday` values were added to the dataset.')
start_time = str(dday_to_datetime64(dday[0], yearbase)[1])
time_step = _get_time_step(dday)
time, time_string = _make_time(start_time, len(dday), time_step)
else:
time, time_string = dday_to_datetime64(dday, yearbase)
else:
if time_step is None:
time_step = _get_time_step(dday)
else:
time_step = pd.Timedelta(time_step,'seconds')
time, time_string = _make_time(start_time, len(dday), time_step)
return time, time_string, bad_dday
def _make_time(
start_time: str, length: int, time_step: pd.Timedelta
) -> tp.Tuple[np.ndarray, np.ndarray]:
"""
Parameters
----------
start_time:
Format 'YYYY-MM-DDThh:mm:ss.ssss'.
length:
Length of the time vector.
time_step:
Time step in seconds.
"""
l.warning(
f"Time vector was replace with a time series starting at {start_time} with {time_step.seconds} seconds time step."
)
time = pd.date_range(pd.Timestamp(start_time), periods=length, freq=time_step)
return time, time.astype(str)
def _get_time_step(dday: np.ndarray) -> pd.Timedelta:
"""
Parameters
----------
dday
Returns
-------
"""
time = dday * 86400
deltas, counts = np.unique(np.round(np.diff(time), 4), return_counts=True)
return pd.Timedelta(deltas[counts.argmax()], "seconds")
if __name__ == "__main__":
import matplotlib.pyplot as plt
path = "/home/jeromejguay/ImlSpace/Data/MPO/iml42020/"
ds = load_adcp_binary(filenames=path + "*4.ENS", sonar="sw", yearbase=2020)
```
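A brief hedged sketch of inspecting the returned `xarray.Dataset`; the file expression is illustrative and mirrors the `__main__` block above:
```python
# Sketch: load SeaWATCH ENS files and look at a few of the variables documented above.
from magtogoek.adcp.loader import load_adcp_binary

ds = load_adcp_binary(filenames="data/*.ENS", sonar="sw", yearbase=2020)  # illustrative path
print(ds.u.dims, ds.attrs["sonar"], ds.attrs["orientation"])
print(ds.u.isel(time=0).values)  # eastward velocity profile of the first ensemble
```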
#### File: viking_ADCP_processing/magtogoek/tools.py
```python
import typing as tp
import numpy as np
from pygeodesy.ellipsoidalVincenty import LatLon
def nans(shape: tp.Union[list, tuple, np.ndarray]) -> np.ndarray:
"""return array of nan of shape `shape`"""
return np.full(shape, np.nan)
def circular_distance(a1, a2, units="rad"):
"""
Function circdist usage:
d = circdist(a1, a2, units='rad')
Returns the angular distance `d` between angles a1 and a2,
which are expected to be in radians by default, or in degrees
if units is set to 'deg'.
Parameters
----------
a1, a2 : float
Input angle.
units : str
Units of input angles ('deg', 'rad')
Returns
-------
float
Angular distance between `a1` and `a2`.
Notes
-----
Taken from jeanlucshaw adcp2nc: https://github.com/jeanlucshaw/adcp2nc/
"""
if units == "deg":
a1 = np.pi * a1 / 180
a2 = np.pi * a2 / 180
if np.isscalar(a1) and np.isscalar(a2):
v1 = np.array([np.cos(a1), np.sin(a1)])
v2 = np.array([np.cos(a2), np.sin(a2)])
dot = np.dot(v1, v2)
elif not np.isscalar(a1) and np.isscalar(a2):
a2 = np.tile(a2, a1.size)
v1 = np.array([np.cos(a1), np.sin(a1)]).T
v2 = np.array([np.cos(a2), np.sin(a2)]).T
dot = (v1 * v2).sum(-1)
else:
v1 = np.array([np.cos(a1), np.sin(a1)]).T
v2 = np.array([np.cos(a2), np.sin(a2)]).T
dot = (v1 * v2).sum(-1)
res = np.arccos(np.clip(dot, -1.0, 1.0))
if units == "deg":
res = 180 * res / np.pi
return res
def vincenty(p0: tp.Tuple[float, float], p1: tp.Tuple[float, float]) -> float:
"""Calculate the distance between 2 coordinates with pygeodesy.ellipsoidalVincenty.LatLon
Geodesic distance (Vincenty) with Datum = WGS84
Parameters
----------
p0 :
(longitude, latitude) of the first point in decimal degrees
p1 :
(longitude, latitude) of the second point in decimal degrees
Returns
-------
distance :
distance between the two points in meters.
"""
return LatLon(p0[1], p0[0]).distanceTo(LatLon(p1[1], p1[0]))
def get_gps_bearing(p0: tp.Tuple[float, float], p1: tp.Tuple[float, float]) -> float:
"""Calculate the bearing between two coordinates with pygeodesy.ellipsoidalVincenty.LatLon
Datum = WGS84
Parameters
----------
p0 :
(longitude, latitude) of the first point in decimal degrees
p1 :
(longitude, latitude) of the second point in decimal degrees
Returns
-------
bearing in degrees [0, 360]
"""
return LatLon(p0[1], p0[0]).initialBearingTo(LatLon(p1[1], p1[0]))
```
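A short hedged example of the three helpers; the coordinates are illustrative (roughly Rimouski and Québec City, given as (longitude, latitude) pairs):
```python
# Illustrative use of the angular and geodesic helpers.
from magtogoek.tools import circular_distance, vincenty, get_gps_bearing

print(circular_distance(350, 10, units="deg"))  # 20.0 degrees across the wrap-around
p0, p1 = (-68.52, 48.45), (-71.21, 46.81)       # approximate (lon, lat) pairs
print(vincenty(p0, p1), "m", get_gps_bearing(p0, p1), "deg")
```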
#### File: rti_python/Ensemble/AncillaryData.py
```python
from rti_python.Ensemble.Ensemble import Ensemble
import logging
from pandas import DataFrame
class AncillaryData:
"""
Ancillary Data DataSet.
Float values that give details about the ensemble.
"""
def __init__(self, num_elements=19, element_multiplier=1):
self.ds_type = 10 # Float
self.num_elements = num_elements
self.element_multiplier = element_multiplier
self.image = 0
self.name_len = 8
self.Name = "E000009\0"
self.FirstBinRange = 0.0 # Blank. Depth to the first bin in meters.
self.BinSize = 0.0 # Size of a bin in meters.
self.FirstPingTime = 0.0 # First Ping Time in seconds.
self.LastPingTime = 0.0 # Last Ping Time in seconds. (If averaging pings, this will be the last ping)
self.Heading = 0.0 # Heading in degrees.
self.Pitch = 0.0 # Pitch in degrees.
self.Roll = 0.0 # Roll in degrees.
self.WaterTemp = 0.0 # Water Temperature in fahrenheit
self.SystemTemp = 0.0 # System Temperature in fahrenheit
self.Salinity = 0.0 # Water Salinity set by the user in PPT
self.Pressure = 0.0 # Pressure from pressure sensor in Pascals
self.TransducerDepth = 0.0 # Transducer Depth, used by Pressure sensor in meters
self.SpeedOfSound = 0.0 # Speed of Sound in m/s.
self.RawMagFieldStrength = 0.0 # Raw magnetic field strength (uT) (micro Tesla)
self.RawMagFieldStrength2 = 0.0 # Raw magnetic field strength (uT) (micro Tesla)
self.RawMagFieldStrength3 = 0.0 # Raw magnetic field strength (uT) (micro Tesla)
self.PitchGravityVector = 0.0 # Pitch Gravity Vector
self.RollGravityVector = 0.0 # Roll Gravity Vector
self.VerticalGravityVector = 0.0 # Vertical Gravity Vector
def decode(self, data):
"""
Take the data bytearray. Decode the data to populate
the values.
:param data: Bytearray for the dataset.
"""
packet_pointer = Ensemble.GetBaseDataSize(self.name_len)
self.FirstBinRange = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 0, Ensemble().BytesInFloat, data)
self.BinSize = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 1, Ensemble().BytesInFloat, data)
self.FirstPingTime = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 2, Ensemble().BytesInFloat, data)
self.LastPingTime = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 3, Ensemble().BytesInFloat, data)
self.Heading = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 4, Ensemble().BytesInFloat, data)
self.Pitch = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 5, Ensemble().BytesInFloat, data)
self.Roll = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 6, Ensemble().BytesInFloat, data)
self.WaterTemp = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 7, Ensemble().BytesInFloat, data)
self.SystemTemp = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 8, Ensemble().BytesInFloat, data)
self.Salinity = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 9, Ensemble().BytesInFloat, data)
self.Pressure = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 10, Ensemble().BytesInFloat, data)
self.TransducerDepth = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 11, Ensemble().BytesInFloat, data)
self.SpeedOfSound = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 12, Ensemble().BytesInFloat, data)
if self.num_elements > 13:
self.RawMagFieldStrength = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 13, Ensemble().BytesInFloat, data)
self.RawMagFieldStrength2 = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 14, Ensemble().BytesInFloat, data)
self.RawMagFieldStrength3 = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 15, Ensemble().BytesInFloat, data)
self.PitchGravityVector = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 16, Ensemble().BytesInFloat, data)
self.RollGravityVector = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 17, Ensemble().BytesInFloat, data)
self.VerticalGravityVector = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 18, Ensemble().BytesInFloat, data)
logging.debug(self.FirstBinRange)
logging.debug(self.BinSize)
logging.debug(self.Heading)
logging.debug(self.Pitch)
logging.debug(self.Roll)
logging.debug(self.Salinity)
logging.debug(self.SpeedOfSound)
def encode(self):
"""
Encode the data into RTB format.
:return:
"""
result = []
# Generate header
result += Ensemble.generate_header(self.ds_type,
self.num_elements,
self.element_multiplier,
self.image,
self.name_len,
self.Name)
# Add the data
result += Ensemble.float_to_bytes(self.FirstBinRange)
result += Ensemble.float_to_bytes(self.BinSize)
result += Ensemble.float_to_bytes(self.FirstPingTime)
result += Ensemble.float_to_bytes(self.LastPingTime)
result += Ensemble.float_to_bytes(self.Heading)
result += Ensemble.float_to_bytes(self.Pitch)
result += Ensemble.float_to_bytes(self.Roll)
result += Ensemble.float_to_bytes(self.WaterTemp)
result += Ensemble.float_to_bytes(self.SystemTemp)
result += Ensemble.float_to_bytes(self.Salinity)
result += Ensemble.float_to_bytes(self.Pressure)
result += Ensemble.float_to_bytes(self.TransducerDepth)
result += Ensemble.float_to_bytes(self.SpeedOfSound)
result += Ensemble.float_to_bytes(self.RawMagFieldStrength)
result += Ensemble.float_to_bytes(self.RawMagFieldStrength2)
result += Ensemble.float_to_bytes(self.RawMagFieldStrength3)
result += Ensemble.float_to_bytes(self.PitchGravityVector)
result += Ensemble.float_to_bytes(self.RollGravityVector)
result += Ensemble.float_to_bytes(self.VerticalGravityVector)
return result
def encode_csv(self, dt, ss_code, ss_config, blank=0, bin_size=0):
"""
Encode into CSV format.
:param dt: Datetime object.
:param ss_code: Subsystem code.
:param ss_config: Subsystem Configuration
:param blank: Blank or first bin position in meters.
:param bin_size: Bin size in meters.
:return: List of CSV lines.
"""
str_result = []
# Create the CSV strings
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_FIRST_PING_TIME, ss_code, ss_config, 0, 0, self.FirstBinRange, self.BinSize, self.FirstPingTime))
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_LAST_PING_TIME, ss_code, ss_config, 0, 0, self.FirstBinRange, self.BinSize, self.LastPingTime))
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_HEADING, ss_code, ss_config, 0, 0, self.FirstBinRange, self.BinSize, self.Heading))
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_PITCH, ss_code, ss_config, 0, 0, self.FirstBinRange, self.BinSize, self.Pitch))
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_ROLL, ss_code, ss_config, 0, 0, self.FirstBinRange, self.BinSize, self.Roll))
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_WATER_TEMP, ss_code, ss_config, 0, 0, self.FirstBinRange, self.BinSize, self.WaterTemp))
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_SYS_TEMP, ss_code, ss_config, 0, 0, self.FirstBinRange, self.BinSize, self.SystemTemp))
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_PRESSURE, ss_code, ss_config, 0, 0, self.FirstBinRange, self.BinSize, self.Pressure))
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_XDCR_DEPTH, ss_code, ss_config, 0, 0, self.FirstBinRange, self.BinSize, self.TransducerDepth))
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_SOS, ss_code, ss_config, 0, 0, self.FirstBinRange, self.BinSize, self.SpeedOfSound))
return str_result
def encode_df(self, dt, ss_code, ss_config):
"""
Encode into Dataframe format.
:param dt: Datetime object.
:param ss_code: Subsystem code.
:param ss_config: Subsystem Configuration
:return: Dataframe of the data.
"""
df_result = []
# Create the Dataframe strings
df_result.append([dt, Ensemble.CSV_FIRST_PING_TIME, ss_code, ss_config, 0, 0, self.FirstBinRange, self.BinSize, self.FirstPingTime])
df_result.append([dt, Ensemble.CSV_LAST_PING_TIME, ss_code, ss_config, 0, 0, self.FirstBinRange, self.BinSize, self.LastPingTime])
df_result.append([dt, Ensemble.CSV_HEADING, ss_code, ss_config, 0, 0, self.FirstBinRange, self.BinSize, self.Heading])
df_result.append([dt, Ensemble.CSV_PITCH, ss_code, ss_config, 0, 0, self.FirstBinRange, self.BinSize, self.Pitch])
df_result.append([dt, Ensemble.CSV_ROLL, ss_code, ss_config, 0, 0, self.FirstBinRange, self.BinSize, self.Roll])
df_result.append([dt, Ensemble.CSV_WATER_TEMP, ss_code, ss_config, 0, 0, self.FirstBinRange, self.BinSize, self.WaterTemp])
df_result.append([dt, Ensemble.CSV_SYS_TEMP, ss_code, ss_config, 0, 0, self.FirstBinRange, self.BinSize, self.SystemTemp])
df_result.append([dt, Ensemble.CSV_PRESSURE, ss_code, ss_config, 0, 0, self.FirstBinRange, self.BinSize, self.Pressure])
df_result.append([dt, Ensemble.CSV_XDCR_DEPTH, ss_code, ss_config, 0, 0, self.FirstBinRange, self.BinSize, self.TransducerDepth])
df_result.append([dt, Ensemble.CSV_SOS, ss_code, ss_config, 0, 0, self.FirstBinRange, self.BinSize, self.SpeedOfSound])
# Create the column names
df_earth_columns = ["dt", "type", "ss_code", "ss_config", "bin_num", "beam", "blank", "bin_size", "val"]
return DataFrame(df_result, columns=df_earth_columns)
def is_upward_facing(self, min_roll: float = 0.0, max_roll: float = 20.0):
"""
Check the roll value if it is near 0 degrees. A value near
zero means the ADCP is upward facing.
Upward looking is around 0 degrees for roll.
Downward looking is around 180 degrees for roll.
:param min_roll: Minimum roll for upward looking taking absolute value.
:param max_roll: Maximum roll for the upward looking taking absolute value.
:return: TRUE if Upward facing.
"""
# Check if the roll value is near 0
if min_roll <= abs(self.Roll) <= max_roll:
return True
return False
```
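The `is_upward_facing()` helper above encodes a simple roll convention: roll near 0 degrees means the ADCP looks up, roll near 180 degrees means it looks down. The sketch below restates that check as a standalone function so the thresholds are easy to see; the helper name and the sample values are illustrative and not part of the rti_python API.
```python
# Minimal sketch of the roll convention used by is_upward_facing() above.
# The default thresholds (0.0 and 20.0 degrees) come from that method; this
# standalone helper is illustrative and not part of the rti_python API.
def roll_is_upward(roll_deg: float, min_roll: float = 0.0, max_roll: float = 20.0) -> bool:
    # Upward looking is around 0 degrees of roll, downward looking around 180.
    return min_roll <= abs(roll_deg) <= max_roll


print(roll_is_upward(3.5))     # True: near 0 degrees, upward facing
print(roll_is_upward(-178.0))  # False: near 180 degrees, downward facing
```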
#### File: rti_python/Ensemble/BottomTrack.py
```python
from rti_python.Ensemble.Ensemble import Ensemble
import logging
from pandas import DataFrame
class BottomTrack:
"""
Ensemble Data DataSet.
Integer values that give details about the ensemble.
"""
def __init__(self, num_elements=74, element_multiplier=1):
"""
Initialize the object
:param num_elements: Number of elements. 74 for 4 Beam system, 59 for 3 beam, 29 for 1 beam
:param element_multiplier: Element mulitplier = 1 always
"""
self.ds_type = 10 # Float
self.num_elements = num_elements
self.element_multiplier = element_multiplier
self.image = 0
self.name_len = 8
self.Name = "E000010\0"
self.FirstPingTime = 0.0
self.LastPingTime = 0.0
self.Heading = 0.0
self.Pitch = 0.0
self.Roll = 0.0
self.WaterTemp = 0.0
self.SystemTemp = 0.0
self.Salinity = 0.0
self.Pressure = 0.0
self.TransducerDepth = 0.0
self.SpeedOfSound = 0.0
self.Status = 0.0
self.NumBeams = 0.0
self.ActualPingCount = 0.0
self.Range = []
self.SNR = []
self.Amplitude = []
self.Correlation = []
self.BeamVelocity = []
self.BeamGood = []
self.InstrumentVelocity = []
self.InstrumentGood = []
self.EarthVelocity = []
self.EarthGood = []
self.SNR_PulseCoherent = []
self.Amp_PulseCoherent = []
self.Vel_PulseCoherent = []
self.Noise_PulseCoherent = []
self.Corr_PulseCoherent = []
"""
for beams in range(element_multiplier):
self.Range.append(Ensemble().BadVelocity)
self.SNR.append(Ensemble().BadVelocity)
self.Amplitude.append(Ensemble().BadVelocity)
self.Correlation.append(Ensemble().BadVelocity)
self.BeamVelocity.append(Ensemble().BadVelocity)
self.BeamGood.append(Ensemble().BadVelocity)
self.InstrumentVelocity.append(Ensemble().BadVelocity)
self.InstrumentGood.append(Ensemble().BadVelocity)
self.EarthVelocity.append(Ensemble().BadVelocity)
self.EarthGood.append(Ensemble().BadVelocity)
"""
def decode(self, data):
"""
Take the data bytearray. Decode the data to populate
the velocities.
:param data: Bytearray for the dataset.
"""
packet_pointer = Ensemble.GetBaseDataSize(self.name_len)
self.FirstPingTime = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 0, Ensemble().BytesInFloat, data)
self.LastPingTime = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 1, Ensemble().BytesInFloat, data)
self.Heading = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 2, Ensemble().BytesInFloat, data)
self.Pitch = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 3, Ensemble().BytesInFloat, data)
self.Roll = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 4, Ensemble().BytesInFloat, data)
self.WaterTemp = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 5, Ensemble().BytesInFloat, data)
self.SystemTemp = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 6, Ensemble().BytesInFloat, data)
self.Salinity = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 7, Ensemble().BytesInFloat, data)
self.Pressure = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 8, Ensemble().BytesInFloat, data)
self.TransducerDepth = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 9, Ensemble().BytesInFloat, data)
self.SpeedOfSound = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 10, Ensemble().BytesInFloat, data)
self.Status = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 11, Ensemble().BytesInFloat, data)
self.NumBeams = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 12, Ensemble().BytesInFloat, data)
self.ActualPingCount = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 13, Ensemble().BytesInFloat, data)
index = 14
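        # After the 14 scalar values above, the payload is a sequence of per-beam
        # float arrays (Range, SNR, Amplitude, ...), each NumBeams entries long.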
numBeam = int(self.NumBeams)
for beams in range(numBeam):
self.Range.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.SNR.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.Amplitude.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.Correlation.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.BeamVelocity.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.BeamGood.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.InstrumentVelocity.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.InstrumentGood.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.EarthVelocity.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.EarthGood.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
if self.num_elements > 54:
for beams in range(numBeam):
self.SNR_PulseCoherent.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.Amp_PulseCoherent.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.Vel_PulseCoherent.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.Noise_PulseCoherent.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.Corr_PulseCoherent.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
else:
# Fill in with 0.0
for beams in range(numBeam):
self.SNR_PulseCoherent.append(0.0)
for beams in range(numBeam):
self.Amp_PulseCoherent.append(0.0)
for beams in range(numBeam):
self.Vel_PulseCoherent.append(0.0)
for beams in range(numBeam):
self.Noise_PulseCoherent.append(0.0)
for beams in range(numBeam):
self.Corr_PulseCoherent.append(0.0)
logging.debug(self.FirstPingTime)
logging.debug(self.LastPingTime)
logging.debug(self.Heading)
logging.debug(self.Pitch)
logging.debug(self.Roll)
logging.debug(self.Salinity)
logging.debug(self.SpeedOfSound)
logging.debug(self.EarthVelocity)
def get_vessel_speed(self):
"""
This will calculate the vessel speed (magnitude). You will
need 3 beams of good data to calculate the vessel speed.
If you do not have 3 beams or any of the velocities for a
beam are bad, this will return BAD_VELOCITY.
:return: Vessel speed or BAD_VELOCITY.
"""
# At least 3 beams needed
if int(self.NumBeams) >= 3 and len(self.EarthVelocity) >= 3:
return Ensemble.calculate_magnitude(self.EarthVelocity[0], self.EarthVelocity[1], self.EarthVelocity[2])
return Ensemble.BadVelocity
def get_vessel_direction(self):
"""
This will calculate the vessel direction. You will
need 2 beams of good data to calculate the vessel speed.
If you do not have 2 beams or any of the velocities for a
beam are bad, this will return BAD_VELOCITY.
:return: Vessel speed or BAD_VELOCITY.
"""
# At least 3 beams needed
if int(self.NumBeams) >= 2 and len(self.EarthVelocity) >= 2:
return Ensemble.calculate_direction(self.EarthVelocity[0], self.EarthVelocity[1])
return Ensemble.BadVelocity
def avg_range(self):
"""
Return the average range (depth to the bottom). This will determine the good values
for the range and average them together.
:return: Average range.
"""
# Average the range
return Ensemble.get_avg_range(self.Range)
def status_str(self):
"""
Convert the status value to a string.
:return: Status value as a string.
"""
        BT_LONG_LAG = 0x0001
        BT_BT_3BEAM_SOLUTION = 0x0002
        BT_HOLD = 0x0004
        BT_SEARCHING = 0x0008
        BT_LR = 0x0010
        BT_COAST = 0x0020
        BT_PROOF = 0x0040
        OVERTEMP = 0x0020
        BT_LOWGAIN = 0x0080
        ERR_HEADING_SENSOR = 0x0100
        ERR_PRESSURE_SENSOR = 0x0200
        ERR_POWER_DOWN_FAILURE = 0x0400
        ERR_NONVOLATILE_DATA = 0x0800
        ERR_RTC = 0x1000
        ERR_TEMPERATURE = 0x2000
        ERR_RCVR_DATA = 0x4000
        ERR_RCVR_TIMEOUT = 0x8000
        ERR_LOW_VOLTAGE = 0xFFFF
result_str = ""
if self.Status & BT_LONG_LAG:
result_str += "Bottom Track Long Lag, "
if self.Status & BT_BT_3BEAM_SOLUTION:
result_str += "Bottom Track 3 Beam Solution"
if self.Status & BT_HOLD:
result_str += "Bottom Track Search: HOLD, "
if self.Status & BT_SEARCHING:
result_str += "Bottom Track Search: SEARCHING, "
if self.Status & BT_LR:
result_str += "Bottom Track Long Range [Narrowband Mode], "
if self.Status & BT_COAST:
result_str += "Bottom Track Coast, "
if self.Status & BT_PROOF:
result_str += "Bottom Track Search: PROOF, "
if self.Status & OVERTEMP:
result_str += "Over Temperature, "
if self.Status & BT_LOWGAIN:
result_str += "Bottom Track Low Gain (Shallow Water Mode), "
if self.Status & ERR_HEADING_SENSOR:
result_str += "Heading Sensor Error, "
if self.Status & ERR_PRESSURE_SENSOR:
result_str += "Pressure Sensor Error, "
if self.Status & ERR_POWER_DOWN_FAILURE:
result_str += "Error Powering Down, "
if self.Status & ERR_NONVOLATILE_DATA:
result_str += "Error in NonVolatile Data: "
if self.Status & ERR_RTC:
result_str += "RTC Error, "
if self.Status & ERR_TEMPERATURE:
result_str += "Temperature Error, "
if self.Status & ERR_RCVR_DATA:
result_str += "Receiver Data Error, "
if self.Status & ERR_RCVR_TIMEOUT:
result_str += "Receiver Timeout, "
if self.Status == ERR_LOW_VOLTAGE:
result_str += "Low Voltage, "
return result_str
def encode(self):
"""
Encode the data into RTB format.
:return:
"""
result = []
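        # The dataset is 14 scalar floats followed by 15 per-beam float arrays,
        # so the element count below is 14 + 15 * NumBeams.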
self.num_elements = (15 * int(self.NumBeams)) + 14
# Generate header
result += Ensemble.generate_header(self.ds_type,
self.num_elements,
self.element_multiplier,
self.image,
self.name_len,
self.Name)
# Add the data
result += Ensemble.float_to_bytes(self.FirstPingTime)
result += Ensemble.float_to_bytes(self.LastPingTime)
result += Ensemble.float_to_bytes(self.Heading)
result += Ensemble.float_to_bytes(self.Pitch)
result += Ensemble.float_to_bytes(self.Roll)
result += Ensemble.float_to_bytes(self.WaterTemp)
result += Ensemble.float_to_bytes(self.SystemTemp)
result += Ensemble.float_to_bytes(self.Salinity)
result += Ensemble.float_to_bytes(self.Pressure)
result += Ensemble.float_to_bytes(self.TransducerDepth)
result += Ensemble.float_to_bytes(self.SpeedOfSound)
result += Ensemble.float_to_bytes(self.Status)
result += Ensemble.float_to_bytes(self.NumBeams)
result += Ensemble.float_to_bytes(self.ActualPingCount)
for beam in range(len(self.Range)):
result += Ensemble.float_to_bytes(self.Range[beam])
for beam in range(len(self.SNR)):
result += Ensemble.float_to_bytes(self.SNR[beam])
for beam in range(len(self.Amplitude)):
result += Ensemble.float_to_bytes(self.Amplitude[beam])
for beam in range(len(self.Correlation)):
result += Ensemble.float_to_bytes(self.Correlation[beam])
for beam in range(len(self.BeamVelocity)):
result += Ensemble.float_to_bytes(self.BeamVelocity[beam])
for beam in range(len(self.BeamGood)):
result += Ensemble.float_to_bytes(self.BeamGood[beam])
for beam in range(len(self.InstrumentVelocity)):
result += Ensemble.float_to_bytes(self.InstrumentVelocity[beam])
for beam in range(len(self.InstrumentGood)):
result += Ensemble.float_to_bytes(self.InstrumentGood[beam])
for beam in range(len(self.EarthVelocity)):
result += Ensemble.float_to_bytes(self.EarthVelocity[beam])
for beam in range(len(self.EarthGood)):
result += Ensemble.float_to_bytes(self.EarthGood[beam])
for beam in range(len(self.SNR_PulseCoherent)):
result += Ensemble.float_to_bytes(self.SNR_PulseCoherent[beam])
for beam in range(len(self.Amp_PulseCoherent)):
result += Ensemble.float_to_bytes(self.Amp_PulseCoherent[beam])
for beam in range(len(self.Vel_PulseCoherent)):
result += Ensemble.float_to_bytes(self.Vel_PulseCoherent[beam])
        for beam in range(len(self.Noise_PulseCoherent)):
            result += Ensemble.float_to_bytes(self.Noise_PulseCoherent[beam])
        for beam in range(len(self.Corr_PulseCoherent)):
            result += Ensemble.float_to_bytes(self.Corr_PulseCoherent[beam])
return result
def encode_csv(self, dt, ss_code, ss_config, blank=0, bin_size=0):
"""
Encode into CSV format.
:param dt: Datetime object.
:param ss_code: Subsystem code.
:param ss_config: Subsystem Configuration
:param blank: Blank or first bin position in meters.
:param bin_size: Bin size in meters.
:return: List of CSV lines.
"""
str_result = []
# Create the CSV strings
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_BT_HEADING, ss_code, ss_config, 0, 0, blank, bin_size, self.Heading))
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_BT_PITCH, ss_code, ss_config, 0, 0, blank, bin_size, self.Pitch))
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_BT_ROLL, ss_code, ss_config, 0, 0, blank, bin_size, self.Roll))
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_BT_PRESSURE, ss_code, ss_config, 0, 0, blank, bin_size, self.Pressure))
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_BT_XDCR_DEPTH, ss_code, ss_config, 0, 0, blank, bin_size, self.TransducerDepth))
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_BT_STATUS, ss_code, ss_config, 0, 0, blank, bin_size, self.Status))
for beams in range(len(self.Range)):
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_BT_RANGE, ss_code, ss_config, 0, beams, blank, bin_size, self.Range[beams]))
for beams in range(len(self.BeamVelocity)):
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_BT_BEAM_VEL, ss_code, ss_config, 0, beams, blank, bin_size, self.BeamVelocity[beams]))
for beams in range(len(self.BeamGood)):
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_BT_BEAM_GOOD, ss_code, ss_config, 0, beams, blank, bin_size, self.BeamGood[beams]))
for beams in range(len(self.InstrumentVelocity)):
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_BT_INSTR_VEL, ss_code, ss_config, 0, beams, blank, bin_size, self.InstrumentVelocity[beams]))
for beams in range(len(self.InstrumentGood)):
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_BT_INSTR_GOOD, ss_code, ss_config, 0, beams, blank, bin_size, self.InstrumentGood[beams]))
for beams in range(len(self.EarthVelocity)):
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_BT_EARTH_VEL, ss_code, ss_config, 0, beams, blank, bin_size, self.EarthVelocity[beams]))
for beams in range(len(self.EarthGood)):
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_BT_EARTH_GOOD, ss_code, ss_config, 0, beams, blank, bin_size, self.EarthGood[beams]))
return str_result
def encode_df(self, dt, ss_code, ss_config):
"""
        Encode into Dataframe format.
        :param dt: Datetime object.
        :param ss_code: Subsystem code.
        :param ss_config: Subsystem Configuration
        :return: Dataframe of the data.
        """
df_result = []
for beams in range(len(self.Range)):
df_result.append([dt, Ensemble.CSV_BT_RANGE, ss_code, ss_config, 0, beams, self.Range[beams]])
df_result.append([dt, Ensemble.CSV_BT_AVG_RANGE, ss_code, ss_config, 0, 0, self.avg_range()])
# Create the column names
df_earth_columns = ["dt", "type", "ss_code", "ss_config", "bin_num", "beam", "val"]
return DataFrame(df_result, columns=df_earth_columns)
def pd0_range_cm(self, pd0_beam_num: int):
"""
Convert the range from meters to centimeters.
Remap the Beam numbers to match PD0 beams.
RTB and PD0 do not share the same Beam Order
RTB BEAM 0,1,2,3 = PD0 BEAM 3,2,0,1
:param pd0_beam_num: PD0 Beam number.
:type pd0_beam_num: Integer
        :return: Range for the given PD0 beam, converted to centimeters. Beams are reordered for PD0; -32768 is returned if the range is marked bad.
        :rtype: Integer range in centimeters, or None if the beam number is not valid.
"""
if pd0_beam_num == 0 and pd0_beam_num <= self.NumBeams:
if Ensemble.is_bad_velocity(self.Range[2]):
return -32768
return round(self.Range[2] * 100.0) # PD0 0 - RTB 2
if pd0_beam_num == 1 and pd0_beam_num <= self.NumBeams:
if Ensemble.is_bad_velocity(self.Range[3]):
return -32768
return round(self.Range[3] * 100.0) # PD0 1 - RTB 3
if pd0_beam_num == 2 and pd0_beam_num <= self.NumBeams:
if Ensemble.is_bad_velocity(self.Range[1]):
return -32768
return round(self.Range[1] * 100.0) # PD0 2 - RTB 1
if pd0_beam_num == 3 and pd0_beam_num <= self.NumBeams:
if Ensemble.is_bad_velocity(self.Range[0]):
return -32768
return round(self.Range[0] * 100.0) # PD0 3 - RTB 0
return None
def pd0_beam_vel_mm_per_sec(self, pd0_beam_num: int):
"""
Convert the Beam Velocity from m/s to mm/s.
Also remap the Beam numbers to match PD0 beams.
RTB and PD0 do not share the same Beam Order
RTB BEAM 0,1,2,3 = PD0 BEAM 3,2,0,1
:param pd0_beam_num: PD0 Beam number.
:type pd0_beam_num: Integer
        :return: Velocity for the given PD0 beam, converted to mm/s. Beams are reordered for PD0; -32768 is returned if the velocity is marked bad.
        :rtype: Integer velocity in mm/s, or None if the beam number is not valid.
"""
if pd0_beam_num == 0 and pd0_beam_num <= self.NumBeams:
if Ensemble.is_bad_velocity(self.BeamVelocity[2]):
return -32768
return round(self.BeamVelocity[2] * 1000.0 * -1.0) # Convert to mm/s PD0 0 - RTB 2
if pd0_beam_num == 1 and pd0_beam_num <= self.NumBeams:
if Ensemble.is_bad_velocity(self.BeamVelocity[3]):
return -32768
return round(self.BeamVelocity[3] * 1000.0 * -1.0) # Convert to mm/s PD0 1 - RTB 3
if pd0_beam_num == 2 and pd0_beam_num <= self.NumBeams:
if Ensemble.is_bad_velocity(self.BeamVelocity[1]):
return -32768
return round(self.BeamVelocity[1] * 1000.0 * -1.0) # Convert to mm/s PD0 2 - RTB 1
if pd0_beam_num == 3 and pd0_beam_num <= self.NumBeams:
if Ensemble.is_bad_velocity(self.BeamVelocity[0]):
return -32768
return round(self.BeamVelocity[0] * 1000.0 * -1.0) # Convert to mm/s PD0 3 - RTB 0
return None
def pd0_corr_counts(self, pd0_beam_num: int):
"""
The value has to be converted from percentage to 0 - 255
Scale 0 % - 100 % to 0 - 255
255 = 100 %
0 = 0 %
50 % = 0.50 * 255 = 127.5 = 255 / 2
Also remap the Beam numbers to match PD0 beams.
RTB and PD0 do not share the same Beam Order
RTB BEAM 0,1,2,3 = PD0 BEAM 3,2,0,1
:param pd0_beam_num: PD0 Beam number.
:type pd0_beam_num: Integer
:return: Correlation data as PD0 counts format. Beams are reordered
:rtype: Float - Correlation value.
"""
if pd0_beam_num == 0 and pd0_beam_num <= self.NumBeams:
return round(self.Correlation[2] * 255.0) # Convert to counts - PD0 0 - RTB 2
if pd0_beam_num == 1 and pd0_beam_num <= self.NumBeams:
return round(self.Correlation[3] * 255.0) # Convert to counts - PD0 1 - RTB 3
if pd0_beam_num == 2 and pd0_beam_num <= self.NumBeams:
return round(self.Correlation[1] * 255.0) # Convert to counts - PD0 2 - RTB 1
if pd0_beam_num == 3 and pd0_beam_num <= self.NumBeams:
return round(self.Correlation[0] * 255.0) # Convert to counts - PD0 3 - RTB 0
return None
def pd0_amp_counts(self, pd0_beam_num: int):
"""
Convert the Amplitude/Echo Intensity to Counts.
0.5dB per count.
Also remap the Beam numbers to match PD0 beams.
RTB and PD0 do not share the same Beam Order
RTB BEAM 0,1,2,3 = PD0 BEAM 3,2,0,1
:param pd0_beam_num: PD0 Beam number.
:type pd0_beam_num: Integer
:return: Amplitude data as PD0 counts format. Beams are reordered
:rtype: Float - Amplitude value.
"""
if pd0_beam_num == 0 and pd0_beam_num <= self.NumBeams:
return round(self.Amplitude[2] * 2.0) # Convert to counts - PD0 0 - RTB 2
if pd0_beam_num == 1 and pd0_beam_num <= self.NumBeams:
return round(self.Amplitude[3] * 2.0) # Convert to counts - PD0 1 - RTB 3
if pd0_beam_num == 2 and pd0_beam_num <= self.NumBeams:
return round(self.Amplitude[1] * 2.0) # Convert to counts - PD0 2 - RTB 1
if pd0_beam_num == 3 and pd0_beam_num <= self.NumBeams:
return round(self.Amplitude[0] * 2.0) # Convert to counts - PD0 3 - RTB 0
return None
def pd0_good_beam_percent(self, pd0_beam_num: int):
"""
Convert the Good Beams to percentage.
Also remap the Beam numbers to match PD0 beams.
RTB and PD0 do not share the same Beam Order
RTB BEAM 0,1,2,3 = PD0 BEAM 3,2,0,1
:param pd0_beam_num: PD0 Beam number.
:type pd0_beam_num: Integer
:return: Good Beams data as PD0 as a percentage. Beams are reordered
:rtype: Float - Good Beams value.
"""
if pd0_beam_num == 0 and pd0_beam_num <= self.NumBeams:
return round((self.BeamGood[2] * 100.0) / self.ActualPingCount) # Convert to percentage - PD0 0 - RTB 2
        if pd0_beam_num == 1 and pd0_beam_num <= self.NumBeams:
            return round((self.BeamGood[3] * 100.0) / self.ActualPingCount)  # Convert to percentage - PD0 1 - RTB 3
        if pd0_beam_num == 2 and pd0_beam_num <= self.NumBeams:
            return round((self.BeamGood[1] * 100.0) / self.ActualPingCount)  # Convert to percentage - PD0 2 - RTB 1
        if pd0_beam_num == 3 and pd0_beam_num <= self.NumBeams:
            return round((self.BeamGood[0] * 100.0) / self.ActualPingCount)  # Convert to percentage - PD0 3 - RTB 0
return None
```
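The pd0_* helpers above all follow the same pattern: remap RTB beams 0,1,2,3 to PD0 beams 3,2,0,1 and rescale the value (meters to centimeters for range, m/s to mm/s with a sign flip for velocity, fraction to 0-255 counts for correlation, 0.5 dB per count for amplitude). Below is a hedged sketch of the range conversion with the remapping written out as a lookup table; the function and sample ranges are illustrative, and the bad-range test is a stand-in for Ensemble.is_bad_velocity().
```python
# Illustrative sketch of the RTB -> PD0 range conversion in BottomTrack.pd0_range_cm().
# The beam remapping and the x100 scale come from that method; the helper itself
# and the bad-range check are stand-ins, not part of the rti_python API.
RTB_INDEX_FOR_PD0_BEAM = {0: 2, 1: 3, 2: 1, 3: 0}  # PD0 beam number -> RTB beam index


def pd0_range_cm_sketch(rtb_ranges_m, pd0_beam_num, bad_value=-32768):
    rtb_index = RTB_INDEX_FOR_PD0_BEAM[pd0_beam_num]
    range_m = rtb_ranges_m[rtb_index]
    if range_m < 0.0:              # stand-in for Ensemble.is_bad_velocity()
        return bad_value
    return round(range_m * 100.0)  # meters -> centimeters


print(pd0_range_cm_sketch([12.1, 12.3, 11.9, 12.0], pd0_beam_num=0))  # 1190 (reads RTB beam 2)
```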
#### File: rti_python/Ensemble/EnsembleReader.py
```python
from log import logger
import socket
class EnsembleReader:
"""
    Read in data from the given TCP port. Then decode the data
    and pass it on to another TCP port.
"""
def __init__(self, port):
# Open a TCP port to read in the ensemble data
self.port = port
self.is_alive = True
self.socket = None
self.reconnect(port)
self.read()
def reconnect(self, tcp_port):
"""
Connect to the server.
"""
logger.debug("Ensemble Reader: ", tcp_port)
self.is_alive = True
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect(('localhost', int(tcp_port)))
except ConnectionRefusedError as err:
print("EnsembleReader: ", err)
#sys.exit(2)
except socket.error as err:
print('EnsembleReader: Error Opening socket ', err)
#sys.exit(2)
def read(self):
"""
        Read data from the TCP socket.
"""
try:
while self.is_alive:
# Receive a response
                response = self.socket.recv(4096)  # read up to 4 KB at a time
#print('"%s"' % response)
# Decode the ensemble data
# Reconnect
if len(response) == 0:
print("Disconnected")
# Close the socket
self.close()
# Reconnect to the server
self.reconnect(self.port)
# Try to read again
self.read()
except KeyboardInterrupt:
# Ctrl-C will stop the application
pass
        except Exception:
            # Swallow any other error and fall through to close the socket below.
            pass
# Close the socket
self.close()
def close(self):
"""
Close the socket.
"""
self.is_alive = False
self.socket.close()
```
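EnsembleReader connects and starts its blocking read loop as soon as it is constructed, so using it amounts to instantiating it with the TCP port the ensemble server publishes on. A minimal usage sketch follows; the port number is a made-up example, not a documented default.
```python
# Hypothetical usage of EnsembleReader. The port number is an example value only;
# the constructor connects to localhost on that port and then blocks in read().
from rti_python.Ensemble.EnsembleReader import EnsembleReader

if __name__ == "__main__":
    reader = EnsembleReader(55056)  # blocks until Ctrl-C or the socket closes
```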
#### File: rti_python/Ensemble/GoodBeam.py
```python
from rti_python.Ensemble.Ensemble import Ensemble
import logging
class GoodBeam:
"""
Good Beam DataSet.
Indicate if the beam data is good.
[Bin x Beam] data.
"""
def __init__(self, num_elements, element_multiplier):
self.ds_type = 20 # Int
self.num_elements = num_elements
self.element_multiplier = element_multiplier
self.image = 0
self.name_len = 8
self.Name = "E000006\0"
self.GoodBeam = []
# Create enough entries for all the (bins x beams)
# Initialize with bad values
        for _ in range(num_elements):
            bin_row = []
            for _ in range(element_multiplier):
                bin_row.append([0])
            self.GoodBeam.append(bin_row)
def decode(self, data):
"""
Take the data bytearray. Decode the data to populate
the Good Beams.
:param data: Bytearray for the dataset.
"""
packet_pointer = Ensemble.GetBaseDataSize(self.name_len)
for beam in range(self.element_multiplier):
for bin_num in range(self.num_elements):
self.GoodBeam[bin_num][beam] = Ensemble.GetInt32(packet_pointer, Ensemble().BytesInInt32, data)
packet_pointer += Ensemble().BytesInInt32
logging.debug(self.GoodBeam)
def encode(self):
"""
Encode the data into RTB format.
:return:
"""
result = []
# Generate header
result += Ensemble.generate_header(self.ds_type,
self.num_elements,
self.element_multiplier,
self.image,
self.name_len,
self.Name)
# Add the data
for beam in range(self.element_multiplier):
for bin_num in range(self.num_elements):
val = self.GoodBeam[bin_num][beam]
result += Ensemble.int32_to_bytes(val)
return result
def encode_csv(self, dt, ss_code, ss_config, blank, bin_size):
"""
Encode into CSV format.
:param dt: Datetime object.
:param ss_code: Subsystem code.
:param ss_config: Subsystem Configuration
:param blank: Blank or First bin position in meters.
:param bin_size: Bin size in meters.
:return: List of CSV lines.
"""
str_result = []
for beam in range(self.element_multiplier):
for bin_num in range(self.num_elements):
# Get the value
val = self.GoodBeam[bin_num][beam]
# Create the CSV string
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_GOOD_BEAM, ss_code, ss_config, bin_num, beam, blank, bin_size, val))
return str_result
def pd0_percent(self, pings_per_ens: int, pd0_beam_num: int):
"""
Convert the Good Beams to Percent.
Also remap the Beam numbers to match PD0 beams.
RTB and PD0 do not share the same Beam Order
RTB BEAM 0,1,2,3 = PD0 BEAM 3,2,0,1
:param pd0_beam_num: PD0 Beam number.
:type pd0_beam_num: Integer
:param pings_per_ens: Number of pings in the ensemble.
:type pings_per_ens: Integer
        :return: A list of good-ping percentages (one per bin) for the given PD0 beam. Beams are reordered for PD0.
        :rtype: List, or None if the beam number is not valid.
"""
# Vertical Beam ONLY
if self.element_multiplier == 1:
beam0 = [v[0] for v in self.GoodBeam] # Beam 0
return [round((v * 100.0) / pings_per_ens) for v in beam0] # Convert to percent
if pd0_beam_num == 0 and pd0_beam_num <= self.element_multiplier:
beam2 = [v[2] for v in self.GoodBeam] # PD0 0 - RTB 2
return [round((v * 100.0) / pings_per_ens) for v in beam2] # Convert to percent
if pd0_beam_num == 1 and pd0_beam_num <= self.element_multiplier:
beam3 = [v[3] for v in self.GoodBeam] # PD0 1 - RTB 3
return [round((v * 100.0) / pings_per_ens) for v in beam3] # Convert to percent
if pd0_beam_num == 2 and pd0_beam_num <= self.element_multiplier:
beam1 = [v[1] for v in self.GoodBeam] # PD0 2 - RTB 1
return [round((v * 100.0) / pings_per_ens) for v in beam1] # Convert to percent
if pd0_beam_num == 3 and pd0_beam_num <= self.element_multiplier:
beam0 = [v[0] for v in self.GoodBeam] # PD0 3 - RTB 0
return [round((v * 100.0) / pings_per_ens) for v in beam0] # Convert to percent
return None
```
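GoodBeam.pd0_percent() turns per-bin good-ping counts into whole percentages of the pings in the ensemble, after the usual RTB to PD0 beam reorder. The sketch below shows just the arithmetic for one beam's column of counts; it is illustrative and not part of the rti_python API.
```python
# Sketch of the count -> percent conversion performed by GoodBeam.pd0_percent().
# The rounding matches that method; this helper is illustrative only.
def good_beam_percent_sketch(good_pings_per_bin, pings_per_ens):
    # good_pings_per_bin: good-ping counts for a single beam, one entry per bin
    return [round((g * 100.0) / pings_per_ens) for g in good_pings_per_bin]


print(good_beam_percent_sketch([10, 9, 7, 0], pings_per_ens=10))  # [100, 90, 70, 0]
```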
#### File: rti_python/Ensemble/InstrumentVelocity.py
```python
from rti_python.Ensemble.Ensemble import Ensemble
import logging
class InstrumentVelocity:
"""
Instrument Velocity DataSet.
[Bin x Beam] data.
"""
def __init__(self, num_elements, element_multiplier):
self.ds_type = 10
self.num_elements = num_elements
self.element_multiplier = element_multiplier
self.image = 0
self.name_len = 8
self.Name = "E000002\0"
self.Velocities = []
# Create enough entries for all the (bins x beams)
# Initialize with bad values
        for _ in range(num_elements):
            bin_row = []
            for _ in range(element_multiplier):
                bin_row.append([Ensemble().BadVelocity])
            self.Velocities.append(bin_row)
def decode(self, data):
"""
Take the data bytearray. Decode the data to populate
the velocities.
:param data: Bytearray for the dataset.
"""
packetpointer = Ensemble.GetBaseDataSize(self.name_len)
for beam in range(self.element_multiplier):
for bin_num in range(self.num_elements):
self.Velocities[bin_num][beam] = Ensemble.GetFloat(packetpointer, Ensemble().BytesInFloat, data)
packetpointer += Ensemble().BytesInFloat
logging.debug(self.Velocities)
def encode(self):
"""
Encode the data into RTB format.
:return:
"""
result = []
# Generate header
result += Ensemble.generate_header(self.ds_type,
self.num_elements,
self.element_multiplier,
self.image,
self.name_len,
self.Name)
# Add the data
for beam in range(self.element_multiplier):
for bin_num in range(self.num_elements):
val = self.Velocities[bin_num][beam]
result += Ensemble.float_to_bytes(val)
return result
def encode_csv(self, dt, ss_code, ss_config, blank, bin_size):
"""
Encode into CSV format.
:param dt: Datetime object.
:param ss_code: Subsystem code.
:param ss_config: Subsystem Configuration
:param blank: Blank or First bin position in meters.
:param bin_size: Bin size in meters.
:return: List of CSV lines.
"""
str_result = []
for beam in range(self.element_multiplier):
for bin_num in range(self.num_elements):
# Get the value
val = self.Velocities[bin_num][beam]
# Create the CSV string
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_INSTR_VEL, ss_code, ss_config, bin_num, beam, blank, bin_size, val))
return str_result
```
#### File: viking_ADCP_processing/test/configfile_test.py
```python
import pytest
from magtogoek.configfile import (ConfigFileError, load_configfile)
def test_load():
load_configfile("data/config_test_files/valid_config.ini")
@pytest.mark.parametrize(
"filename, error",
[
("data/config_test_files/choice_error.ini", "choice"),
("data/config_test_files/range_error.ini", "range"),
("data/config_test_files/nargs_error.ini", "nargs"),
("data/config_test_files/dtype_error.ini", "dtype"),
("data/config_test_files/sensor_type_error.ini", "sensor_type"),
],
)
def test_ConfigFileError(filename, error):
with pytest.raises(ConfigFileError) as exc_info:
load_configfile(filename)
assert str(exc_info.value.error) == error
```
|
{
"source": "jeromekelleher/demes",
"score": 3
}
|
#### File: demes/tests/test_incremental_build.py
```python
import unittest
import demes
# test_examples.py tests importing and exporting YAML files
# these tests are for incremental builds of demographic models, where we
# add demes to an "empty" demography using the demographic event functions
class TestExamples(unittest.TestCase):
maxDiff = None
def test_one_deme_multiple_epochs(self):
# initial size set to 50, add a later deme that has size of 100
# only need to specify the second deme, the first one is implicit with
# size equal to 50
g = demes.DemeGraph(
description="one deme test", time_units="generations", generation_time=1
)
g.deme(
id="pop",
initial_size=50,
epochs=[demes.Epoch(start_time=30, end_time=0, initial_size=100)],
)
self.assertEqual(len(g["pop"].epochs), 2)
self.assertEqual(g["pop"].epochs[0].start_time, float("inf"))
self.assertEqual(g["pop"].epochs[0].end_time, 30)
self.assertEqual(g["pop"].epochs[1].start_time, 30)
self.assertEqual(g["pop"].epochs[1].end_time, 0)
self.assertEqual(g["pop"].start_time, float("inf"))
self.assertEqual(g["pop"].end_time, 0)
# same as above, but start time is not inf
g = demes.DemeGraph(
description="one deme test", time_units="generations", generation_time=1
)
g.deme(
id="pop",
initial_size=50,
start_time=100,
epochs=[demes.Epoch(start_time=30, end_time=0, initial_size=100)],
)
self.assertEqual(len(g["pop"].epochs), 2)
self.assertEqual(g["pop"].epochs[0].start_time, 100)
def test_simple_split(self):
g = demes.DemeGraph(
description="split model", time_units="generations", generation_time=1
)
g.deme(id="ancestral", end_time=50, initial_size=100)
g.deme(id="pop1", start_time=50, initial_size=200)
g.deme(id="pop2", start_time=50, initial_size=300)
g.split(parent="ancestral", children=["pop1", "pop2"], time=50)
self.assertEqual(len(g.splits), 1)
self.assertTrue(g.splits[0].parent == "ancestral")
self.assertTrue("pop1" in g.splits[0].children)
self.assertTrue("pop2" in g.splits[0].children)
self.assertTrue(g.splits[0].time == 50)
self.assertTrue("ancestral" in g["pop1"].ancestors)
self.assertTrue("ancestral" in g["pop2"].ancestors)
self.assertTrue(g["ancestral"].ancestors is None)
def test_simple_branch(self):
g = demes.DemeGraph(
description="branch model", time_units="generations", generation_time=1
)
g.deme(id="ancestral", initial_size=100)
g.deme(id="pop1", start_time=50, initial_size=200)
g.branch(parent="ancestral", child="pop1", time=50)
self.assertEqual(len(g.branches), 1)
self.assertTrue(g.branches[0].parent == "ancestral")
self.assertTrue(g.branches[0].child == "pop1")
self.assertTrue(g.branches[0].time == 50)
self.assertTrue("ancestral" in g["pop1"].ancestors)
def test_simple_merge(self):
g = demes.DemeGraph(
description="branch model", time_units="generations", generation_time=1
)
g.deme(id="ancestral1", initial_size=100, end_time=10)
g.deme(id="ancestral2", initial_size=100, end_time=10)
g.deme(id="child", initial_size=100, start_time=10)
g.merge(
parents=["ancestral1", "ancestral2"],
proportions=[0.5, 0.5],
child="child",
time=10,
)
self.assertEqual(len(g.mergers), 1)
self.assertEqual(g.mergers[0].time, 10)
self.assertEqual(g.mergers[0].child, "child")
for anc in ["ancestral1", "ancestral2"]:
self.assertTrue(anc in g.mergers[0].parents)
self.assertEqual(g["ancestral1"].end_time, 10)
self.assertEqual(g["ancestral2"].end_time, 10)
self.assertEqual(g["child"].start_time, 10)
def test_merge_that_truncates(self):
# by calling merge and setting the time, we cut the parental populations
# at the merge time
g = demes.DemeGraph(
description="branch model", time_units="generations", generation_time=1
)
g.deme(id="ancestral1", initial_size=100) # don't set their end times
g.deme(id="ancestral2", initial_size=100)
g.deme(id="child", initial_size=100, start_time=10)
g.merge(
parents=["ancestral1", "ancestral2"],
proportions=[0.5, 0.5],
child="child",
time=10,
)
self.assertEqual(len(g.mergers), 1)
self.assertEqual(g.mergers[0].time, 10)
self.assertEqual(g.mergers[0].child, "child")
for anc in ["ancestral1", "ancestral2"]:
self.assertTrue(anc in g.mergers[0].parents)
self.assertEqual(g["ancestral1"].end_time, 10)
self.assertEqual(g["ancestral2"].end_time, 10)
self.assertEqual(g["child"].start_time, 10)
def test_admixture(self):
g = demes.DemeGraph(
description="branch model", time_units="generations", generation_time=1
)
g.deme(id="ancestral1", initial_size=100)
g.deme(id="ancestral2", initial_size=100)
g.deme(id="child", initial_size=100, start_time=10)
g.admix(
parents=["ancestral1", "ancestral2"],
proportions=[0.5, 0.5],
child="child",
time=10,
)
self.assertEqual(g["ancestral1"].end_time, 0)
self.assertEqual(g["ancestral2"].end_time, 0)
self.assertEqual(g["child"].end_time, 0)
self.assertEqual(g["child"].start_time, 10)
self.assertEqual(len(g.admixtures), 1)
self.assertEqual(g.admixtures[0].time, 10)
for anc in ["ancestral1", "ancestral2"]:
self.assertTrue(anc in g.admixtures[0].parents)
self.assertTrue(anc in g["child"].ancestors)
```
|
{
"source": "jeromekelleher/lshmm",
"score": 3
}
|
#### File: lshmm/forward_backward/fb_haploid_variants_samples_tree.py
```python
import numpy as np
import tskit
def mirror_coordinates(ts):
"""Return a copy of the specified tree sequence in which all coordinates x are transformed into L - x."""
L = ts.sequence_length
tables = ts.dump_tables()
left = tables.edges.left
right = tables.edges.right
tables.edges.left = L - right
tables.edges.right = L - left
tables.sites.position = L - tables.sites.position
# TODO migrations.
tables.sort()
return tables.tree_sequence()
class ValueTransition:
"""Simple struct holding value transition values."""
def __init__(self, tree_node=-1, value=-1, value_index=-1):
self.tree_node = tree_node
self.value = value
self.value_index = value_index
def copy(self):
"""Copy the value transition."""
return ValueTransition(self.tree_node, self.value, self.value_index)
def __repr__(self):
return repr(self.__dict__)
def __str__(self):
return repr(self)
class LsHmmAlgorithm:
"""Abstract superclass of Li and Stephens HMM algorithm."""
def __init__(self, ts, rho, mu, precision=10):
self.ts = ts
self.mu = mu
self.rho = rho
self.precision = precision
# The array of ValueTransitions.
self.T = []
# indexes in to the T array for each node.
self.T_index = np.zeros(ts.num_nodes, dtype=int) - 1
# The number of nodes underneath each element in the T array.
self.N = np.zeros(ts.num_nodes, dtype=int)
# Efficiently compute the allelic state at a site
self.allelic_state = np.zeros(ts.num_nodes, dtype=int) - 1
# Diffs so we can can update T and T_index between trees.
self.edge_diffs = self.ts.edge_diffs()
self.parent = np.zeros(self.ts.num_nodes, dtype=int) - 1
self.tree = tskit.Tree(self.ts)
self.output = None
def check_integrity(self):
"""Check the integrity of the algorithm."""
M = [st.tree_node for st in self.T if st.tree_node != -1]
assert np.all(self.T_index[M] >= 0)
index = np.ones_like(self.T_index, dtype=bool)
index[M] = 0
assert np.all(self.T_index[index] == -1)
for j, st in enumerate(self.T):
if st.tree_node != -1:
assert j == self.T_index[st.tree_node]
def compress(self):
"""Compress the values in the tree."""
tree = self.tree
T = self.T
T_index = self.T_index
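        # Collect the distinct values carried by T, work out for each relevant
        # node which value lets the most descendant transitions be dropped
        # (a small parsimony-style pass), then rebuild T keeping only the
        # transitions where the chosen value actually changes down the tree.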
values = np.unique(list(st.value if st.tree_node != -1 else 1e200 for st in T))
for st in T:
if st.tree_node != -1:
st.value_index = np.searchsorted(values, st.value)
child = np.zeros(len(values), dtype=int)
num_values = len(values)
value_count = np.zeros(num_values, dtype=int)
def compute(u, parent_state):
value_count[:] = 0
for v in tree.children(u):
child[:] = optimal_set[v]
# If the set for a given child is empty, then we know it inherits
# directly from the parent state and must be a singleton set.
if np.sum(child) == 0:
child[parent_state] = 1
for j in range(num_values):
value_count[j] += child[j]
max_value_count = np.max(value_count)
optimal_set[u, :] = 0
optimal_set[u, value_count == max_value_count] = 1
optimal_set = np.zeros((tree.tree_sequence.num_nodes, len(values)), dtype=int)
t_node_time = [
-1 if st.tree_node == -1 else tree.time(st.tree_node) for st in T
]
order = np.argsort(t_node_time)
for j in order:
st = T[j]
u = st.tree_node
if u != -1:
# Compute the value at this node
state = st.value_index
if tree.is_internal(u):
compute(u, state)
else:
# A[u, state] = 1
optimal_set[u, state] = 1
# Find parent state
v = tree.parent(u)
if v != -1:
while T_index[v] == -1:
v = tree.parent(v)
parent_state = T[T_index[v]].value_index
v = tree.parent(u)
while T_index[v] == -1:
compute(v, parent_state)
v = tree.parent(v)
T_old = [st.copy() for st in T]
T.clear()
T_parent = []
old_state = T_old[T_index[tree.root]].value_index
new_state = np.argmax(optimal_set[tree.root])
T.append(ValueTransition(tree_node=tree.root, value=values[new_state]))
T_parent.append(-1)
stack = [(tree.root, old_state, new_state, 0)]
while len(stack) > 0:
u, old_state, new_state, t_parent = stack.pop()
for v in tree.children(u):
old_child_state = old_state
if T_index[v] != -1:
old_child_state = T_old[T_index[v]].value_index
if np.sum(optimal_set[v]) > 0:
new_child_state = new_state
child_t_parent = t_parent
if optimal_set[v, new_state] == 0:
new_child_state = np.argmax(optimal_set[v])
child_t_parent = len(T)
T_parent.append(t_parent)
T.append(
ValueTransition(tree_node=v, value=values[new_child_state])
)
stack.append((v, old_child_state, new_child_state, child_t_parent))
else:
if old_child_state != new_state:
T_parent.append(t_parent)
T.append(
ValueTransition(tree_node=v, value=values[old_child_state])
)
for st in T_old:
if st.tree_node != -1:
T_index[st.tree_node] = -1
for j, st in enumerate(T):
T_index[st.tree_node] = j
self.N[j] = tree.num_samples(st.tree_node)
for j in range(len(T)):
if T_parent[j] != -1:
self.N[T_parent[j]] -= self.N[j]
def update_tree(self):
"""Update the internal data structures to move on to the next tree."""
parent = self.parent
T_index = self.T_index
T = self.T
_, edges_out, edges_in = next(self.edge_diffs)
for edge in edges_out:
u = edge.child
if T_index[u] == -1:
# Make sure the subtree we're detaching has an T_index-value at the root.
while T_index[u] == -1:
u = parent[u]
assert u != -1
T_index[edge.child] = len(T)
T.append(
ValueTransition(tree_node=edge.child, value=T[T_index[u]].value)
)
parent[edge.child] = -1
for edge in edges_in:
parent[edge.child] = edge.parent
u = edge.parent
if parent[edge.parent] == -1:
# Grafting onto a new root.
if T_index[edge.parent] == -1:
T_index[edge.parent] = len(T)
T.append(
ValueTransition(
tree_node=edge.parent, value=T[T_index[edge.child]].value
)
)
else:
# Grafting into an existing subtree.
while T_index[u] == -1:
u = parent[u]
assert u != -1
assert T_index[u] != -1 and T_index[edge.child] != -1
if T[T_index[u]].value == T[T_index[edge.child]].value:
st = T[T_index[edge.child]]
# Mark the lower ValueTransition as unused.
st.value = -1
st.tree_node = -1
T_index[edge.child] = -1
# We can have values left over still pointing to old roots. Remove
for root in self.tree.roots:
if T_index[root] != -1:
# Use a special marker here to designate the real roots.
T[T_index[root]].value_index = -2
for vt in T:
if vt.tree_node != -1:
if parent[vt.tree_node] == -1 and vt.value_index != -2:
T_index[vt.tree_node] = -1
vt.tree_node = -1
vt.value_index = -1
def update_probabilities(self, site, haplotype_state):
"""Update values of probabilities of path ending at a set of nodes."""
tree = self.tree
T_index = self.T_index
T = self.T
alleles = ["0", "1"]
allelic_state = self.allelic_state
# Set the allelic_state for this site.
allelic_state[tree.root] = alleles.index(site.ancestral_state)
for mutation in site.mutations:
u = mutation.node
allelic_state[u] = alleles.index(mutation.derived_state)
if T_index[u] == -1:
while T_index[u] == tskit.NULL:
u = tree.parent(u)
T_index[mutation.node] = len(T)
T.append(
ValueTransition(tree_node=mutation.node, value=T[T_index[u]].value)
)
for st in T:
u = st.tree_node
if u != -1:
# Get the allelic_state at u. TODO we can cache these states to
# avoid some upward traversals.
v = u
while allelic_state[v] == -1:
v = tree.parent(v)
assert v != -1
match = (
haplotype_state == tskit.MISSING_DATA
or haplotype_state == allelic_state[v]
)
st.value = self.compute_next_probability(site.id, st.value, match)
# Unset the states
allelic_state[tree.root] = -1
for mutation in site.mutations:
allelic_state[mutation.node] = -1
def process_site(self, site, haplotype_state, forwards=True):
"""Process the next site."""
if forwards:
# Forwards algorithm
self.update_probabilities(site, haplotype_state)
self.compress()
s = self.compute_normalisation_factor()
for st in self.T:
if st.tree_node != tskit.NULL:
st.value /= s
st.value = round(st.value, self.precision)
self.output.store_site(
site.id, s, [(st.tree_node, st.value) for st in self.T]
)
else:
# Backwards algorithm
self.output.store_site(
site.id,
self.output.normalisation_factor[site.id],
[(st.tree_node, st.value) for st in self.T],
)
self.update_probabilities(site, haplotype_state)
self.compress()
b_last_sum = self.compute_normalisation_factor()
s = self.output.normalisation_factor[site.id]
for st in self.T:
if st.tree_node != tskit.NULL:
st.value = (
self.rho[site.id] / self.ts.num_samples
) * b_last_sum + (1 - self.rho[site.id]) * st.value
st.value /= s
st.value = round(st.value, self.precision)
def run_forward(self, h):
"""Run the forwards algorithm."""
n = self.ts.num_samples
self.tree.clear()
for u in self.ts.samples():
self.T_index[u] = len(self.T)
self.T.append(ValueTransition(tree_node=u, value=1 / n))
while self.tree.next():
self.update_tree()
for site in self.tree.sites():
self.process_site(site, h[site.id])
return self.output
def run_backward(self, h):
"""Run the backwards algorithm."""
self.tree.clear()
for u in self.ts.samples():
self.T_index[u] = len(self.T)
self.T.append(ValueTransition(tree_node=u, value=1))
while self.tree.next():
self.update_tree()
for site in self.tree.sites():
self.process_site(site, h[site.id], forwards=False)
return self.output
def compute_normalisation_factor(self):
"""Compute normalisation term to avoid underflow.
Not implemented here, as this must be defined at the forwards/backwards level.
"""
raise NotImplementedError()
def compute_next_probability(self, site_id, p_last, is_match):
"""Compute next probability.
Not implemented here, as this must be defined at the forwards/backwards level.
"""
raise NotImplementedError()
class CompressedMatrix:
"""
Class representing a num_samples x num_sites matrix compressed by a tree sequence.
Each site is represented by a set of (node, value) pairs,
which act as "mutations", i.e., any sample that descends
from a particular node will inherit that value (unless any other
values are on the path).
"""
def __init__(self, ts, normalisation_factor=None):
self.ts = ts
self.num_sites = ts.num_sites
self.num_samples = ts.num_samples
self.value_transitions = [None for _ in range(self.num_sites)]
if normalisation_factor is None:
self.normalisation_factor = np.zeros(self.num_sites)
else:
self.normalisation_factor = normalisation_factor
assert len(self.normalisation_factor) == self.num_sites
def store_site(self, site, normalisation_factor, value_transitions):
"""Store the site information.
Store the normalisation factor and the collection of value transitions
at the site
"""
self.normalisation_factor[site] = normalisation_factor
self.value_transitions[site] = value_transitions
# Expose the same API as the low-level classes
@property
def num_transitions(self):
"""Determine the number of unique value transitions at each site."""
a = [len(self.value_transitions[j]) for j in range(self.num_sites)]
return np.array(a, dtype=np.int32)
def get_site(self, site):
"""Get the value transitions at the site."""
return self.value_transitions[site]
def decode(self):
"""Decode the tree encoding of the values into an explicit matrix."""
A = np.zeros((self.num_sites, self.num_samples))
for tree in self.ts.trees():
for site in tree.sites():
f = dict(self.value_transitions[site.id])
for j, u in enumerate(self.ts.samples()):
while u not in f:
u = tree.parent(u)
A[site.id, j] = f[u]
return A
class ForwardMatrix(CompressedMatrix):
"""Class representing a compressed forward matrix."""
class BackwardMatrix(CompressedMatrix):
"""Class representing a compressed backward matrix."""
class ForwardAlgorithm(LsHmmAlgorithm):
"""Runs the Li and Stephens forward algorithm."""
def __init__(self, ts, rho, mu, precision=10):
super().__init__(ts, rho, mu, precision)
self.output = ForwardMatrix(ts)
def compute_normalisation_factor(self):
"""Compute normalisation term to avoid underflow."""
s = 0
for j, st in enumerate(self.T):
assert st.tree_node != tskit.NULL
assert self.N[j] > 0
s += self.N[j] * st.value
return s
def compute_next_probability(self, site_id, p_last, is_match):
"""Determine the scaled likelihoods at the next site."""
rho = self.rho[site_id]
mu = self.mu[site_id]
n = self.ts.num_samples
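        # Transition: stay on the current haplotype with probability (1 - rho),
        # or recombine onto any of the n haplotypes with probability rho / n.
        # Emission: (1 - mu) if the panel haplotype matches the query, mu otherwise.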
p_t = p_last * (1 - rho) + rho / n
p_e = mu
if is_match:
p_e = 1 - mu
return p_t * p_e
class BackwardAlgorithm(LsHmmAlgorithm):
"""Runs the Li and Stephens forward algorithm."""
def __init__(self, ts, rho, mu, normalisation_factor, precision=10):
super().__init__(ts, rho, mu, precision)
self.output = BackwardMatrix(ts, normalisation_factor)
def compute_normalisation_factor(self):
"""Compute normalisation term to avoid underflow."""
s = 0
for j, st in enumerate(self.T):
assert st.tree_node != tskit.NULL
assert self.N[j] > 0
s += self.N[j] * st.value
return s
def compute_next_probability(self, site_id, p_last, is_match):
"""Determine the scaled probabilities at the next (previous) site."""
mu = self.mu[site_id]
e = mu
if is_match:
e = 1 - mu
return p_last * e
def ls_forward_tree(h, ts, rho, mu, precision=30):
"""Forward matrix computation based on a tree sequence."""
fa = ForwardAlgorithm(ts, rho, mu, precision=precision)
return fa.run_forward(h)
def ls_backward_tree(h, ts_mirror, rho, mu, normalisation_factor, precision=30):
"""Backward matrix computation based on a tree sequence."""
ba = BackwardAlgorithm(
ts_mirror, rho, mu, normalisation_factor, precision=precision
)
return ba.run_backward(h)
```
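ls_forward_tree() returns a CompressedMatrix whose per-site normalisation factors give the log-likelihood and whose decode() method expands the tree-compressed values into an explicit num_sites x num_samples array. Below is a minimal driver sketch; it assumes msprime is available to simulate a small binary-allele tree sequence, and the rates are arbitrary example values rather than defaults of this module.
```python
# Minimal driver sketch for ls_forward_tree() (defined in the module above).
# msprime is assumed to be installed; the haplotype choice and the per-site
# recombination/mutation rates are example values, not defaults of this module.
import msprime
import numpy as np

ts = msprime.simulate(10, mutation_rate=0.5, recombination_rate=0.5, random_seed=42)
H = ts.genotype_matrix()           # (num_sites, num_samples) array of 0/1 alleles
h = H[:, 0]                        # query haplotype: find a copying path for sample 0
rho = np.full(ts.num_sites, 0.01)  # per-site recombination probabilities
mu = np.full(ts.num_sites, 1e-3)   # per-site mutation probabilities

cm = ls_forward_tree(h, ts, rho, mu)
F = cm.decode()                    # explicit forward matrix, shape (num_sites, num_samples)
log10_lik = np.sum(np.log10(cm.normalisation_factor))
```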
#### File: lshmm/viterbi/vit_diploid_variants_samples.py
```python
import numba as nb
import numpy as np
# https://github.com/numba/numba/issues/1269
@nb.njit
def np_apply_along_axis(func1d, axis, arr):
"""Create numpy-like functions for max, sum etc."""
assert arr.ndim == 2
assert axis in [0, 1]
if axis == 0:
result = np.empty(arr.shape[1])
for i in range(len(result)):
result[i] = func1d(arr[:, i])
else:
result = np.empty(arr.shape[0])
for i in range(len(result)):
result[i] = func1d(arr[i, :])
return result
@nb.njit
def np_amax(array, axis):
"""Numba implementation of numpy vectorised maximum."""
return np_apply_along_axis(np.amax, axis, array)
@nb.njit
def np_sum(array, axis):
"""Numba implementation of numpy vectorised sum."""
return np_apply_along_axis(np.sum, axis, array)
@nb.njit
def np_argmax(array, axis):
"""Numba implementation of numpy vectorised argmax."""
return np_apply_along_axis(np.argmax, axis, array)
# def forwards_viterbi_dip_naive(n, m, G, s, e, r):
# # Initialise
# V = np.zeros((m, n, n))
# P = np.zeros((m, n, n)).astype(np.int64)
# c = np.ones(m)
# index = (
# 4*np.equal(G[0,:,:], s[0,0]).astype(np.int64) +
# 2*(G[0,:,:] == 1).astype(np.int64) +
# np.int64(s[0,0] == 1)
# )
# V[0,:,:] = 1/(n**2) * e[0,index]
# r_n = r/n
# for l in range(1,m):
# index = (
# 4*np.equal(G[l,:,:], s[0,l]).astype(np.int64) +
# 2*(G[l,:,:] == 1).astype(np.int64) +
# np.int64(s[0,l] == 1)
# )
# for j1 in range(n):
# for j2 in range(n):
# # Get the vector to maximise over
# v = np.zeros((n,n))
# for k1 in range(n):
# for k2 in range(n):
# v[k1, k2] = V[l-1,k1, k2]
# if ((k1 == j1) and (k2 == j2)):
# v[k1, k2] *= ((1 - r[l])**2 + 2*(1-r[l]) * r_n[l] + r_n[l]**2)
# elif ((k1 == j1) or (k2 == j2)):
# v[k1, k2] *= (r_n[l] * (1 - r[l]) + r_n[l]**2)
# else:
# v[k1, k2] *= r_n[l]**2
# V[l,j1,j2] = np.amax(v) * e[l, index[j1, j2]]
# P[l,j1,j2] = np.argmax(v)
# c[l] = np.amax(V[l,:,:])
# V[l,:,:] *= 1/c[l]
# ll = np.sum(np.log10(c))
# return V, P, ll
@nb.njit
def forwards_viterbi_dip_naive(n, m, G, s, e, r):
"""Naive implementation of LS diploid Viterbi algorithm."""
# Initialise
V = np.zeros((m, n, n))
P = np.zeros((m, n, n)).astype(np.int64)
c = np.ones(m)
r_n = r / n
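    # The emission index used below packs three flags into the range 0..7:
    # 4 * (genotype matches the observation) + 2 * (genotype is heterozygous)
    # + (observation is heterozygous), selecting a column of the emission matrix e.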
for j1 in range(n):
for j2 in range(n):
index_tmp = (
4 * np.int64(np.equal(G[0, j1, j2], s[0, 0]))
+ 2 * np.int64((G[0, j1, j2] == 1))
+ np.int64(s[0, 0] == 1)
)
V[0, j1, j2] = 1 / (n ** 2) * e[0, index_tmp]
for l in range(1, m):
index = (
4 * np.equal(G[l, :, :], s[0, l]).astype(np.int64)
+ 2 * (G[l, :, :] == 1).astype(np.int64)
+ np.int64(s[0, l] == 1)
)
for j1 in range(n):
for j2 in range(n):
# Get the vector to maximise over
v = np.zeros((n, n))
for k1 in range(n):
for k2 in range(n):
v[k1, k2] = V[l - 1, k1, k2]
if (k1 == j1) and (k2 == j2):
v[k1, k2] *= (
(1 - r[l]) ** 2 + 2 * (1 - r[l]) * r_n[l] + r_n[l] ** 2
)
elif (k1 == j1) or (k2 == j2):
v[k1, k2] *= r_n[l] * (1 - r[l]) + r_n[l] ** 2
else:
v[k1, k2] *= r_n[l] ** 2
V[l, j1, j2] = np.amax(v) * e[l, index[j1, j2]]
P[l, j1, j2] = np.argmax(v)
c[l] = np.amax(V[l, :, :])
V[l, :, :] *= 1 / c[l]
ll = np.sum(np.log10(c))
return V, P, ll
# def forwards_viterbi_dip_naive_low_mem(n, m, G, s, e, r):
# # Initialise
# V = np.zeros((n,n))
# P = np.zeros((m,n,n)).astype(np.int64)
# c = np.ones(m)
# index = (
# 4*np.equal(G[0,:,:], s[0,0]).astype(np.int64) +
# 2*(G[0,:,:] == 1).astype(np.int64) +
# np.int64(s[0,0] == 1)
# )
# V_previous = 1/(n**2) * e[0,index]
# r_n = r/n
# # Take a look at Haploid Viterbi implementation in Jeromes code and see if we can pinch some ideas.
# # Diploid Viterbi, with smaller memory footprint.
# for l in range(1,m):
# index = (
# 4*np.equal(G[l,:,:], s[0,l]).astype(np.int64) +
# 2*(G[l,:,:] == 1).astype(np.int64) +
# np.int64(s[0,l] == 1)
# )
# for j1 in range(n):
# for j2 in range(n):
# # Get the vector to maximise over
# v = np.zeros((n,n))
# for k1 in range(n):
# for k2 in range(n):
# v[k1, k2] = V_previous[k1, k2]
# if ((k1 == j1) and (k2 == j2)):
# v[k1, k2] *= ((1 - r[l])**2 + 2*(1-r[l]) * r_n[l] + r_n[l]**2)
# elif ((k1 == j1) or (k2 == j2)):
# v[k1, k2] *= (r_n[l] * (1 - r[l]) + r_n[l]**2)
# else:
# v[k1, k2] *= r_n[l]**2
# V[j1,j2] = np.amax(v) * e[l,index[j1, j2]]
# P[l,j1,j2] = np.argmax(v)
# c[l] = np.amax(V)
# V_previous = np.copy(V) / c[l]
# ll = np.sum(np.log10(c))
# return V, P, ll
@nb.njit
def forwards_viterbi_dip_naive_low_mem(n, m, G, s, e, r):
"""Naive implementation of LS diploid Viterbi algorithm, with reduced memory."""
# Initialise
V = np.zeros((n, n))
V_previous = np.zeros((n, n))
P = np.zeros((m, n, n)).astype(np.int64)
c = np.ones(m)
r_n = r / n
for j1 in range(n):
for j2 in range(n):
index_tmp = (
4 * np.int64(np.equal(G[0, j1, j2], s[0, 0]))
+ 2 * np.int64((G[0, j1, j2] == 1))
+ np.int64(s[0, 0] == 1)
)
V_previous[j1, j2] = 1 / (n ** 2) * e[0, index_tmp]
    # Take a look at the haploid Viterbi implementation in Jerome's code and see if we can pinch some ideas.
# Diploid Viterbi, with smaller memory footprint.
for l in range(1, m):
index = (
4 * np.equal(G[l, :, :], s[0, l]).astype(np.int64)
+ 2 * (G[l, :, :] == 1).astype(np.int64)
+ np.int64(s[0, l] == 1)
)
for j1 in range(n):
for j2 in range(n):
# Get the vector to maximise over
v = np.zeros((n, n))
for k1 in range(n):
for k2 in range(n):
v[k1, k2] = V_previous[k1, k2]
if (k1 == j1) and (k2 == j2):
v[k1, k2] *= (
(1 - r[l]) ** 2 + 2 * (1 - r[l]) * r_n[l] + r_n[l] ** 2
)
elif (k1 == j1) or (k2 == j2):
v[k1, k2] *= r_n[l] * (1 - r[l]) + r_n[l] ** 2
else:
v[k1, k2] *= r_n[l] ** 2
V[j1, j2] = np.amax(v) * e[l, index[j1, j2]]
P[l, j1, j2] = np.argmax(v)
c[l] = np.amax(V)
V_previous = np.copy(V) / c[l]
ll = np.sum(np.log10(c))
return V, P, ll
@nb.njit
def forwards_viterbi_dip_low_mem(n, m, G, s, e, r):
"""LS diploid Viterbi algorithm, with reduced memory."""
# Initialise
V = np.zeros((n, n))
V_previous = np.zeros((n, n))
P = np.zeros((m, n, n)).astype(np.int64)
c = np.ones(m)
r_n = r / n
for j1 in range(n):
for j2 in range(n):
index_tmp = (
4 * np.int64(np.equal(G[0, j1, j2], s[0, 0]))
+ 2 * np.int64((G[0, j1, j2] == 1))
+ np.int64(s[0, 0] == 1)
)
V_previous[j1, j2] = 1 / (n ** 2) * e[0, index_tmp]
# Diploid Viterbi, with smaller memory footprint, rescaling, and using the structure of the HMM.
for l in range(1, m):
index = (
4 * np.equal(G[l, :, :], s[0, l]).astype(np.int64)
+ 2 * (G[l, :, :] == 1).astype(np.int64)
+ np.int64(s[0, l] == 1)
)
c[l] = np.amax(V_previous)
argmax = np.argmax(V_previous)
V_previous *= 1 / c[l]
V_rowcol_max = np_amax(V_previous, 0)
arg_rowcol_max = np_argmax(V_previous, 0)
no_switch = (1 - r[l]) ** 2 + 2 * (r_n[l] * (1 - r[l])) + r_n[l] ** 2
single_switch = r_n[l] * (1 - r[l]) + r_n[l] ** 2
double_switch = r_n[l] ** 2
j1_j2 = 0
for j1 in range(n):
for j2 in range(n):
V_single_switch = max(V_rowcol_max[j1], V_rowcol_max[j2])
P_single_switch = np.argmax(
np.array([V_rowcol_max[j1], V_rowcol_max[j2]])
)
if P_single_switch == 0:
template_single_switch = j1 * n + arg_rowcol_max[j1]
else:
template_single_switch = arg_rowcol_max[j2] * n + j2
V[j1, j2] = V_previous[j1, j2] * no_switch # No switch in either
P[l, j1, j2] = j1_j2
# Single or double switch?
single_switch_tmp = single_switch * V_single_switch
if single_switch_tmp > double_switch:
# Then single switch is the alternative
if V[j1, j2] < single_switch * V_single_switch:
V[j1, j2] = single_switch * V_single_switch
P[l, j1, j2] = template_single_switch
else:
# Double switch is the alternative
if V[j1, j2] < double_switch:
V[j1, j2] = double_switch
P[l, j1, j2] = argmax
V[j1, j2] *= e[l, index[j1, j2]]
j1_j2 += 1
V_previous = np.copy(V)
ll = np.sum(np.log10(c)) + np.log10(np.amax(V))
return V, P, ll
@nb.jit
def forwards_viterbi_dip_naive_vec(n, m, G, s, e, r):
"""Vectorised LS diploid Viterbi algorithm using numpy."""
# Initialise
V = np.zeros((m, n, n))
P = np.zeros((m, n, n)).astype(np.int64)
c = np.ones(m)
r_n = r / n
for j1 in range(n):
for j2 in range(n):
index_tmp = (
4 * np.int64(np.equal(G[0, j1, j2], s[0, 0]))
+ 2 * np.int64((G[0, j1, j2] == 1))
+ np.int64(s[0, 0] == 1)
)
V[0, j1, j2] = 1 / (n ** 2) * e[0, index_tmp]
# Jumped the gun - vectorising.
for l in range(1, m):
index = (
4 * np.equal(G[l, :, :], s[0, l]).astype(np.int64)
+ 2 * (G[l, :, :] == 1).astype(np.int64)
+ np.int64(s[0, l] == 1)
)
for j1 in range(n):
for j2 in range(n):
v = (r_n[l] ** 2) * np.ones((n, n))
v[j1, j2] += (1 - r[l]) ** 2
v[j1, :] += r_n[l] * (1 - r[l])
v[:, j2] += r_n[l] * (1 - r[l])
v *= V[l - 1, :, :]
V[l, j1, j2] = np.amax(v) * e[l, index[j1, j2]]
P[l, j1, j2] = np.argmax(v)
c[l] = np.amax(V[l, :, :])
V[l, :, :] *= 1 / c[l]
ll = np.sum(np.log10(c))
return V, P, ll
def forwards_viterbi_dip_naive_full_vec(n, m, G, s, e, r):
"""Fully vectorised naive LS diploid Viterbi algorithm using numpy."""
char_both = np.eye(n * n).ravel().reshape((n, n, n, n))
char_col = np.tile(np.sum(np.eye(n * n).reshape((n, n, n, n)), 3), (n, 1, 1, 1))
char_row = np.copy(char_col).T
rows, cols = np.ogrid[:n, :n]
# Initialise
V = np.zeros((m, n, n))
P = np.zeros((m, n, n)).astype(np.int64)
c = np.ones(m)
index = (
4 * np.equal(G[0, :, :], s[0, 0]).astype(np.int64)
+ 2 * (G[0, :, :] == 1).astype(np.int64)
+ np.int64(s[0, 0] == 1)
)
V[0, :, :] = 1 / (n ** 2) * e[0, index]
r_n = r / n
for l in range(1, m):
index = (
4 * np.equal(G[l, :, :], s[0, l]).astype(np.int64)
+ 2 * (G[l, :, :] == 1).astype(np.int64)
+ np.int64(s[0, l] == 1)
)
v = (
(r_n[l] ** 2)
+ (1 - r[l]) ** 2 * char_both
+ (r_n[l] * (1 - r[l])) * (char_col + char_row)
)
v *= V[l - 1, :, :]
P[l, :, :] = np.argmax(v.reshape(n, n, -1), 2) # Have to flatten to use argmax
V[l, :, :] = v.reshape(n, n, -1)[rows, cols, P[l, :, :]] * e[l, index]
c[l] = np.amax(V[l, :, :])
V[l, :, :] *= 1 / c[l]
ll = np.sum(np.log10(c))
return V, P, ll
@nb.jit
def backwards_viterbi_dip(m, V_last, P):
"""Run a backwards pass to determine the most likely path."""
assert V_last.ndim == 2
assert V_last.shape[0] == V_last.shape[1]
# Initialisation
path = np.zeros(m).astype(np.int64)
path[m - 1] = np.argmax(V_last)
# Backtrace
for j in range(m - 2, -1, -1):
path[j] = P[j + 1, :, :].ravel()[path[j + 1]]
return path
def get_phased_path(n, path):
"""Obtain the phased path."""
return np.unravel_index(path, (n, n))
@nb.jit
def path_ll_dip(n, m, G, phased_path, s, e, r):
"""Evaluate log-likelihood path through a reference panel which results in sequence s."""
index = (
4 * np.int64(np.equal(G[0, phased_path[0][0], phased_path[1][0]], s[0, 0]))
+ 2 * np.int64(G[0, phased_path[0][0], phased_path[1][0]] == 1)
+ np.int64(s[0, 0] == 1)
)
log_prob_path = np.log10(1 / (n ** 2) * e[0, index])
old_phase = np.array([phased_path[0][0], phased_path[1][0]])
r_n = r / n
for l in range(1, m):
index = (
4 * np.int64(np.equal(G[l, phased_path[0][l], phased_path[1][l]], s[0, l]))
+ 2 * np.int64(G[l, phased_path[0][l], phased_path[1][l]] == 1)
+ np.int64(s[0, l] == 1)
)
current_phase = np.array([phased_path[0][l], phased_path[1][l]])
phase_diff = np.sum(~np.equal(current_phase, old_phase))
if phase_diff == 0:
log_prob_path += np.log10(
(1 - r[l]) ** 2 + 2 * (r_n[l] * (1 - r[l])) + r_n[l] ** 2
)
elif phase_diff == 1:
log_prob_path += np.log10(r_n[l] * (1 - r[l]) + r_n[l] ** 2)
else:
log_prob_path += np.log10(r_n[l] ** 2)
log_prob_path += np.log10(e[l, index])
old_phase = current_phase
return log_prob_path
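# Hedged end-to-end sketch of the diploid pipeline defined above. The synthetic
# G, s, e and r arrays below are arbitrary assumptions made purely for
# illustration (the emission matrix simply rewards the "match" indices 4 and 7
# of the encoding used in the functions above); real inputs come from the
# surrounding library. The two printed log-likelihoods should agree.
if __name__ == "__main__":
    import numpy as np  # already imported at module level; repeated for self-containment
    n, m = 4, 6  # 4 reference haplotypes, 6 sites
    H = np.random.randint(0, 2, size=(m, n))  # haplotype panel
    G = H[:, :, None] + H[:, None, :]  # diploid genotype dosages, shape (m, n, n)
    s = np.random.randint(0, 3, size=(1, m))  # observed genotype sequence
    r = np.full(m, 0.1)  # recombination probabilities per site
    r[0] = 0.0
    mu = 0.01  # assumed miscopy probability
    e = np.full((m, 8), mu)
    e[:, [4, 7]] = 1 - mu  # emission probabilities, indexed as in the functions above
    V, P, ll = forwards_viterbi_dip_naive_full_vec(n, m, G, s, e, r)
    path = backwards_viterbi_dip(m, V[-1, :, :], P)
    phased_path = get_phased_path(n, path)
    print(ll, path_ll_dip(n, m, G, phased_path, s, e, r))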
```
|
{
"source": "jeromekelleher/msprime_sim",
"score": 2
}
|
#### File: msprime_sim/src/phenotypes.py
```python
from __future__ import division
import msprime
import numpy as np
import random
import tqdm
import scipy.stats as sp
import src.regressions as reg
import src.tools as tl
import src.tree_sequence as ts
import src.snpgetter as sg
import src.ldscores as ld
import src.printing as pr
import statsmodels.api as sm
import time, sys, traceback, argparse
import src.write as write
import pandas as pd
def case_control(y, prevalence, sample_prevalence, N, log=None):
# Determine the liability threshold.
p = prevalence
T = sp.norm.ppf(1-p)
# Index the cases.
cases = [i for (i, x) in enumerate(y) if x >= T]
mask = np.ones(len(y), dtype=bool)
mask[cases] = False
n_cases = len(cases)
if sample_prevalence is None:
n_controls = N - n_cases
else:
n_controls = int(((1-sample_prevalence) / sample_prevalence) * n_cases)
controls = np.arange(N)
if (N - n_cases) < n_controls:
n_controls = N - n_cases
			if log is not None:
				log.log('Warning: this condition should not hold - '
						'is sample prevalence close to population prevalence?')
controls = controls[mask]
else:
controls = controls[mask][random.sample(range(N - n_cases), n_controls)]
controls = sorted(controls)
return cases, controls, n_cases, n_controls, T
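# Hedged usage sketch (all values are arbitrary): draw liability phenotypes and
# split them at a 5% population prevalence, keeping every remaining control.
#
# N_demo = 1000
# y_demo = np.random.normal(size=N_demo)
# cases, controls, n_cases, n_controls, T = case_control(y_demo, 0.05, None, N_demo)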
def get_phenotypes(args, N, n_pops, tree_sequence_list, m_total, log):
y = np.zeros(N)
if args.debug:
random.seed(1)
np.random.seed(1)
if args.C_bool:
C = np.random.binomial(1, args.C_bool_p, size=N)
C = (C - np.mean(C)) / np.std(C)
log.log('Boolean covariate.')
log.log('Average kurtosis for these phenotypes: {K}.'.format(K=np.sum(C**4)/N))
else:
C = np.random.normal(loc=0, scale=1, size=N)
C = (C - np.mean(C)) / np.std(C)
# print the average kurtosis across the individuals
log.log('Normally distributed covariate. Kurtosis should be around 3.')
log.log('Average kurtosis for these phenotypes: {K}.'.format(K=np.sum(C**4)/N))
if args.include_pop_strat is True and args.s2 > 0:
# Get the means for the populations.
alpha = np.random.normal(loc=0, scale=np.sqrt(args.s2), size=n_pops)
log.log(alpha)
# Add pop-strat additions to the phenotype vector, conditional on the population sampled from.
for pops in range(n_pops):
pop_leaves = tree_sequence_list[0].get_samples(population_id=pops)
			pop_indices = [int(x / 2) for x in pop_leaves[0::2]]
			y[pop_indices] += alpha[pops]
for chr in range(args.n_chr):
m_chr = int(tree_sequence_list[chr].get_num_mutations())
log.log('Picking causal variants and determining effect sizes in chromosome {chr}'.format(chr=chr+1))
if (((1 + int(args.dominance) + int(args.gxe)) * args.p_causal) < 1) or args.same_causal_sites: # If the number of runs through the data is less than 1, run this speedup.
tree_sequence_pheno_A, m_causal_A = ts.set_mutations_in_tree(tree_sequence_list[chr], args.p_causal)
log.log('Picked {m} additive causal variants out of {mc}'.format(m=m_causal_A, mc=m_chr))
if args.same_causal_sites is False:
tree_sequence_pheno_D, m_causal_D = ts.set_mutations_in_tree(tree_sequence_list[chr], args.p_causal)
if args.h2_D > 0: log.log('Picked {m} dominance causal variants out of {mc}'.format(m=m_causal_D, mc=m_chr))
tree_sequence_pheno_AC, m_causal_AC = ts.set_mutations_in_tree(tree_sequence_list[chr], args.p_causal)
if args.h2_AC > 0: log.log('Picked {m} gxe causal variants out of {mc}'.format(m=m_causal_AC, mc=m_chr))
if args.h2_A > 0:
beta_A = np.random.normal(loc=0, scale=np.sqrt(args.h2_A / (m_total * args.p_causal)), size=m_causal_A)
# Get the phenotypes.
k = 0
log.log('Determining phenotype data: additive.')
for variant in tl.progress(args.progress_bars, tree_sequence_pheno_A.variants(), total=m_causal_A): # Note, progress here refers you to tqdm which just creates a pretty progress bar.
X_A = sg.nextSNP_add(variant)
# Effect size on the phenotype.
y += X_A * beta_A[k]
k += 1
if args.dominance and args.h2_D >0:
beta_D = np.random.normal(loc=0, scale=np.sqrt(args.h2_D / (m_total * args.p_causal)), size=m_causal_D)
k = 0
log.log('Determining phenotype data: dominance.')
for variant in tl.progress(args.progress_bars, tree_sequence_pheno_D.variants(), total=m_causal_D): # Note, progress here refers you to tqdm which just creates a pretty progress bar.
X_A, X_D = sg.nextSNP(variant)
# Effect size on the phenotype.
y += X_D * beta_D[k]
k += 1
if args.gxe and args.h2_AC > 0:
beta_AC = np.random.normal(loc=0, scale=np.sqrt(args.h2_AC / (m_total * args.p_causal)), size=m_causal_AC)
# If examining interaction with a covariate, pick the values of the covariate, and normalise.
k = 0
log.log('Determining phenotype data: gene x environment.')
for variant in tl.progress(args.progress_bars, tree_sequence_pheno_AC.variants(), total=m_causal_AC): # Note, progress here refers you to tqdm which just creates a pretty progress bar.
X_A = sg.nextSNP_add(variant)
# Effect size on the phenotype.
y += C * X_A * beta_AC[k]
k += 1
else:
beta_A, beta_D, beta_AC = np.zeros(m_causal_A), np.zeros(m_causal_A), np.zeros(m_causal_A)
if args.h2_A > 0:
beta_A = np.random.normal(loc=0, scale=np.sqrt(args.h2_A / (m_total * args.p_causal)), size=m_causal_A)
if args.dominance and args.h2_D > 0:
beta_D = np.random.normal(loc=0, scale=np.sqrt(args.h2_D / (m_total * args.p_causal)), size=m_causal_A)
if args.gxe and args.h2_AC > 0:
beta_AC = np.random.normal(loc=0, scale=np.sqrt(args.h2_AC / (m_total * args.p_causal)), size=m_causal_A)
k = 0
log.log('Determining phenotype data')
# Note that we use just one tree_sequence here, because the causal sites are the same in this portion of the code.
for variant in tl.progress(args.progress_bars, tree_sequence_pheno_A.variants(), total=m_causal_A): # Note, progress here refers you to tqdm which just creates a pretty progress bar.
X_A, X_D = sg.nextSNP(variant)
# Effect size on the phenotype.
y += X_A * beta_A[k] + X_D * beta_D[k] + C * X_A * beta_AC[k]
k += 1
else:
m_causal = int(m_chr * args.p_causal)
beta_A, beta_D, beta_AC = np.zeros(m_chr), np.zeros(m_chr), np.zeros(m_chr)
beta_A_causal_index = random.sample(range(m_chr), m_causal)
log.log('Picked {m} additive causal variants out of {mc}'.format(m=m_causal, mc=m_chr))
if args.h2_A > 0:
beta_A[beta_A_causal_index] = np.random.normal(loc=0, scale=np.sqrt(args.h2_A / (m_total * args.p_causal)), size=m_causal)
if args.dominance:
beta_D, beta_D_causal_index = np.zeros(m_chr), random.sample(range(m_chr), m_causal)
log.log('Picked {m} dominance causal variants out of {mc}'.format(m=m_causal, mc=m_chr))
if args.h2_D > 0:
beta_D[beta_D_causal_index] = np.random.normal(loc=0, scale=np.sqrt(args.h2_D / (m_total * args.p_causal)), size=m_causal)
if args.gxe:
beta_AC, beta_AC_causal_index = np.zeros(m_chr), random.sample(range(m_chr), m_causal)
log.log('Picked {m} gxe causal variants out of {mc}'.format(m=m_causal, mc=m_chr))
if args.h2_AC > 0:
beta_AC[beta_AC_causal_index] = np.random.normal(loc=0, scale=np.sqrt(args.h2_AC / (m_total * args.p_causal)), size=m_causal)
# Get the phenotypes.
k = 0
log.log('Determining phenotype data.')
for variant in tl.progress(args.progress_bars, tree_sequence_list[chr].variants(), total=m_chr): # Note, progress here refers you to tqdm which just creates a pretty progress bar.
X_A, X_D = sg.nextSNP(variant)
# Effect size on the phenotype.
y += X_A * beta_A[k] + X_D * beta_D[k] + X_A * C * beta_AC[k]
k += 1
# Add noise to the y.
y += np.random.normal(loc=0, scale=np.sqrt(1-(args.h2_A+args.h2_D+args.h2_AC+args.s2)), size=N)
# Finally, normalise.
y = (y - np.mean(y)) / np.std(y)
return y, C
# Here, want to create a chi sq function, and an LD score function.
def get_chisq(args, tree_sequence_list_geno, m_geno, m_geno_total, y, N, C, log):
# Initialise the chi squared statistics.
chisq_A, chisq_D, chisq_AC = np.zeros((m_geno_total,1)), np.zeros((m_geno_total,1)), np.zeros((m_geno_total,1))
if args.case_control:
log.log("Running case-control simulation.")
if args.prevalence is None:
raise ValueError("prevalence must be set if running case-control analysis.")
		cases, controls, n_cases, n_controls, T = case_control(y, args.prevalence, args.sample_prevalence, N, log)
n = n_cases + n_controls
y_cc = np.zeros(n)
y_cc[:n_cases] = 1
index = cases + controls
C_sim = C[index]
if args.linear is False and args.ldsc is True:
k = 0
for chr in range(args.n_chr):
for variant in tl.progress(args.progress_bars, tree_sequence_list_geno[chr].variants(), total=m_geno[chr]):
X_A, X_D = sg.nextSNP(variant, index = index)
chisq_A[k] = sm.Logit(y_cc, sm.add_constant(X_A)).fit(disp=0).llr
chisq_D[k] = sm.Logit(y_cc, sm.add_constant(X_D)).fit(disp=0).llr
chisq_AC[k] = sm.Logit(y_cc, sm.add_constant(C_sim * X_A)).fit(disp=0).llr
k += 1
if ( ((args.case_control is False) or (args.case_control is True and args.linear is True)) and args.ldsc is True ):
if args.case_control:
log.log("Warning: running linear regression for case-control.")
y = (y_cc - np.mean(y_cc)) / np.std(y_cc)
index = cases + controls
C_sim = C[index]
else:
index = None
C_sim = C
n = N
# Then use these ys to determine beta hats.
k = 0
for chr in range(args.n_chr):
log.log('Determining chi-squared statistics in chromosome {chr}'.format(chr=chr+1))
for variant in tree_sequence_list_geno[chr].variants():
X_A, X_D = sg.nextSNP(variant, index=index)
# Then sum to get the effect size on the phenotype.
chisq_A[k] = np.dot(y.reshape(1,n), X_A)**2 / n
chisq_D[k] = np.dot(y.reshape(1,n), X_D)**2 / n
chisq_AC[k] = np.dot(y.reshape(1,n), C_sim * X_A)**2 / n
k += 1
if args.write_pheno or args.write_trees:
if args.case_control:
sample_ID = index
y = y_cc.astype(int)
if args.write_trees:
tree_index = [[2*x,2*x+1] for x in index]
tree_index = [j for x in tree_index for j in x]
for chr in range(args.n_chr):
tree_sequence_to_write = tree_sequence_list_geno[chr].simplify(tree_index)
write.trees(args.out, tree_sequence_to_write, chr, m_geno[chr], n_pops, N, sim, args.vcf, index)
else:
sample_ID = np.arange(N)
df_pheno=pd.DataFrame({'sample_ID':sample_ID, 'phenotype':y})
df_pheno.to_csv(args.out + ".sim" + str(sim+1) + '.pheno.tsv', sep='\t', header=True, index=False)
if args.case_control:
return chisq_A, chisq_D, chisq_AC, n, C_sim, index, y_cc, n_cases, T
else:
return chisq_A, chisq_D, chisq_AC, n, C_sim, index
```
|
{
"source": "jeromekelleher/tszip",
"score": 2
}
|
#### File: tszip/tscompress/compression.py
```python
import logging
import os
import numcodecs
import zarr
import numpy as np
import tskit
logger = logging.getLogger(__name__)
def compress(ts, path):
"""
Compresses the specified tree sequence and writes it to the specified
path.
"""
    logger.info("Compressing to {}".format(path))
try:
store = zarr.ZipStore(path, mode='w')
root = zarr.group(store=store)
compress_zarr(ts, root)
store.close()
except Exception as e:
os.unlink(path)
raise e
def compress_zarr(ts, root):
# TODO this current version is the most extreme option where we throw away
# all the non-site information.
# First reduce to site topology
tables = ts.dump_tables()
tables.simplify(reduce_to_site_topology=True)
nodes = root.create_group("nodes")
flags = nodes.empty("flags", shape=len(tables.nodes), dtype=np.uint8)
flags[:] = tables.nodes.flags
logger.debug(flags.info)
# Get the indexes into the position array.
pos_map = np.hstack([tables.sites.position, [tables.sequence_length]])
pos_map[0] = 0
left_mapped = np.searchsorted(pos_map, tables.edges.left)
if np.any(pos_map[left_mapped] != tables.edges.left):
raise ValueError("Invalid left coordinates")
right_mapped = np.searchsorted(pos_map, tables.edges.right)
if np.any(pos_map[right_mapped] != tables.edges.right):
raise ValueError("Invalid right coordinates")
filters = [numcodecs.Delta(dtype=np.int32, astype=np.int32)]
compressor = numcodecs.Blosc(cname='zstd', clevel=9, shuffle=numcodecs.Blosc.SHUFFLE)
edges = root.create_group("edges")
parent = edges.empty(
"parent", shape=len(tables.edges), dtype=np.int32, filters=filters,
compressor=compressor)
child = edges.empty(
"child", shape=len(tables.edges), dtype=np.int32, filters=filters,
compressor=compressor)
left = edges.empty(
"left", shape=len(tables.edges), dtype=np.uint32, filters=filters,
compressor=compressor)
right = edges.empty(
"right", shape=len(tables.edges), dtype=np.uint32, filters=filters,
compressor=compressor)
parent[:] = tables.edges.parent
child[:] = tables.edges.child
left[:] = left_mapped
right[:] = right_mapped
mutations = root.create_group("mutations")
site = mutations.empty(
"site", shape=len(tables.mutations), dtype=np.int32, compressor=compressor)
node = mutations.empty(
"node", shape=len(tables.mutations), dtype=np.int32, compressor=compressor)
site[:] = tables.mutations.site
node[:] = tables.mutations.node
def decompress(path):
"""
Returns a decompressed tskit tree sequence read from the specified path.
"""
store = zarr.ZipStore(path, mode='r')
root = zarr.group(store=store)
return decompress_zarr(root)
def decompress_zarr(root):
site = root["mutations/site"][:]
num_sites = site[-1] + 1
n = site.shape[0]
tables = tskit.TableCollection(num_sites)
tables.mutations.set_columns(
node=root["mutations/node"],
site=site,
derived_state=np.zeros(n, dtype=np.int8) + ord("1"),
derived_state_offset=np.arange(n + 1, dtype=np.uint32))
tables.sites.set_columns(
position=np.arange(num_sites),
ancestral_state=np.zeros(num_sites, dtype=np.int8) + ord("0"),
ancestral_state_offset=np.arange(num_sites + 1, dtype=np.uint32))
flags = root["nodes/flags"][:]
n = flags.shape[0]
tables.nodes.set_columns(
flags=flags.astype(np.uint32),
time=np.arange(n))
tables.edges.set_columns(
left=root["edges/left"],
right=root["edges/right"],
parent=root["edges/parent"],
child=root["edges/child"])
return tables.tree_sequence()
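# Hedged round-trip sketch. It assumes msprime (legacy ``simulate`` API) is
# installed and uses an arbitrary file name; only the site topology survives
# ``compress_zarr``, so the decompressed tables are not identical to the input.
if __name__ == "__main__":
    import msprime
    ts = msprime.simulate(sample_size=10, mutation_rate=2, random_seed=42)
    compress(ts, "example.trees.tsz")
    ts2 = decompress("example.trees.tsz")
    print(ts.num_sites, ts2.num_sites, ts2.num_trees)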
```
|
{
"source": "jeromeku/Python-Financial-Tools",
"score": 4
}
|
#### File: jeromeku/Python-Financial-Tools/jumps.py
```python
import numpy as np
from stock import Stock
from scipy.special import gamma
from scipy import stats
class JumpStatistics(object):
def __init__(self,stock):
self.stock = stock
class BarndorffNielsen(JumpStatistics):
    # An implementation of the Barndorff-Nielsen test statistic used for detecting "jumps"
    # (or "surprises") in stock price data. The mathematics for this test statistic can be
# found at the following two resources:
#
# <NAME>. 2008. "Problems in the Application of Jump Detection Tests
# to Stock Price Data". Duke University.
#
# "Some Like it Smooth, and Some Like it Rough: Untangling Continuous and Jump
# Components in Measuring, Modeling, and Forecasting Asset Return Volatility".
# <NAME>, <NAME> and <NAME>. September 2003.
#
    # The following is an example of how to apply the Barndorff-Nielsen statistic to detect
# surprises in Microsoft stock data:
# if True:
# # Observe a trend in Microsoft stock prices where a jump occurs.
# stock = Stock("MSFT",{"start" : "2013-02-14","end" : "2014-02-14"})
# else:
# # Otherwise, view a sequence of stock prices where no jump was detected.
# stock = Stock("MSFT",{"start" : "2013-03-01","end" : "2013-04-01"})
# stock.display_price()
# bn = BarndorffNielsen(stock)
# bn.barndorff_nielsen_test()
def __init__(self,stock):
super(BarndorffNielsen,self).__init__(stock)
self.n = len(self.stock.statistics["log_returns"])
self.realized_variance = self.calculate_realized_variance()
self.bipower_variance = self.calculate_bipower_variance()
        self.relative_jump = float(self.realized_variance - self.bipower_variance) / self.realized_variance
self.tripower_quarticity = self.calculate_tripower_quarticity()
self.statistic = self.barndorff_nielsen_statistic()
def calculate_realized_variance(self):
log_returns = self.stock.statistics["log_returns"]
variance = np.sum(np.power(log_returns,2))
return variance
def calculate_bipower_variance(self):
n = self.n
log_returns = np.absolute(self.stock.statistics["log_returns"])
        variance = (np.pi / 2.0) * (float(n) / (n - 1.0)) * np.sum(log_returns[1:] * log_returns[:-1])
return variance
def calculate_tripower_quarticity(self):
n = self.n
# Notice that the absolute value of the log returns is calculated in this step. This is to
# prevent numerical nan's from being produced. This also seems to be consistent with the
# notation specified by <NAME> and <NAME> et al.
log_returns = np.absolute(self.stock.statistics["log_returns"])
mu = np.power(np.power(2.0,2.0 / 3) * gamma(7.0 / 6.0) * np.power(gamma(1.0 / 2.0),-1),-3)
tripower = np.sum(np.power(log_returns[2:],4.0 / 3) *
np.power(log_returns[1:-1],4.0 / 3) * np.power(log_returns[:-2],4.0 / 3))
        quarticity = n * mu * (float(n) / (n - 2.0)) * tripower
return quarticity
def barndorff_nielsen_statistic(self):
n = self.n
pi = np.pi
relative_jump = self.relative_jump
tripower = self.tripower_quarticity
bipower = self.bipower_variance
statistic = relative_jump / np.sqrt(((pi / 2) ** 2 + pi - 5) * (1.0 / n) * max(1,tripower / (bipower ** 2)))
return statistic
def barndorff_nielsen_test(self,alpha = .01):
quantile = stats.norm.ppf(1 - alpha)
print_string = ""
if self.statistic > quantile:
print_string += "\tThe Barndorff-Nielsen Test reports that there was a jump in asset price.\n"
else:
print_string += "\tThe Barndorff-Nielsen Test reports that there was not a jump in asset price.\n"
print_string += "\tThe significance level of the test: %.2f\n" % alpha
        print(self.stock)
        print(print_string)
if True:
# Observe a trend in Microsoft stock prices where a jump occurs.
stock = Stock("MSFT",{"start" : "2013-02-14","end" : "2014-02-14"})
else:
# Otherwise, view a sequence of stock prices where no jump was detected.
stock = Stock("MSFT",{"start" : "2013-03-01","end" : "2013-04-01"})
stock.display_price()
bn = BarndorffNielsen(stock)
bn.barndorff_nielsen_test()
```
|
{
"source": "JeromeLabonte-CRIM/Service",
"score": 3
}
|
#### File: Service/tests/test_report.py
```python
import json
import unittest
# --Modules to test -----------------------------------------------------------
from VestaService.Report import WorkerReport, TaskReport
class UtilsTests(unittest.TestCase):
def test_WorkerReport_update(self):
wr = WorkerReport(nb_tasks=3)
tr = TaskReport(doc_id="secret", tool="screwdriver")
tr.set_succeeded()
wr.update(tr)
self.assertEqual(wr.nb_success, 1,
msg="Error by updating a worker report "
"with a successful task report.")
tr.set_failed(code=444, message="No screwdriver in this drawer.")
wr.update(tr)
self.assertEqual(wr.nb_failures, 1,
msg="Error by updating a worker report "
"with a failed task report.")
self.assertEqual(len(wr.detail), 2,
msg="Error by updating a worker report "
"with a failed task report. "
" Wrong number of tasks.")
def test_WorkerReport_tojson(self):
wr = WorkerReport(nb_tasks=2)
tr = TaskReport(doc_id="secret", tool="screwdriver")
tr.set_succeeded()
wr.update(tr)
tr2 = TaskReport(doc_id="secret", tool="screwdriver")
tr2.set_failed(code=444, message="No screwdriver in this drawer.")
wr.update(tr2)
wr.set_succeeded()
wr.update_completion_ratio()
attended_wrjson_str = ('{"nb_success": 1, "nb_ignores": 0, '
'"nb_failures": 1, "completion_ratio" : 1.0, '
'"nb_tasks" : 2, "status" : "success",'
'"detail": ['
'{"doc_id" : "secret", "step" : "screwdriver", '
'"status" : "success"},'
'{"doc_id" : "secret", "step" : "screwdriver", '
'"status" : "failure", "code":444, '
'"message" : "No screwdriver in this drawer."}'
']'
'}')
wrjson = wr.to_json()
self.assertEqual(json.JSONDecoder().decode(wrjson),
json.JSONDecoder().decode(attended_wrjson_str))
def test_WorkerReport_abbreviated_json(self):
wr = WorkerReport(nb_tasks=2)
tr = TaskReport(doc_id="secret", tool="screwdriver")
tr.set_succeeded()
wr.update(tr)
tr2 = TaskReport(doc_id="secret", tool="screwdriver")
tr2.set_failed(code=444, message="No screwdriver in this drawer.")
wr.update(tr2)
wr.set_succeeded()
wr.update_completion_ratio()
attended_wrjson_str = ('{"nb_success": 1, "nb_ignores": 0, '
'"nb_failures": 1, "completion_ratio" : 1.0, '
'"nb_tasks" : 2, "status" : "success",'
'"full_report_url":"http://mss:1234"'
'}')
wrjson = wr.abbreviated_json("http://mss:1234")
self.assertEqual(json.JSONDecoder().decode(wrjson),
json.JSONDecoder().decode(attended_wrjson_str))
```
#### File: Service/VestaService/Document.py
```python
from datetime import datetime
import logging
# ----------------------------------------------------------------------------
class Document(object):
"""
    Container for all information related to downloading a remote document
    to a local file.
"""
url = None
local_path = None
transfer_time = None
length = None
def __init__(self, url=None, path=None):
"""
Constructor.
:param url: URL of the source document which will be handled locally.
:param path: Local path to the document copy.
"""
self.logger = logging.getLogger(__name__)
self.logger.debug("Creating an instance with parameters url=%s,"
" path=%s", url, path)
self.url = url
self.local_path = path
self.transfer_time = datetime.now()
def __repr__(self):
"""
Printable representation
"""
return self.url
```
#### File: Service/VestaService/DownloadRemote.py
```python
import optparse
# --Project specific----------------------------------------------------------
from . import RemoteAccess
def main():
"""
Command line entry point.
"""
usage = '%prog url'
parser = optparse.OptionParser(usage=usage)
parser.add_option('-c', '--credentials', dest='credentials_fn',
                      help='Path to credentials filename',
default=None)
args = parser.parse_args()[-1]
if len(args) != 1:
parser.error('Insufficient arguments')
doc_msg = {'url': args[0], 'credentials': None}
doc = RemoteAccess.download(doc_msg)
print("Downloaded file is at «{fn}»".format(fn=doc.local_path))
if __name__ == '__main__':
main()
```
|
{
"source": "jeromelebleu/django-cruditor",
"score": 2
}
|
#### File: django-cruditor/cruditor/mixins.py
```python
from collections import OrderedDict
from django.contrib import messages
from django.contrib.auth.views import REDIRECT_FIELD_NAME, LoginView
from django.core.exceptions import PermissionDenied
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from .forms import LoginForm
class CruditorMixin(object):
"""
Base mixin for all Cruditor views. Provides common functionality for all views.
It is a good idea to have a own "base" mixin to configure the common options
like ``menu_title``, the urls and templates.
Usually you might have ``required_permission`` configured per-view.
"""
#: Title to use in templates / menu bar
menu_title = 'CRUDitor'
#: URL to use in the linked menu_title
index_url = '#'
#: URL to use when providing a logout link to the user.
logout_url = '#'
#: URL to the change password view, if available.
change_password_url = None
#: Template name which is included to render the menu.
menu_template_name = 'cruditor/includes/menu.html'
#: Template used to include extra head stuff.
extrahead_template_name = 'cruditor/includes/extrahead.html'
#: Page template for the login view.
login_template_name = 'cruditor/login.html'
#: Form class which is used in the login view.
login_form_class = LoginForm
#: Decide if only staff users should be able to use the Cruditor views.
staff_required = True
#: Which permission is required to access the view.
required_permission = None
#: If not provided, Cruditor tries to look up the verbose name from ``model.Meta``
model_verbose_name = None
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
"""
        Ensure the user is logged in (by calling the ``ensure_logged_in`` method).
If the user is logged in, permissions are checked by calling
``ensure_required_permission``.
"""
login_result = self.ensure_logged_in(request, *args, **kwargs)
if login_result is not True:
return login_result
self.ensure_required_permission()
return super().dispatch(request, *args, **kwargs)
def get_cruditor_context(self, alternative_title=None):
"""
Provides some context for all Cruditor templates to render menu, header,
breadcrumb and title buttons.
The method takes an optional argument ``alternative_title`` to override
the default title from ``get_title`` method.
"""
return {
'title': alternative_title or self.get_title(),
'breadcrumb': self.get_breadcrumb() + [
{'title': alternative_title or self.get_breadcrumb_title(), 'url': None}
],
'titlebuttons': self.get_titlebuttons(),
'constants': {
'menu_title': self.menu_title,
'menu_template_name': self.menu_template_name,
'extrahead_template_name': self.extrahead_template_name,
'index_url': self.index_url,
'logout_url': self.logout_url,
'change_password_url': self.change_password_url,
}
}
def get_title(self):
"""
Returns the title of the page. Uses view's ``title`` property. If not set
falls back to ``menu_title``.
"""
return getattr(self, 'title', self.menu_title)
def get_breadcrumb_title(self):
"""
By default, the breadcrumb title is the same as the page title.
        Calls ``get_title`` if not overridden.
"""
return self.get_title()
def get_breadcrumb(self):
"""
This method is expected to return a list of breadcrumb elements as a list.
Every breadcrumb element is a dict or object with at least
a ``title`` property/key. If a ``url`` key/property is provided, the item
is linked.
"""
return []
def get_titlebuttons(self):
"""
This method is expected to return None or a list of buttons to display in
the title row of the page.
Every button element is a dict or object with at least a ``label`` and
``url`` property/key. In addition, one can provide an alternative
        ``button_class`` which is used as a CSS class - prefixed with "btn-".
Default ``button_class`` is "light".
"""
return None
def get_model_verbose_name(self):
"""
Returns the verbose name of the handled object/item.
If ``model_verbose_name`` is set, the value is used. If not, Cruditor
tries to get the verbose name from the model property (via Meta class).
If no name is available at all, "Item" is returned.
"""
if self.model_verbose_name:
return self.model_verbose_name
if getattr(self, 'model', None):
return self.model._meta.verbose_name
return 'Item'
def ensure_logged_in(self, request, *args, **kwargs):
"""
This method checks if the request user is logged in and has the right
flags set (e.g. ``is_staff`` if ``staff_required`` is set in view).
If user is logged in, ``True`` is returned.
If not, ``handle_not_logged_in`` is called.
"""
if (
not request.user.is_active or
(self.staff_required and not request.user.is_staff)
):
return self.handle_not_logged_in(request, *args, **kwargs)
return True
def handle_not_logged_in(self, request, *args, **kwargs):
"""
        This method is responsible for handling users who are not logged in.
By default, renders the Django login view using a Cruditor optimized
template using the ``login_form_class`` as Form.
"""
return LoginView.as_view(
template_name=self.login_template_name,
redirect_field_name=REDIRECT_FIELD_NAME,
form_class=self.login_form_class,
redirect_authenticated_user=False,
extra_context={
'app_path': request.get_full_path(),
'next_field': REDIRECT_FIELD_NAME,
'next_value': request.get_full_path(),
'cruditor': self.get_cruditor_context(alternative_title='Login'),
},
)(request)
def get_required_permission(self):
"""
Returns the required Django permissions required to access the view.
You might override the method to apply more complex rules on what
permissions are required.
"""
return self.required_permission
def ensure_required_permission(self):
"""
        This method ensures that all required permissions (fetched by calling
        ``get_required_permission``) are granted.
If permissions are not met, ``PermissionDenied`` is raised.
"""
required_permission = self.get_required_permission()
if not required_permission:
return
if not self.request.user.has_perm(required_permission):
raise PermissionDenied
def get_context_data(self, **kwargs):
"""
Adds the ``cruditor`` context variable to the template context. Uses data
from ``get_cruditor_context`` method.
"""
context = super().get_context_data(**kwargs)
context['cruditor'] = self.get_cruditor_context()
return context
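# Hedged configuration sketch for the class docstring above: a project-wide
# base mixin pinning the common options. All names and URL patterns here are
# hypothetical and would need ``from django.urls import reverse_lazy``.
#
# class ProjectBaseMixin(CruditorMixin):
#     menu_title = 'My backoffice'
#     index_url = reverse_lazy('dashboard:index')
#     logout_url = reverse_lazy('auth:logout')
#     staff_required = True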
class FormViewMixin(object):
"""
Mixin to add formset support to Django FormViews. To use formsets,
you have to provide a set of formsets as a dict (or OrderedDict if you have
more than one formset - just to have a defined ordering).
"""
formset_classes = None
def get_formset_classes(self):
"""
This method returns the formset classes to render in the form view.
By default, returns the ``formset_classes`` property.
"""
return self.formset_classes or {}
def get(self, request, *args, **kwargs):
"""
Extended get-method to render to form and all formsets properly initialized.
"""
self.object = self.get_object()
formsets = OrderedDict([(
formset_name,
formset_class(instance=self.object)
) for formset_name, formset_class in self.get_formset_classes().items()])
return self.render_to_response(self.get_context_data(
form=self.get_form(self.get_form_class()),
formsets=formsets,
))
def post(self, request, *args, **kwargs):
"""
Extended version of the FormView.post method which validates the form and
all configured formsets. If everything is valid, ``form_valid`` is called.
If something is not valid, ``form_invalid`` is called.
Both the form instance and all formset instances are provided to the called
method. The form is passed as the first argument, the formsets are passed
as keyword arguments using the formset key from ``formset_classes``.
"""
self.object = self.get_object()
form = self.get_form(self.get_form_class())
formsets = OrderedDict([(
formset_name,
formset_class(request.POST, files=request.FILES, instance=self.object)
) for formset_name, formset_class in self.get_formset_classes().items()])
if all([form.is_valid()] + [formset.is_valid() for formset in formsets.values()]):
return self.form_valid(form, **formsets)
else:
return self.form_invalid(form, **formsets)
def save_form(self, form, **formsets):
"""
This method is called from ``form_valid`` to actual save the data from the
form and all formsets. All saving is done by default.
"""
self.object = form.save()
self.formset_objects = {}
for formset_name, formset in formsets.items():
formset.instance = self.object
self.formset_objects[formset_name] = formset.save()
def form_valid(self, form, **formsets):
"""
Saves the data and provides a nice success message, then redirects to the
        URL returned by ``get_success_url``.
"""
self.save_form(form, **formsets)
messages.success(self.request, self.success_message.format(
model=self.get_model_verbose_name(), object=self.object))
return redirect(self.get_success_url())
def form_invalid(self, form, **formsets):
"""
Re-render the page with the invalid form and/or formsets.
"""
return self.render_to_response(self.get_context_data(
form=form,
formsets=formsets,
formset_errors=True,
))
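# Hedged usage sketch of ``formset_classes``. The model, form and formset names
# are hypothetical, and it is not shown here whether the bundled views (e.g.
# ``cruditor.views.CruditorChangeView``) already mix this class in; adjust the
# bases accordingly.
#
# class PersonChangeView(FormViewMixin, CruditorChangeView):
#     model = Person
#     form_class = PersonForm
#     formset_classes = {'addresses': AddressInlineFormSet}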
```
#### File: examples/collection/forms.py
```python
from django import forms
from cruditor.forms import CruditorTapeformMixin
from examples.store.models import Person
class PersonForm(CruditorTapeformMixin, forms.ModelForm):
reminder = forms.SplitDateTimeField(
label='Next reminder', help_text='Some help for you')
class Meta:
model = Person
fields = '__all__'
def clean(self):
if not self.cleaned_data.get('last_name', ''):
raise forms.ValidationError('Please provide a last name too.')
return self.cleaned_data
```
#### File: examples/remote/models.py
```python
import requests
BASE_TAG = {'id': 0, 'name': 'cruditor'}
class Pet:
def __init__(self, pet):
self.data = {
'id': pet['id'],
'name': pet['name'],
'photo_url': pet['photoUrls'][0]
}
def __str__(self):
return self.name
@property
def pk(self):
return self.data['id']
@property
def name(self):
return self.data['name']
def for_form(self):
return {
'name': self.data['name'],
'photo_url': self.data['photo_url']
}
def update(self, form):
self.data = Pet(requests.put('http://petstore.swagger.io/v2/pet', json={
'id': self.data['id'],
'name': form['name'],
'photoUrls': [form['photo_url']],
'tags': [BASE_TAG],
'status': 'available'
}).json()).data
def delete(self):
requests.delete(
'http://petstore.swagger.io/v2/pet/{}'.format(self.data['id']))
@classmethod
def get_list(cls):
all_pets = requests.get(
'http://petstore.swagger.io/v2/pet/findByStatus',
{'status': 'available'}
).json()
for pet in all_pets:
# Filter pets with tag cruditor
tags = pet.get('tags', [])
if not tags or tags[0] != BASE_TAG:
continue
yield Pet(pet)
@classmethod
def get(cls, pk):
return Pet(requests.get(
'http://petstore.swagger.io/v2/pet/{}'.format(pk)).json())
@classmethod
def create(cls, form):
return Pet(requests.post('http://petstore.swagger.io/v2/pet', json={
'name': form['name'],
'photoUrls': [form['photo_url']],
'tags': [BASE_TAG],
'status': 'available'
}).json())
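# Hedged usage sketch (hits the public petstore API, so kept as a comment; the
# pet name and photo URL are arbitrary):
#
# pet = Pet.create({'name': 'Rex', 'photo_url': 'http://example.com/rex.jpg'})
# print([str(p) for p in Pet.get_list()])
# pet.update({'name': 'Rex', 'photo_url': 'http://example.com/rex-2.jpg'})
# pet.delete()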
```
#### File: examples/remote/views.py
```python
from django.urls import reverse, reverse_lazy
from cruditor.collection import CollectionViewMixin
from cruditor.views import (
CruditorAddView, CruditorChangeView, CruditorDeleteView, CruditorListView)
from examples.mixins import ExamplesMixin
from .forms import PetForm
from .models import Pet
class PetMixin(ExamplesMixin, CollectionViewMixin):
collection_list_title = 'Pets'
collection_list_urlname = 'remote:list'
collection_detail_urlname = 'remote:change'
model_verbose_name = 'Pet'
class PetListView(PetMixin, CruditorListView):
def get_titlebuttons(self):
return [{'url': reverse('remote:add'), 'label': 'Add pet'}]
def get_queryset(self):
return Pet.get_list()
class PetAddView(PetMixin, CruditorAddView):
form_class = PetForm
success_url = reverse_lazy('remote:list')
model_verbose_name = 'Pet'
class PetChangeView(PetMixin, CruditorChangeView):
form_class = PetForm
success_url = reverse_lazy('remote:list')
model_verbose_name = 'Pet'
def get_object(self):
return Pet.get(self.kwargs['pk'])
def get_delete_url(self):
return reverse('remote:delete', args=(self.object.pk,))
class PetDeleteView(PetMixin, CruditorDeleteView):
success_url = reverse_lazy('remote:list')
def get_object(self):
return Pet.get(self.kwargs['pk'])
```
#### File: django-cruditor/tests/test_filters.py
```python
from cruditor.filters import AnyChoiceFilter, MultiCharFilter
from examples.store.models import Person
class TestAnyChoiceFilter:
def test_default_label(self):
instance = AnyChoiceFilter('foo')
assert str(instance.extra['empty_label']) == 'Any choice'
def test_custom_label(self):
instance = AnyChoiceFilter('foo', empty_label='Anyting')
assert str(instance.extra['empty_label']) == 'Anyting'
class TestMultiCharFilter:
def test_init(self):
instance = MultiCharFilter(('foo', 'bar'))
assert instance.fields == ('foo', 'bar')
def test_filter(self):
instance = MultiCharFilter(('first_name', '^last_name'))
filters = instance.filter(
Person.objects.all(), 'foo').query.has_filters().children[0].children
assert filters[0].lhs.field.name == 'first_name'
assert filters[0].lookup_name == 'icontains'
assert filters[1].lhs.field.name == 'last_name'
assert filters[1].lookup_name == 'istartswith'
def test_skip_filter(self):
instance = MultiCharFilter(('first_name', '^last_name'))
assert len(instance.filter(
Person.objects.all(), '').query.has_filters().children) == 0
```
|
{
"source": "jeromelebleu/django-tapeforms",
"score": 2
}
|
#### File: tests/contrib/test_bootstrap.py
```python
from django import forms
from tapeforms.contrib.bootstrap import BootstrapTapeformMixin
class DummyForm(BootstrapTapeformMixin, forms.Form):
my_field1 = forms.CharField()
my_field2 = forms.BooleanField()
class TestBootstrapTapeformMixin:
def test_field_template(self):
form = DummyForm()
assert form.field_template == 'tapeforms/fields/bootstrap.html'
def test_field_container_css_class_default(self):
form = DummyForm()
assert form.get_field_container_css_class(
form['my_field1']) == 'form-group'
def test_field_container_css_class_checkbox(self):
form = DummyForm()
assert form.get_field_container_css_class(
form['my_field2']) == 'form-check'
def test_field_label_css_class_default(self):
form = DummyForm()
assert form.get_field_label_css_class(
form['my_field1']) is None
def test_field_label_css_class_checkbox(self):
form = DummyForm()
assert form.get_field_label_css_class(
form['my_field2']) == 'form-check-label'
def test_widget_css_class_default(self):
form = DummyForm()
assert form.get_widget_css_class(
'my_field1', form.fields['my_field1']) == 'form-control'
def test_widget_css_class_checkbox(self):
form = DummyForm()
assert form.get_widget_css_class(
'my_field2', form.fields['my_field2']) == 'form-check-input'
def test_widget_css_class_invalid(self):
form = DummyForm({})
form.full_clean()
css_classes = form.fields['my_field1'].widget.attrs['class'].split(' ')
assert 'is-invalid' in css_classes
assert 'form-control' in css_classes
def test_add_error(self):
form = DummyForm({})
form.add_error(None, 'Non field error!')
form.add_error('my_field1', 'Error!')
css_classes = form.fields['my_field1'].widget.attrs['class'].split(' ')
assert 'is-invalid' in css_classes
```
|
{
"source": "JeromeLeLan/celluloid",
"score": 3
}
|
#### File: celluloid/scripts/generateWallpapers.py
```python
import sys, os, argparse, shutil
from PIL import Image, ImageOps # pip install Pillow
POSTER_COUNT = 9999
# POSTER_COUNT = 50 # Uncomment to test on poster subset
ROW_SIZE = 7
COLUMN_SIZE = 3
BACKGROUND_COLOR = '#797877'
WATCHED_COLOR = '#4f4e4d'
IMAGE_WIDTH = 1288
IMAGE_HEIGHT = 1600
BORDER_SIZE = 75
scriptPath = os.path.dirname(os.path.realpath(__file__))
postersPath = os.path.join(scriptPath, '../posters')
wallpapersPath = os.path.join(postersPath, 'wallpapers')
borderedPath = os.path.join(wallpapersPath, 'bordered')
rowsPath = os.path.join(wallpapersPath, 'rows')
alphanumericOrder = lambda item: (int(item.partition(' ')[0]) if item[0].isdigit() else float('inf'), item)
def cleanUpFolder():
if os.path.exists(wallpapersPath):
shutil.rmtree(wallpapersPath)
os.makedirs(wallpapersPath)
os.makedirs(borderedPath)
os.makedirs(rowsPath)
def borderize():
posterCount = 0
posters = sorted(os.listdir(postersPath), key=alphanumericOrder)
for poster in posters:
filePath = os.path.join(postersPath, poster)
if not os.path.isfile(filePath) or not filePath.endswith('jpg'):
continue
print(poster)
img = Image.open(filePath)
img = img.resize((IMAGE_WIDTH,IMAGE_HEIGHT))
color = BACKGROUND_COLOR
if '@seen' in poster:
color = WATCHED_COLOR
img = ImageOps.expand(img, border=BORDER_SIZE, fill=color)
img.save(os.path.join(borderedPath, poster))
posterCount += 1
if posterCount > POSTER_COUNT:
break
paddingCount = (ROW_SIZE * COLUMN_SIZE) - posterCount % (ROW_SIZE * COLUMN_SIZE)
for i in range(paddingCount):
paddingPoster = str(posterCount + i + 2) + ' - padding.jpg'
img = Image.new('RGB', (IMAGE_WIDTH + 2 * BORDER_SIZE, IMAGE_HEIGHT + 2 * BORDER_SIZE), BACKGROUND_COLOR)
print(paddingPoster)
img.save(os.path.join(borderedPath, paddingPoster))
def createRows():
posters = sorted(os.listdir(borderedPath), key=alphanumericOrder)
row = []
rowCount = 1
for poster in posters:
print(poster)
if len(row) < ROW_SIZE:
row.append(Image.open(os.path.join(borderedPath, poster)))
if len(row) < ROW_SIZE:
continue
widths, heights = zip(*(img.size for img in row))
total_width = sum(widths)
max_height = max(heights)
rowImage = Image.new('RGB', (total_width, max_height))
rowOffset = 0
for img in row:
rowImage.paste(img, (rowOffset, 0))
rowOffset += img.size[0]
fileName = str(rowCount) + ' - row' + '.jpg'
rowImage.save(os.path.join(rowsPath, fileName))
row.clear()
rowCount += 1
def createWallpapers():
rows = sorted(os.listdir(rowsPath), key=alphanumericOrder)
column = []
wallpaperCount = 1
for row in rows:
print(row)
if len(column) < COLUMN_SIZE:
column.append(Image.open(os.path.join(rowsPath, row)))
if len(column) < COLUMN_SIZE:
continue
widths, heights = zip(*(img.size for img in column))
total_width = max(widths)
max_height = sum(heights)
wallpaper = Image.new('RGB', (total_width, max_height))
columnOffset = 0
for img in column:
wallpaper.paste(img, (0, columnOffset))
columnOffset += img.size[1]
wallpaper = ImageOps.expand(wallpaper, border=150, fill=BACKGROUND_COLOR)
fileName = 'wallpaper' + str(wallpaperCount) + '.jpg'
wallpaper.save(os.path.join(wallpapersPath, fileName))
column.clear()
wallpaperCount += 1
def main():
cleanUpFolder()
borderize()
createRows()
createWallpapers()
shutil.rmtree(borderedPath)
shutil.rmtree(rowsPath)
if __name__ == '__main__':
print(sys.version)
main()
```
|
{
"source": "JeromeLeLan/watched",
"score": 3
}
|
#### File: watched/data/generateMovieInfos.py
```python
import sys, time, json, urllib.request, math, collections, operator
apiFile = "../private/themoviedb.apikey"
def getTheMovieDBApiKey():
with open(apiFile, "rU") as f:
for line in f:
apiKey = line
return apiKey
apiKey = getTheMovieDBApiKey()
watchedFile = "watched.txt"
#watchedFile = "watchedSample.txt" # debug
exportFile = "../js/watched.json"
movieInfos = []
countryList = dict()
decadeList = dict()
genreList = dict()
actorList = dict()
directorList = dict()
cameraList = dict()
jsonResult = dict()
def addItemToDict(vDict, key, value, valueType):
if not key in vDict:
vDict[key] = dict()
vDict[key]["count"] = 0
vDict[key][valueType] = value
vDict[key]["count"] = vDict[key]["count"] + 1
def getMovieInfo(imdbId):
url = "https://api.themoviedb.org/3/movie/tt" + imdbId + \
"?api_key=" + apiKey + "&language=en-US"
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
movieInfo = json.load(response)
time.sleep(0.25)
return movieInfo
def getCreditsInfo(imdbId):
url = "https://api.themoviedb.org/3/movie/tt" + imdbId + \
"/credits?api_key=" + apiKey
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
credits = json.load(response)
time.sleep(0.25)
director = ""
for cast in credits["cast"]:
addItemToDict(actorList, cast["id"], cast["name"], "name")
for crew in credits["crew"]:
if crew["job"] == "Director":
director += crew["name"] + ", "
addItemToDict(directorList, crew["id"], crew["name"], "name")
if crew["job"] == "Director of Photography":
addItemToDict(cameraList, crew["id"], crew["name"], "name")
return director[0:len(director)-2]
def getDecade(movieInfo):
decade = str(int(math.floor(int(movieInfo["release_date"][0:4]) / 10) * 10)) + 's'
print(" - " + decade, flush=True)
decadeList[decade] = decadeList.get(decade, 0) + 1
def getCountry(movieInfo):
for country in movieInfo["production_countries"]:
countryISO = country["iso_3166_1"].replace("SU", "RU")
countryName = country["name"].replace("Soviet Union", "Russia")
addItemToDict(countryList, countryISO, countryName, "name")
def getGenre(movieInfo):
for genre in movieInfo["genres"]:
genreName = genre["name"]
genreList[genreName] = genreList.get(genreName, 0) + 1
def generateMovieInfos():
with open(watchedFile, "rU") as f:
for movie in f:
if '//' in movie:
continue
watchDate, imdbId, rating, useEnglishTitle = movie.strip().split(" ")
if movie:
print(imdbId, end='', flush=True)
movieInfo = getMovieInfo(imdbId)
movieInfoLight = dict()
if useEnglishTitle == "1":
movieInfoLight["original_title"] = movieInfo["title"]
else:
movieInfoLight["original_title"] = movieInfo["original_title"]
print("-> " + movieInfoLight["original_title"], end='', flush=True)
getDecade(movieInfo)
getCountry(movieInfo)
getGenre(movieInfo)
director = getCreditsInfo(imdbId)
movieInfoLight["imdb_id"] = movieInfo["imdb_id"]
movieInfoLight["release_date"] = movieInfo["release_date"][0:4]
#movieInfoLight["vote_average"] = movieInfo["vote_average"]
movieInfoLight["watchDate"] = watchDate
movieInfoLight["personalRating"] = rating
movieInfoLight["runtime"] = movieInfo["runtime"]
movieInfoLight["poster_path"] = movieInfo["poster_path"]
movieInfoLight["director"] = director.strip()
movieInfos.append(movieInfoLight)
jsonResult["countries"] = countryList
decadeListSorted = collections.OrderedDict(sorted(decadeList.items()))
jsonResult["decades"] = decadeListSorted
genreListSorted = sorted(genreList.items(), key=operator.itemgetter(1), reverse=True)
jsonResult["genres"] = collections.OrderedDict(genreListSorted)
actorListSorted = sorted(actorList.items(), key=lambda x: x[1].get('count'), reverse=True)
jsonResult["actors"] = actorListSorted[0:10]
directorListSorted = sorted(directorList.items(), key=lambda x: x[1].get('count'), reverse=True)
jsonResult["directors"] = directorListSorted
cameraListSorted = sorted(cameraList.items(), key=lambda x: x[1].get('count'), reverse=True)
jsonResult["cinematographers"] = cameraListSorted[0:10]
jsonResult["movies"] = movieInfos
file = open(exportFile, "w")
file.write(json.dumps(jsonResult, indent=4, separators=(',', ': ')))
file.close()
def main():
generateMovieInfos()
if __name__ == '__main__':
main()
```
|
{
"source": "Jerome-maker/ensae_teaching_cs",
"score": 3
}
|
#### File: competition/program/evaluate.py
```python
import os
import sys
if sys.version_info[0] == 2:
FileNotFoundError = Exception
def main_codalab_wrapper(fct, metric_name, argv, truth_file="truth.txt", submission_file="answer.txt", output_file="scores.txt"):
"""
adapt the tempate available at
`evaluate.py <https://github.com/Tivix/competition-examples/blob/master/hello_world/competition/scoring_program/evaluate.py>`_
"""
input_dir = argv[1]
output_dir = argv[2]
submit_dir = os.path.join(input_dir, 'res')
truth_dir = os.path.join(input_dir, 'ref')
if not os.path.isdir(submit_dir):
raise FileNotFoundError("%s doesn't exist" % submit_dir)
if os.path.isdir(submit_dir) and os.path.isdir(truth_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
private_codalab_wrapper(fct, metric_name,
fold1=truth_dir, f1=truth_file,
fold2=submit_dir, f2=submission_file,
output=os.path.join(output_dir, output_file))
else:
raise FileNotFoundError(
"{0} or {1} is not a folder".format(submit_dir, truth_dir))
def private_codalab_wrapper(fct, metric_name, fold1, fold2, f1="answer.txt", f2="answer.txt", output="scores.txt"):
"""
Wraps the function following the guidelines
`User_Building a Scoring Program for a Competition <https://github.com/codalab/codalab-competitions/wiki/User_Building-a-Scoring-Program-for-a-Competition>`_.
It replicates the example available at
`competition-examples/hello_world <https://github.com/Tivix/competition-examples/tree/master/hello_world/competition>`_.
@param fct function to wrap
@param metric_name metric name
    @param      fold1           folder containing the truth data
    @param      fold2           folder containing the submitted answers
@param f1 filename for the truth
@param f2 filename for the produced answers
@param output produces an output with the expected results
@return metric
"""
f1 = os.path.join(fold1, f1)
f2 = os.path.join(fold2, f2)
if not os.path.exists(f1):
raise FileNotFoundError("unable to find '{0}'".format(f1))
if not os.path.exists(f2):
raise FileNotFoundError("unable to find '{0}'".format(f2))
if f1 == f2:
raise ValueError(
"answers and scores are the same file: '{0}'".format(f1))
with open(f1, "r") as f:
lines = f.readlines()
answers = [float(_) for _ in lines if _]
print("Reading answers:", f1, len(answers), "rows")
print("First answers:", answers[:10])
with open(f2, "r") as f:
lines = f.readlines()
scores = [float(_) for _ in lines if _]
print("Reading scores:", f1, len(scores), "rows")
print("First scores:", scores[:10])
metric = fct(answers, scores)
res = "{0}:{1}".format(metric_name, metric)
print("Results=", res)
with open(output, "w") as f:
f.write(res)
print("Wrote", res, "in", output)
return metric
def AUC(answers, scores):
"""
    Compute the `AUC <https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_ (area under the ROC curve).
@param answers expected answers 0 (false), 1 (true)
@param scores score obtained for class 1
@return number
"""
ab = list(zip(answers, scores))
plus = [s for a, s in ab if a == 1]
moins = [s for a, s in ab if a != 1]
auc = 0
for p in plus:
for m in moins:
if p > m:
auc += 2
elif p == m:
auc += 1
den = len(plus) * len(moins)
if den == 0:
return 1.0 if len(moins) == 0 else 0.0
return auc * 1.0 / (len(plus) * len(moins) * 2)
if __name__ == "__main__":
if len(sys.argv) < 3:
raise Exception("bad arguments: {0}".format(sys.argv))
main_codalab_wrapper(AUC, "AUC", sys.argv)
```
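A minimal usage sketch for the pairwise `AUC` helper above. It assumes the file is saved as `evaluate.py` next to the caller so the function can be imported; the file name and the sample values are illustrative only.

```python
# Hypothetical usage sketch: assumes the code above is saved as evaluate.py
# in the same folder so that AUC can be imported.
from evaluate import AUC

answers = [1, 1, 0, 0, 1]            # expected labels (0 or 1)
scores = [0.9, 0.8, 0.3, 0.4, 0.2]   # scores produced for class 1
# 4 of the 6 (positive, negative) pairs are correctly ordered: 8 / 12
print(AUC(answers, scores))          # 0.666...
```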
#### File: notebooks/sklearn_ensae_course/helpers.py
```python
from sklearn import neighbors, datasets, linear_model
import pylab as pl
import numpy as np
from matplotlib.colors import ListedColormap
# Create color maps for 3-class classification problem, as with iris
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
def plot_iris_knn():
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
knn = neighbors.KNeighborsClassifier(n_neighbors=3)
knn.fit(X, y)
x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
np.linspace(y_min, y_max, 100))
Z = knn.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure()
pl.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
pl.xlabel('sepal length (cm)')
pl.ylabel('sepal width (cm)')
pl.axis('tight')
def plot_polynomial_regression():
rng = np.random.RandomState(0)
x = 2 * rng.rand(100) - 1
def f(t): return 1.2 * t**2 + .1 * t**3 - .4 * t ** 5 - .5 * t ** 9
y = f(x) + .4 * rng.normal(size=100)
x_test = np.linspace(-1, 1, 100)
pl.figure()
pl.scatter(x, y, s=4)
X = np.array([x**i for i in range(5)]).T
X_test = np.array([x_test**i for i in range(5)]).T
regr = linear_model.LinearRegression()
regr.fit(X, y)
pl.plot(x_test, regr.predict(X_test), label='4th order')
X = np.array([x**i for i in range(10)]).T
X_test = np.array([x_test**i for i in range(10)]).T
regr = linear_model.LinearRegression()
regr.fit(X, y)
pl.plot(x_test, regr.predict(X_test), label='9th order')
pl.legend(loc='best')
pl.axis('tight')
pl.title('Fitting a 4th and a 9th order polynomial')
pl.figure()
pl.scatter(x, y, s=4)
pl.plot(x_test, f(x_test), label="truth")
pl.axis('tight')
pl.title('Ground truth (9th order polynomial)')
```
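A short sketch of how the two plotting helpers above would typically be called from a notebook. It assumes scikit-learn and a matplotlib backend are available and that the file is importable as `helpers`; these are assumptions about the local setup, not part of the original file.

```python
# Hypothetical usage sketch: assumes the file above is importable as helpers
# and that a matplotlib backend is available to show the figures.
import pylab as pl
from helpers import plot_iris_knn, plot_polynomial_regression

plot_iris_knn()                # decision regions of a 3-NN classifier on iris
plot_polynomial_regression()   # 4th and 9th order fits on noisy data
pl.show()
```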
#### File: ensae_teaching_cs/automation_students/git_helper.py
```python
import os
import re
from pyquickhelper.loghelper import noLOG, run_cmd
from pyquickhelper.texthelper import remove_diacritics
def git_clone(local_folder, url_https, user=None, password=None, timeout=60,
init=True, fLOG=noLOG):
"""
Clones a project from a git repository in a non empty local folder,
it requires `GIT <http://git-scm.com/>`_ to be installed
and uses the command line.
@param local_folder local folder of the project
@param url_https url, example ``https://gitlab.server/folder/project_name``
@param user part 1 of the credentials
@param password part 2 of the credentials
@param timeout timeout for the command line
@param init see below (True, use fetch, False, use clone)
@param fLOG logging function
@return local_folder
If the repository has already been cloned, it does not do it again.
We assume that git can be run without giving its full location.
The function executes the following commands (if init is True)::
cd [folder]
git init
git remote add origin [https://user.password@server/project.git]
git fetch
Otherwise, it does::
cd [folder]
git clone origin [https://user.password@server/project.git]
git fetch
A folder will be created.
.. exref::
:tag: Automation
:title: Clone many folders in one row
::
eleves = "project1;project2;..."
root = r"destination"
for el in eleves.split(";"):
cl = el.lower().replace(".","-")
fold = os.path.join(root, el)
if not os.path.exists(fold):
print("clone", el)
url = "https://<gitlab>/<group>/{0}.git".format(cl)
git_clone( fold, url,user=user,password=password, init=False,fLOG=print)
"""
url_user = git_url_user_password(url_https, user, password)
timeout = 60
local_folder = os.path.normpath(os.path.abspath(local_folder))
if init:
if not os.path.exists(local_folder):
fLOG("creating folder", local_folder)
os.mkdir(local_folder)
hg = os.path.join(local_folder, ".git")
if os.path.exists(hg):
raise Exception("folder {0} should not exist".format(local_folder))
if not os.path.exists(hg):
cmds = """
cd {0}
git init
git remote add origin {1}
git fetch
""".format(local_folder, url_user).replace(" ", "").strip(" \n\r\t")
cmd = cmds.replace("\n", "&")
sin = "" # "{0}\n".format(password)
out, err = run_cmd(
cmd, sin=sin, wait=True, timeout=timeout, fLOG=fLOG)
git_check_error(out, err, fLOG)
return local_folder
else:
if not os.path.exists(local_folder):
fLOG("creating folder", local_folder)
os.mkdir(local_folder)
hg = os.path.join(local_folder, ".git")
if os.path.exists(hg):
raise Exception("folder {0} should not exist".format(local_folder))
final = os.path.split(url_user)[-1].replace(".git", "")
locf = os.path.join(local_folder, final)
if os.path.exists(locf):
raise Exception(
"folder {0} should not exists before cloning".format(locf))
cmds = """
cd {0}
git clone {1} .
""".format(local_folder, url_user).replace(" ", "").strip(" \n\r\t")
cmd = cmds.replace("\n", "&")
sin = "" # "{0}\n".format(password)
out, err = run_cmd(cmd, sin=sin, wait=True, timeout=timeout, fLOG=fLOG)
git_check_error(out, err, fLOG)
return locf
def git_change_remote_origin(local_folder, url_https, user=None, password=None,
add_fetch=False, timeout=10, fLOG=noLOG):
"""
Changes the origin of the repository. The url and the password
refer to the new repository.
@param local_folder local folder
@param url_https url, example ``https://gitlab.server/folder/project_name``
@param user part 1 of the credentials
@param password part 2 of the credentials
@param timeout timeout for the command line
@param add_fetch add instruction ``fetch``
@param fLOG logging function
@return something
The function runs the instruction::
git remote remove origin
git remote add origin url
"""
url_user = git_url_user_password(url_https, user, password)
cmds = """
cd {0}
git remote remove origin
git remote add origin {1}
""".format(local_folder, url_user).replace(" ", "").strip(" \n\r\t")
if add_fetch:
cmds += "\ngit fetch"
cmd = cmds.replace("\n", "&")
sin = "" # "{0}\n".format(password)
out, err = run_cmd(cmd, sin=sin, wait=True, timeout=timeout, fLOG=fLOG)
git_check_error(out, err, fLOG)
def git_commit_all(local_folder, url_https, message, user=None,
password=None, timeout=300, fLOG=noLOG):
"""
From a git repository,
it requires `GIT <http://git-scm.com/>`_ to be installed
and uses the command line.
@param local_folder local folder of the project
@param url_https url, example ``https://gitlab.server/folder/project_name``
@param message message for the commit
@param user part 1 of the credentials
@param password part 2 of the credentials
@param timeout timeout for the command line
@param fLOG logging function
@return None
If the repository has already been cloned, it does not do it again.
We assume that git can be run without giving its full location.
The function executes the following commands::
cd [folder]
git add -A
git commit -m "[message]"
git push -u origin master
"""
cmds = """
cd {0}
git add -A
git commit -m "{1}"
git push -u origin master
""".format(local_folder, message).replace(" ", "").strip(" \n\r\t")
cmd = cmds.replace("\n", "&")
sin = "" # "{0}\n".format(password)
out, err = run_cmd(cmd, sin=sin, wait=True, timeout=timeout, fLOG=fLOG)
git_check_error(out, err, fLOG)
def git_first_commit_all_projects(local_folder, user=None, password=None,
timeout=300, suivi="suivi.rst", fLOG=noLOG):
"""
@param local_folder folder
@param user part 1 of the credentials
@param password part 2 of the credentials
@param timeout timeout for the command line
@param suivi file to open to get the gitlab account
@param fLOG logging function
@return None or ( local_folder, gitlab )
"""
if not os.path.exists(local_folder):
raise FileNotFoundError(local_folder)
filename = os.path.join(local_folder, suivi)
if not os.path.exists(filename):
raise FileNotFoundError(filename)
with open(filename, "r", encoding="utf8") as f:
content = f.read()
_gitlab_regex = re.compile(".+1.*")
gitlab = _gitlab_regex.findall(content)
if len(gitlab) == 0:
raise Exception(
"unable to find the regular expression {0} in {1}".format(
_gitlab_regex.pattern,
filename))
if not isinstance(gitlab, list):
raise TypeError("we expect a list for: " + str(gitlab))
if len(gitlab) != 1:
raise Exception(
"more than one gitlab repo is mentioned {0} in {1}".format(
_gitlab_regex.pattern,
filename))
gitlab = gitlab[0]
fLOG("* gitlab", gitlab)
g = os.path.join(local_folder, ".git")
commit = None
if not os.path.exists(g):
fLOG("* initialize", local_folder)
git_clone(local_folder, gitlab,
user=user, password=password, fLOG=fLOG)
sub = os.path.split(local_folder)[-1]
fLOG("* first commit ", gitlab)
git_commit_all(local_folder, gitlab,
"first commit to " + sub,
user=user, password=password, fLOG=print)
commit = local_folder, gitlab
return commit
def create_folders_from_dataframe(df, root, report="suivi.rst", col_student="Eleves",
col_group="Groupe", col_subject="Sujet",
overwrite=False, email_function=None):
"""
Creates a series of folders for groups of students.
@param root where to create the folders
@param      col_student     column which contains the student name (first name + last name)
@param      col_group       index of the group
@param col_subject column which contains the subject
@param df DataFrame
@param email_function function which infers email from first and last names, see below
@param report report file
@param overwrite if False, skip if the report already exists
@return                     list of created folders
The function *email_function* has the following signature::
def email_function(first_name, last_name):
# ....
"""
def split_name(name):
name = remove_diacritics(name).split(" ")
first = name[-1]
last = " ".join(name[:-1])
return first, last
def ul(last):
res = ""
for i, c in enumerate(last):
if c == " ":
res += "_"
elif i == 0 or last[i - 1] in [" ", "-", "_"]:
res += c.upper()
else:
res += c.lower()
return res
folds = []
gr = df.groupby(col_group)
for name, group in gr:
s = list(set(group[col_subject].copy()))
if len(s) > 1:
raise Exception(
"more than one subject for group: " + str(name) + "\n" + str(s))
# subject = s[0]
eleves = list(group[col_student])
names = [(_,) + split_name(_) for _ in eleves]
eleves.sort()
title = ", ".join(eleves)
content = [title]
content.append("=" * len(title))
content.append("")
content.append("* subject: " + title)
content.append("* G: %d" % int(name))
if email_function is not None:
mails = [email_function(a[1], a[2]) for a in names]
jmail = "; ".join(mails)
content.append("* mails: " + jmail)
content.append("")
content.append("")
last = ".".join(ul(a[-1]) for a in sorted(names))
folder = os.path.join(root, last)
filename = os.path.join(folder, report)
if not os.path.exists(folder):
os.mkdir(folder)
if overwrite or not os.path.exists(filename):
with open(filename, "w", encoding="utf8") as f:
f.write("\n".join(content))
folds.append(folder)
return folds
def get_sections(path, suivi="suivi.rst"):
"""
Extracts sections from a filename used to follow a group of students.
@param path where to find filename
@param suivi file, RST format, section are followed by ``+++++``
@return dictionary { section : content }
Example of a file::
rapport
+++++++
* bla 1
extrait
+++++++
::
paragraphe 1
paragraphe 2
"""
if not os.path.exists(path):
raise FileNotFoundError(path)
filename = os.path.join(path, suivi)
if not os.path.exists(filename):
raise FileNotFoundError(filename)
try:
with open(filename, "r", encoding="utf8") as f:
content = f.read()
except UnicodeDecodeError as e:
raise ValueError(
'unable to parse file:\n File "{0}", line 1'.format(filename)) from e
lines = [_.strip("\r").rstrip() for _ in content.split("\n")]
added_in = []
sections = {"": []}
title = ""
for i, line in enumerate(lines):
if len(line) == 0:
sections[title].append(line)
added_in.append(title)
else:
f = line[0]
if f == " ":
if title is not None:
sections[title].append(line)
added_in.append(title)
else:
sections[""].append(line)
added_in.append("")
elif f in "=+-":
if line == f * len(line):
title = lines[i - 1]
if len(added_in) > 0:
t = added_in[-1]
sections[t] = sections[t][:-1]
added_in[-1] = title
if f == "=":
sections["title"] = [title]
added_in.append("title")
title = "title"
else:
sections[title] = []
added_in.append(title)
else:
sections[title].append(line)
added_in.append(title)
else:
sections[title].append(line)
added_in.append(title)
return sections
def git_url_user_password(url_https, user, password):
"""
Builds a url (starting with https) and adds the user and the password
to skip the authentication.
:param url_https: example ``https://gitlab.server/folder/project_name``
:param user: part 1 of the credentials
:param password: part 2 of the credentials
:return: url
"""
url_user = url_https.replace(
"https://", "https://{0}:{1}@".format(user, password))
return url_user
def git_check_error(out, err, fLOG):
"""
Private function, analyse the output.
"""
if len(out) > 0:
fLOG("OUT:\n" + out)
if len(err) > 0:
if "error" in err.lower():
raise Exception("OUT:\n{0}\nERR:\n{1}".format(out, err))
raise Exception(err)
```
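Among the helpers above, `git_url_user_password` is the only one without side effects, so it is the easiest to illustrate. The credentials below are placeholders, and the import path is an assumption based on the package layout shown in the file header.

```python
# Hypothetical usage sketch; the credentials are placeholders only.
from ensae_teaching_cs.automation_students.git_helper import git_url_user_password

url = git_url_user_password(
    "https://gitlab.server/folder/project_name", "user", "secret")
print(url)  # https://user:secret@gitlab.server/folder/project_name
```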
#### File: ensae_teaching_cs/automation_students/quick_tasks.py
```python
def build_mailing_list(names, domain, format="{first}.{last}@{domain}"):
"""
Infers mails from a list of names.
@param names list of strings
@param domain something like ``ensae.fr``.
@param format mail format
@return list of mails
Examples :
::
DUPRE Xavier
Everything upper case is the last name,
everything lower case is the first name.
"""
mails = []
for name in names:
words = name.split()
first = []
last = []
for w in words:
if w.upper() == w:
last.append(w)
else:
first.append(w)
first = ".".join(s.lower() for s in first)
last = ".".join(s.lower() for s in last)
mail = format.format(first=first, last=last, domain=domain)
mails.append(mail)
return mails
```
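`build_mailing_list` is a pure function, so a small sketch is enough to show the upper-case/lower-case convention described in its docstring. The names and domain below are made up, and the function is assumed to be in scope (pasted or imported from quick_tasks.py).

```python
# Minimal sketch: upper-case words are treated as the last name,
# capitalized lower-case words as the first name.
names = ["DUPRE Xavier", "MARTIN DURAND Anne"]
print(build_mailing_list(names, "ensae.fr"))
# ['xavier.dupre@ensae.fr', 'anne.martin.durand@ensae.fr']
```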
#### File: ensae_teaching_cs/automation/teaching_modules.py
```python
def get_teaching_modules():
"""
List of teachings modules to maintain (CI + documentation).
.. runpython::
:showcode:
from ensae_teaching_cs.automation import get_teaching_modules
print('\\n'.join(sorted(get_teaching_modules())))
"""
return ["pymlbenchmark", "_benchmarks", "ensae_teaching_dl", "machinelearningext",
"lecture_citation", "botadi", "pyquickhelper", "jyquickhelper",
"python3_module_template", "mathenjeu", "pymmails", "pymyinstall",
"pyensae", "pyrsslocal", "pysqllike", "ensae_projects", "ensae_teaching_cs",
"code_beatrix", "actuariat_python", "mlstatpy", "jupytalk", "teachpyx",
"tkinterquickhelper", "cpyquickhelper", "pandas_streaming",
"lightmlboard", "lightmlrestapi", "mlinsights", "pyenbc", "mlprodict",
"papierstat", "sparkouille", "manydataapi", "csharpy", "csharpyml",
"wrapclib", "myblog"
]
```
#### File: ensae_teaching_cs/data/crypt_helper.py
```python
from pyquickhelper.filehelper.encryption import encrypt_stream, decrypt_stream
def encrypt_data(password, input, output):
"""
Encrypts a file.
@param input input filename
@param output output filename
@param password The encryption key - a string that must be either 16, 24 or 32
bytes long. Longer keys are more secure. If the data to encrypt
is in bytes, the key must be given in bytes too.
"""
if not isinstance(password, bytes):
password = bytes(password, "ascii")
encrypt_stream(password, input, output)
def decrypt_data(password, input, output):
"""
Decrypts a file.
@param input input filename
@param output output filename
@param password The encryption key - a string that must be either 16, 24 or 32
bytes long. Longer keys are more secure. If the data to encrypt
is in bytes, the key must be given in bytes too.
"""
if not isinstance(password, bytes):
password = bytes(password, "ascii")
decrypt_stream(password, input, output)
```
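A hedged sketch of the two wrappers above. It assumes pyquickhelper is installed and that a file named `clear.txt` exists; the 16-character key is a dummy value.

```python
# Hypothetical sketch: 'clear.txt' must exist and pyquickhelper must be installed.
from ensae_teaching_cs.data.crypt_helper import encrypt_data, decrypt_data

key = "0123456789abcdef"                      # 16 bytes once encoded in ascii
encrypt_data(key, "clear.txt", "clear.bin")
decrypt_data(key, "clear.bin", "clear2.txt")  # clear2.txt == clear.txt
```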
#### File: ensae_teaching_cs/data/data_ts.py
```python
from datetime import datetime, timedelta
import numpy
def generate_sells(duration=730, end=None,
week_coef=None, month_coef=None,
trend=1.1):
"""
Generates dummy data and trends and seasonality.
"""
if week_coef is None:
week_coef = numpy.array([0.1, 0.12, 0.12, 0.15, 0.20, 0., 0.])
week_coef[5] = 1. - week_coef.sum()
if month_coef is None:
month_coef = [0.8, 1, 1, 1, 1, 1,
0.8, 0.6, 1, 1, 1, 1.5]
month_coef = numpy.array(month_coef)
month_coef /= month_coef.sum()
if end is None:
end = datetime.now()
begin = end - timedelta(duration)
day = timedelta(1)
rows = []
rnd = (numpy.random.randn(duration + 1) * 0.1) + 1
exp = (1 + numpy.exp(- numpy.arange(duration + 1) / duration * trend)) ** (-1)
pos = 0
while begin <= end:
month = begin.month
weekd = begin.weekday()
value = rnd[pos] * week_coef[weekd] * month_coef[month - 1] * exp[pos]
pos += 1
obs = dict(date=begin, value=value)
rows.append(obs)
begin += day
return rows
```
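A small sketch showing how the dummy time series above is usually consumed, assuming pandas is installed and `generate_sells` is in scope.

```python
# Minimal sketch: wrap the generated rows into a DataFrame for inspection.
import pandas

rows = generate_sells(duration=30)
df = pandas.DataFrame(rows)
print(df.head())           # columns: date, value
print(df['value'].sum())
```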
#### File: ensae_teaching_cs/data/dataweb.py
```python
from io import StringIO
import pandas
from .data_helper import any_local_file
def anyfile(name, local=True, cache_folder=".", filename=True, unzip=False, encoding=None):
"""
Returns any file in sub folder
`data_web <https://github.com/sdpython/ensae_teaching_cs/tree/master/src/ensae_teaching_cs/data/data_web>`_.
@param name file to download
@param local local data or web
@param cache_folder where to cache the data if downloaded a second time
@param filename return the filename (True) or the content (False)
@param unzip unzip the file
@param encoding encoding
@return text content (str)
"""
return any_local_file(name, "data_web", cache_folder=cache_folder, filename=filename, unzip=unzip, encoding=encoding)
def google_trends(name="macron", local=True, cache_folder=".", filename=True):
"""
Returns some google trends example.
See :func:`ensae_teaching_cs.data.dataweb.anyfile` to
directly download it.
@param name expression
@param local local data or web
@param cache_folder where to cache the data if downloaded a second time
@param filename return the filename (True) or the content (False)
@return text content (str)
"""
return anyfile("google_trends_%s.csv" % name, local=local, cache_folder=cache_folder, filename=filename)
def twitter_zip(name="tweets_macron_sijetaispresident_201609", local=True, cache_folder=".",
filename=False, unzip=True, as_df=True, encoding="utf-8"):
"""
Returns zipped twitter.
See :func:`ensae_teaching_cs.data.dataweb.anyfile` to
directly download it.
@param name filename
@param local local data or web
@param cache_folder where to cache or unzip the data if downloaded a second time
@param filename return the filename (True) or the content (False)
@param unzip unzip the file
@return text content (str)
"""
res = anyfile(name + ".zip", local=local,
cache_folder=cache_folder, filename=filename, unzip=unzip, encoding=encoding)
if as_df:
st = StringIO(res)
return pandas.read_csv(st, sep="\t")
else:
if isinstance(res, list):
if len(res) > 1:
raise ValueError("too many files: {0}".format(res))
res = res[0]
return res
```
#### File: ensae_teaching_cs/doc/regex.py
```python
from io import StringIO
import pandas
def regex_cases():
text = """
Cas;Explications
-------;Bases
"a";a
-------;Quantificateurs
"abc?";ab suivi par 0 ou 1 c
"abc*";ab suivi par 0.. c
"abc+";ab suivi par 1.. c
"abc{3}";ab suivi par 3 c
"abc{3,5}";ab suivi par 3, 4 ou 5 c
;Groupes
"(abc)+";1..8 abc
"(a|b)c";ac ou bc
-------;Intervalles (type 1)
".";n'importe quel caractère (un seul)
"[aB9]";a ou B ou 9
"[0-9]";n'importe quel caractère numérique
"[a-zA-Z]";n'importe quel caractère alphabétique
"[^a-c]";n'importe quel caractère SAUF a, b et c
-------;Intervalles (type 2)
"\\d";comme "[0-9]"
"\\w";comme "[a-zA-Z0-9_]"
"\\W";comme "[^a-zA-Z0-9_]"
"\\s";espaces (" ", "\\n", "\\t", "\\r")
"\\S";tout ce qui n'est pas un espace
-------;Ancres
"^abc";commence par "abc"
"abc$";termine par "abc"
""".replace(" ", "")
df = pandas.read_csv(StringIO(text), sep=";").fillna('')
return df
```
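The table built by `regex_cases` only lists patterns; the short check below exercises a couple of rows with the standard `re` module (the sample strings are arbitrary).

```python
# Quick check of two rows of the table above with the standard re module.
import re

print(bool(re.match(r"abc{3}$", "abccc")))    # True: ab followed by exactly 3 c
print(bool(re.match(r"abc{3}$", "abcc")))     # False: only 2 c
print(bool(re.search(r"[0-9]+", "room 42")))  # True: contains digits
```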
#### File: ensae_teaching_cs/homeblog/buildkeywords.py
```python
import re
import os
import xml.dom.minidom
from pyquickhelper.loghelper import fLOG
from .modifypost import load_and_modify_xml_dom
from .filefunction import find_all_blogs_function
def removeAccent(s):
return re.sub("([^~+'.0-9,ea-zA-Z&; -])", "", s)
def removeAccent_debug(s):
return re.sub("([^~+'.#çôéàèâû0-9,ea-zA-Z&; -])", "", s)
def removeHtmlAccent(s):
s = s.replace("é", "é") \
.replace("à", "à") \
.replace("â", "â") \
.replace("ê", "ê") \
.replace("ô", "ô") \
.replace("è", "è") \
.replace("ç", "ç") \
.replace("û", "û")
return s
def FixIssuesWithAccent(text):
"""
see http://migo.sixbit.org/more/html-entities.html
http://www.thesauruslex.com/typo/eng/enghtml.htm
::
é = é = é
è = è = è
à = Ã = à
ï = ï = ï
ô = ô = ô
ç = ç = ç
ê = ê = ê
ù = ù = ù
æ = æ = æ
œ = Å = œ
ë = ë = ë
ü = ü = ü
â = â = â
€ = ⬠= €
© = © = ©
¤ = ¤ = ¤
"""
o = text
correspondance = [
("ã©", "é"),
("ô", "ô"),
("â", "â"),
("î", "î"),
("è", "è"),
("ê", "ê"),
("â", "â"),
("ç", "ç"),
("Ã ", "à "),
("\xE9", "é"),
("\xE0", "à"),
("\xA0", "à"),
("\xE8", "è"),
("\xA8", "è"),
("\xF4", "ô"),
("\xB4", "ô"),
("\xFB", "û"),
("\xC3\xAA", "ê"),
("\xC3\xAE", "î"),
("\xAE", "î"),
("\xEE", "î"),
("\xEA", "ê"),
("\xAA", "ê"),
("Ã", "à"),
]
for k, v in correspondance:
text = text.replace("\xC3" + k, v).replace("\xE3" + k, v)
text = text.replace(k, v)
if len(removeAccent_debug(text)) != len(text) and len(text) < 50:
fLOG("FixIssuesWithAccent", o.encode("utf8"), text.encode("utf8"))
fLOG("FixIssuesWithAccent", o, text)
raise ValueError("unable to deal with " +
str([text, [text], removeAccent_debug(text), text.encode("utf8")]))
return text
def modify_all_blogs_list_in_place(folder=".",
mainpage=os.path.join(
"blog", "xd_blog.html"),
outmainpage=os.path.join(
"blog", "xd_blog.html"),
allow_temp=False):
file = find_all_blogs_function(folder, allow_temp=allow_temp)
file = [os.path.split(_)[-1].replace(".html", "") for _ in file]
f = open(mainpage, "r", encoding="utf8")
cont = f.read()
f.close()
trois = cont.split("//////////////////////////////////////////")
assert len(trois) == 3
file.sort(reverse=True)
trois[1] = "\n" + ",\n".join(["\"%s\"" % _ for _ in file]) + "\n"
cont = "//////////////////////////////////////////".join(trois)
f = open(outmainpage, "w", encoding="utf8")
f.write(cont)
f.close()
def file_all_keywords(folder=".",
mainpage=os.path.join("blog", "xd_blog.html"),
outmainpage=os.path.join("blog", "xd_blog.html"),
exclude=None, allow_temp=False):
keepfile = find_all_blogs_function(folder, exclude, allow_temp=allow_temp)
if len(keepfile) == 0:
raise Exception("no found file")
hist = {}
store_keywords = {}
files = []
for f in keepfile:
dom = load_and_modify_xml_dom(f, None)
meta = dom.documentElement.getElementsByTagName("meta")
node = [_ for _ in meta if "name" in _.attributes and _.attributes[
"name"].value == "keywords"]
keywords = [_.strip() for _ in node[0].attributes[
"content"].value.split(",")]
keywords.sort()
store_keywords[f] = keywords
for k in keywords:
k = k.strip()
hist[k] = hist.get(k, 0) + 1
res = [(v, k) for k, v in hist.items() if v > 1]
res.sort(reverse=True)
# tag
f = open(mainpage, "r", encoding="utf8")
cont = f.read()
f.close()
trois = cont.split("////////////###########")
trois[1] = "\n" + ",\n".join(["[\"%s (%d)\",\"%s\"]" %
(FixIssuesWithAccent(k), v, removeAccent(k)) for v, k in res]) + "\n"
cont = "////////////###########".join(trois)
# documents
trois = cont.split("////////////---------------------")
rows = []
for k, v in res:
files = []
text = '"%s":' % removeAccent(v)
for f in keepfile:
keywords = store_keywords[f]
if v in keywords:
files.append(f)
files = [os.path.split(_)[-1].replace(".html", "") for _ in files]
files.sort(reverse=True)
files = ['"%s"' % _ for _ in files]
text += "[ %s ] " % ", ".join(files)
rows.append(text)
trois[1] = "\n" + ",\n".join([_ for _ in rows]) + "\n"
cont = "////////////---------------------".join(trois)
# rev keywords
trois = cont.split("////////////+++++++++++++++++")
rows = []
for k, v in res:
text = removeAccent(v)
rows.append('"%s":"%s"' % (text, FixIssuesWithAccent(v)))
trois[1] = "\n" + ",\n".join([_ for _ in rows]) + "\n"
cont = "////////////+++++++++++++++++".join(trois)
f = open(outmainpage, "w", encoding="utf8")
f.write(cont)
f.close()
modify_all_blogs_list_in_place(
folder, outmainpage, outmainpage, allow_temp=allow_temp)
return store_keywords
def build_bloc_keywords(res, frequence_threshold, rootfile):
"""
builds the keywords block
@param      res                     ....
@param      frequence_threshold     number of times a keyword needs to appear before being added to the sidebar
"""
keywords = {}
for a, b in res.items():
for _ in b:
keywords[_] = keywords.get(_, 0) + 1
keywords = [(b, a) for a, b in keywords.items()]
keywords.sort(reverse=True)
text = []
for a, b in keywords:
if a >= frequence_threshold:
s = '<p class="keywordtitle"><a href="%s_%s.html" target="_parent">%s</a> (%d)</p>' % \
(rootfile, removeAccent(b), FixIssuesWithAccent(b), a)
text.append(s)
return "\n".join(text), keywords
def build_bloc_months(res, rootfile):
"""
builds the months block (we assume the page name is YYYY-MM-DD-something-.html)
@param res list of blog per months
@param rootfile files location
"""
months = {}
for a, b in res.items():
month = os.path.split(a)[-1][:7]
months[month] = months.get(month, 0) + 1
months = [(a, str(b)) for a, b in months.items()]
months.sort(reverse=True)
text = []
year = None
for a, b in months:
if year is not None and a[:4] != year:
text.append('<p class="smallspace">.</p>')
s = '<p class="monthtitle"><a href="%s_%s.html" target="_parent">%s</a> (%s)</p>' % \
(rootfile, a, a, b)
text.append(s)
year = a[:4]
months = [(b, a) for a, b in months]
return "\n".join(text), months
def replace_xml_in_template_using_dom_dirty(dom, node, newvalue):
xmltext = node.toxml()
allxml = dom.documentElement.toxml()
pos = allxml.find(xmltext)
if pos == -1:
raise ValueError("unable to replace")
allxml = allxml.replace(xmltext, newvalue)
res = xml.dom.minidom.parseString(allxml)
return res
def get_node_div(template, cl):
sidebar = template.documentElement.getElementsByTagName("div")
sidebar = [_ for _ in sidebar if "class" in _.attributes]
sidebar = [_ for _ in sidebar if _.attributes["class"].value == cl]
if len(sidebar) != 1:
raise ValueError("issue with HTML format: " +
cl + ", " + str(len(sidebar)))
sidebar = sidebar[0]
return sidebar
def generate_html_article(res,
templateFile,
toFolder,
overwrite=False,
aggregatedFile=None,
maxAggregrate=15,
keywordsText=None,
otherLayer=None):
fileToReturn = []
if not os.path.exists(toFolder):
raise FileNotFoundError("not found " + toFolder)
# group files or not
toprocess = []
if aggregatedFile is not None:
counter = 0
stackFile = []
for file in sorted(res, reverse=True):
stackFile.append(file)
if len(stackFile) == maxAggregrate:
fileOutName = "%s_%04d.html" % (aggregatedFile.replace(".html", ""), counter) if counter > 0 \
else aggregatedFile
fileOutName = os.path.join(toFolder, fileOutName)
stackFile.sort(reverse=True)
toprocess.append((stackFile, fileOutName))
counter += len(stackFile)
stackFile = []
if len(stackFile) > 0:
fileOutName = "%s_%04d.html" % (aggregatedFile.replace(".html", ""), counter) if counter > 0 \
else aggregatedFile
fileOutName = os.path.join(toFolder, fileOutName)
stackFile.sort(reverse=True)
toprocess.append((stackFile, fileOutName))
else:
# we process all files, each of them gives a file
for file in sorted(res, reverse=True):
filename = os.path.split(file)[-1].replace(".html", "_nojs.html")
filename = os.path.join(toFolder, filename)
toprocess.append(([file], filename))
# updating the sidebar
template = load_and_modify_xml_dom(templateFile, None, False)
templateText = template.documentElement.toxml()
title_to_rep = template.documentElement.getElementsByTagName("title")[
0].toxml()
# all files to process are now in the list
for indexProcess, couple in enumerate(toprocess):
files, filename = couple
stackContent = []
scripthtml = ""
replacetitle = None
for file in files:
dom = load_and_modify_xml_dom(file, None)
date = os.path.split(file)[-1][:10]
title = dom.documentElement.getElementsByTagName("title")[
0].toxml()
if "XD blog" in title:
raise ValueError("a blog contains a bad title: " + file)
if len(files) == 1:
# in that case, we want to change the page title
replacetitle = title
title = title.replace("title>", "h2>")
link = '<a href="%s_nojs.html"><b>%s</b></a>' % (date, date)
title = title.replace("<h2>", "<h2>" + link + " ")
scripts = dom.documentElement.getElementsByTagName("script")
if len(scripts) > 1:
scr = [""] + [_.toxml() for _ in scripts]
scripthtml += "\n".join(scr)
b = dom.documentElement.getElementsByTagName("body")[0]
body = b.toxml()
body = body[6:]
body = body[:-7]
if len(files) > 1 and '<!-- CUT PAGE HERE -->' in body:
# here we deal with shortcuts except if we process a single
# document
body = body.split('<!-- CUT PAGE HERE -->')[0]
body += "<br />" + \
'<a href="%s_nojs.html">%s</a>' % (date, "more...")
if len(body.strip()) == 0:
raise ValueError("empty body for " + file)
stackContent.append(title + "\n" + body)
keywords = res[file]
# we
uniqueKeys = [_ for _ in set(keywords) if not _.startswith("~")]
uniqueKeys.sort()
keystext = ", ".join(uniqueKeys)
nextPage = ""
if indexProcess > 0:
nextPage += '<a href="%s"><i><--</i></a> ' % (
os.path.split(toprocess[indexProcess - 1][1])[-1])
if indexProcess < len(toprocess) - 1:
nextPage += '<a href="%s"><i>--></i></a> ' % (
os.path.split(toprocess[indexProcess + 1][1])[-1])
if keywordsText is not None:
keystext = keywordsText
# inside
post = templateText.replace(
"<!-- article here -->", "\n".join(stackContent))
post = post.replace(
'<a href="xd_blog_nojs_DDD.html"><i>suite</i></a>', nextPage)
post = post.replace("<!-- javascript here -->", scripthtml)
post = post.replace("<!-- article keywords -->", keystext)
post = post.replace("### KEYWORDS ###", keystext)
post = post.replace("### keywords ###", keystext)
enabled = False
if enabled:
olayer = '<p class="keywordtitle"><a href="xd_blog.html?date=%s">Other Layer</a></p>' % date \
if otherLayer is None else \
'<p class="keywordtitle"><a href="%s">Other Layer</a></p>' % otherLayer
post = post.replace("<!-- other layer -->", olayer)
# it does not work (pages too big)
post = '<?xml version="1.0" encoding="utf-8"?>\n' + post
post = post.replace('type="text/javascript"/>',
'type="text/javascript"></script>')
post = FixIssuesWithAccent(post)
if replacetitle is not None:
# there was only one document, we replace it
post = post.replace(title_to_rep, replacetitle)
# we save the results
if os.path.exists(filename):
try:
f = open(filename, "r", encoding="utf8")
hist = f.read()
f.close()
except UnicodeDecodeError as e:
fLOG("issue with file ", filename)
content = open(filename, "r").read()
fLOG(content[170:])
raise e
else:
hist = ""
if post != hist or overwrite:
if "\xC3" in post:
#raise Exception("forbidden character ")
pass
if not overwrite:
fLOG(" writing ", filename)
if "### keywords ###" in post.lower():
raise Exception(
"unable to release that document with this string ### KEYWORDS ###,\nkeywords should be " + str(keystext))
f = open(filename, "w", encoding="utf8")
f.write(post)
f.close()
fileToReturn.append(filename)
return fileToReturn
def build_process_all_pages(res,
keywordsHTML="frame_keywords.html",
siteFolder="../site/blog",
xd_blog_template_nojs=os.path.join(
"blog", "xd_blog_template_nojs.html"),
xd_blog_nojs="xd_blog_nojs.html",
frequence_keywords=3,
monthsHTML="frame_months.html"
):
"""
@param res output from function file_all_keywords
@param keywordsHTML html template for the keywords
@param      siteFolder                  folder of the blog (the one to be published)
@param xd_blog_template_nojs template for blog (static text, less javascript)
@param xd_blog_nojs main page (static text, less javascript)
@param frequence_keywords there won't be any page for a keyword whose frequency is below that threshold
@param monthsHTML html template for the months
@return all created pages
"""
add = []
fLOG("processing keywords")
htmlkey, keywords = build_bloc_keywords(
res, frequence_keywords, "xd_blog_key")
if keywordsHTML is not None:
file = os.path.join(siteFolder, keywordsHTML)
fLOG("writing ", file)
f = open(file, "w", encoding="utf8")
f.write("""<?xml version="1.0" encoding="utf-8"?>\n""")
f.write("<html>\n")
f.write("<head>\n")
f.write(
"""<meta content="text/html; charset=utf-8" http-equiv="Content-Type"/>\n""")
f.write("""<link href="pMenu.css" rel="stylesheet" type="text/css"/>\n""")
f.write("</head>\n")
f.write("<body>\n")
f.write("""<div class="sidebarfull">\n""")
f.write("""<p class="keywordtitle"><b>Keywords</b></p>\n""")
f.write(htmlkey)
f.write("\n</div>\n")
f.write("\n</body></html>\n")
f.close()
add.append(file)
fLOG("processing months")
htmlkeym, monthsp = build_bloc_months(res, "xd_blog_month")
if monthsHTML is not None:
file = os.path.join(siteFolder, monthsHTML)
fLOG("writing ", file)
f = open(file, "w", encoding="utf8")
f.write("""<?xml version="1.0" encoding="utf-8"?>\n""")
f.write("<html>\n")
f.write("<head>\n")
f.write(
"""<meta content="text/html; charset=utf-8" http-equiv="Content-Type"/>\n""")
f.write("""<link href="pMenu.css" rel="stylesheet" type="text/css"/>\n""")
f.write("</head>\n")
f.write("<body>\n")
f.write("""<div class="sidebarfullleft">\n<hr />\n""")
f.write("""<p class="monthtitle"><b>Months</b></p>\n""")
f.write(htmlkeym)
f.write("\n</div>\n")
f.write("\n</body></html>\n")
f.close()
add.append(file)
# build keyword pages
fLOG("building aggregated page for keywords")
add += generate_html_article(
res,
xd_blog_template_nojs,
siteFolder,
True,
xd_blog_nojs,
keywordsText="",
otherLayer="xd_blog.html")
# process all pages for each keyword)
for a, b in keywords:
fLOG("building page for keyword", FixIssuesWithAccent(b))
bb = removeAccent(b)
tempres = {}
for k, v in res.items():
if b in v:
tempres[k] = ""
add += generate_html_article(
tempres,
xd_blog_template_nojs,
siteFolder,
True,
"xd_blog_key_%s.html" % bb,
keywordsText=FixIssuesWithAccent(b),
otherLayer="xd_blog.html?tag=%s" % FixIssuesWithAccent(b))
# build months pages
fLOG("building aggregated page for months")
add += generate_html_article(
res,
xd_blog_template_nojs,
siteFolder,
True,
xd_blog_nojs,
keywordsText="",
otherLayer="xd_blog.html")
# process all pages for each months)
for a, b in monthsp:
fLOG("building page for months", b)
bb = removeAccent(b)
tempres = {}
for k, v in res.items():
if os.path.split(k)[-1].startswith(b):
tempres[k] = ""
add += generate_html_article(
tempres,
xd_blog_template_nojs,
siteFolder,
True,
"xd_blog_month_%s.html" % bb,
keywordsText=FixIssuesWithAccent(b),
otherLayer="xd_blog.html?tag=%s" % FixIssuesWithAccent(b))
# build all pages (one per blog)
fLOG("building all pages")
add += generate_html_article(
res,
xd_blog_template_nojs,
siteFolder,
overwrite=True,
otherLayer=None)
return add
```
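A tiny sketch of the accent-stripping helper defined at the top of the file above; it assumes `removeAccent` is in scope and the sample string is arbitrary.

```python
# Minimal sketch: characters outside the whitelist of removeAccent are dropped.
print(removeAccent("Déjà vu: café & thé"))   # -> 'Dj vu caf & th'
```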
#### File: ensae_teaching_cs/homeblog/clean_python_script_before_exporting_outside.py
```python
import re
def cleanFileFromtohtmlreplace(content):
if "#" + "## tohtmlreplace BEGIN ###" in content:
exps = "(#" + "## tohtmlreplace BEGIN ###((.|\n)*?)"
exps += "#" + "## tohtmlreplace ELSE ###((.|\n)*?)"
exps += "#" + "## tohtmlreplace END ###"
exps += ")"
exp = re.compile(exps)
res = exp.findall(content)
if len(res) == 0:
raise ValueError("unable to understand the format\n" + exps)
for rs in res:
torep = rs[0]
byrep = rs[1]
content = content.replace(torep, byrep)
return content
```
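A short sketch of the marker-based substitution above: when the three markers are present, the function keeps the BEGIN branch and drops the ELSE branch. The sample content is made up and `cleanFileFromtohtmlreplace` is assumed to be in scope.

```python
# Minimal sketch: the BEGIN branch is kept, the ELSE branch is removed.
content = (
    "### tohtmlreplace BEGIN ###\n"
    "print('html version')\n"
    "### tohtmlreplace ELSE ###\n"
    "print('plain version')\n"
    "### tohtmlreplace END ###\n"
)
print(cleanFileFromtohtmlreplace(content))
# only the BEGIN branch remains in the returned string
```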
#### File: ensae_teaching_cs/homeblog/latex2html.py
```python
import os
import shutil
import sys
from PIL import Image
from pyquickhelper.loghelper import run_cmd
def convert_short_latex_into_png(latex, temp_folder=".", fLOG=print,
miktex=r"C:\Program Files\MiKTeX 2.9\miktex\bin\x64",
final_name=None):
"""
Convert a short latex script into an image.
@param latex latex equation
@param temp_folder temp_folder (where temporary files will be placed)
@param fLOG logging function
@param miktex miktex location
@param final_name if not None, copy the image at this location using this name
@return a location to the image (it should be copied), and its size
You should not call the function twice at the same time in the same folder.
@warning The function raises an exception if the compilation fails, usually because something is missing on the command line.
"""
if not os.path.exists(miktex):
raise FileNotFoundError("unable to find miktex")
if sys.platform.startswith("win"):
htlatex = os.path.join(miktex, "htlatex.exe")
if not os.path.exists(htlatex):
raise FileNotFoundError("unable to find htlatex")
else:
htlatex = os.path.join(miktex, "htlatex")
if not os.path.exists(temp_folder):
os.makedirs(temp_folder)
eq = os.path.join(temp_folder, "eq.tex")
with open(eq, "w") as f:
f.write(r"""\documentclass[12pt]{article}
\pagestyle{empty}
\begin{document}
$$
%s
$$
\end{document}""".replace(" ", "") % latex.strip("\n\r "))
cmd = '"' + htlatex + '" eq.tex "html, graphics-300" "" "" "--interaction=nonstopmode"'
cwd = os.getcwd()
os.chdir(temp_folder)
out, err = run_cmd(cmd, wait=True)
os.chdir(cwd)
if "FAILED" in err:
raise Exception(
"it failed\n-----\n{0}\n----------\n{1}".format(out, err))
img = os.path.join(temp_folder, "eq0x.png")
if not os.path.exists(img):
with open(os.path.join(temp_folder, "eq.log"), "r") as f:
log = f.read()
raise FileNotFoundError("the compilation did not work\n" + log)
if final_name is not None:
# size reduction
im = Image.open(img)
shutil.copy(img, final_name)
return final_name, im.size
else:
im = Image.open(img)
return img, im.size
```
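A hedged sketch of the LaTeX-to-image helper above. It only works if the `htlatex` tool is installed at the given location; the path below is the default from the signature and is an assumption about the local setup.

```python
# Hypothetical sketch: requires a LaTeX distribution exposing htlatex
# at the location passed through the miktex argument.
img, size = convert_short_latex_into_png(
    r"\frac{1}{2} \sum_{i=1}^n x_i^2",
    temp_folder="temp_eq",
    miktex=r"C:\Program Files\MiKTeX 2.9\miktex\bin\x64")
print(img, size)   # path to eq0x.png and its (width, height)
```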
#### File: ensae_teaching_cs/special/einstein_prolog.py
```python
import copy
#: definition of all possible values (French terms)
#: colors
ttcouleur = ["jaune", "bleu", "rouge", "blanc", "vert"]
#: nationalities
ttnationalite = ["danois", "norvegien", "anglais", "allemand", "suedois"]
#: drinks
ttboisson = ["eau", "the", "lait", "cafe", "biere"]
#: smoke brand
ttcigare = ["Dunhill", "Blend", "Pall Mall", "Prince", "Bluemaster"]
#: animal
ttanimal = ["chats", "cheval", "oiseaux", "poisson", "chiens"]
#: all possibles values
ensemble = [ttcouleur, ttnationalite, ttboisson, ttcigare, ttanimal]
def permutation(nb):
"""
Compute all permutations of set [[ 1, 2, ..., nb ]].
Example for 3:
::
[[0, 1, 2], [0, 2, 1], [1, 0, 2],
[1, 2, 0], [2, 0, 1], [2, 1, 0]]
@param nb permutation over the set [[1..n]]
@return list of all possible permutations
@warning This method can be very long if nb is high (>10).
This function does something very similar to
`itertools.permutations <https://docs.python.org/3/library/itertools.html#itertools.permutations>`_.
"""
per = []
p = [i for i in range(0, nb)]
while p[0] < nb:
next = False
for i in range(1, nb):
if p[i] in p[0:i]:
next = True
break
if not next:
per.append(copy.copy(p))
p[nb - 1] += 1
for j in range(nb - 1, 0, -1):
if p[j] >= nb:
p[j] = 0
p[j - 1] += 1
return per
class Rule:
"""
This class defines a constraint of the problem
or a clause (see `http://en.wikipedia.org/wiki/Clause_(logic)`)
There are 5 different types of clauses described by Einstein's enigma,
each of them described by a different class. They are defined by the classes:
@ref cl RulePosition, @ref cl RuleEquivalence, @ref cl RuleVoisin,
@ref cl RuleAvant, @ref cl RuleEnsemble.
"""
def __init__(self):
#: name of the rule
self.name = None
#: set of clauses
self.set = None
def genere(self):
"""
Generates all possible clauses (list of lists)
(l [0][0] and l [0][1]) or (l [1][0] and l [1][1]),
a clause is a triplet of
(person, (property, category) )
"""
raise NotImplementedError()
def __str__(self):
"""
display
"""
if self.name is not None:
if not hasattr(self, "clauses"):
s = self.name + " \t: "
a = self.genere()
for al in a:
st = "\n ou " + str(al)
if len(st) > 260:
st = st[:260] + "..."
s += st
if len(s) > 1000:
break
return s
else:
s = self.name + " \t: " + str(self.set)
for al in self.clauses:
st = "\n ou " + str(al)
if len(st) > 260:
st = st[:260] + "..."
s += st
if len(s) > 1000:
break
return s
else:
return None
def combine(self, cl1, cl2):
"""
combine two clauses, two cases :
1. nothing in common or everything in common --> concatenation of clauses
2. a position or a property in common --> null clause
@param cl1 clause 1
@param cl2 clause 2
@return the new clause
A clause is a @ref cl Rule.
"""
# incompatibility
for p1 in cl1:
for p2 in cl2:
if p1[1][0] == p2[1][0]: # same property
if p1[0] != p2[0]: # but different positions
return None
if p1[0] == p2[0]: # same person
if p1[1][1] == p2[1][1] and p1[1][0] != p2[1][0]:
# same category but different properties
return None
# compatibility
r = copy.deepcopy(cl1)
for c in cl2:
if c not in r:
r.append(c)
return r
def combine_cross_sets(self, set1, set2):
"""
combines two sets of clauses
@param set1 set of clauses 1
@param set2 set of clauses 2
@return combination
"""
if len(set1) == 0:
return copy.deepcopy(set2)
if len(set2) == 0:
return copy.deepcopy(set1)
res = []
for cl1 in set1:
for cl2 in set2:
r = self.combine(cl1, cl2)
if r is not None:
res.append(r)
return res
class RulePosition(Rule):
"""
p1 at position
"""
def __init__(self, p1, pos):
Rule.__init__(self)
self.set = [p1]
self.name = "position"
self.position = pos
def genere(self):
"""
overrides method ``genere``
"""
return [[(self.position, self.set[0])]]
class RuleEquivalence(Rule):
"""
p1 equivalent to p2
"""
def __init__(self, p1, p2):
Rule.__init__(self)
self.set = [p1, p2]
self.name = "equivalence"
def genere(self):
"""
overrides method ``genere``
"""
lt = []
for i in range(0, 5):
lt.append([(i, self.set[0]), (i, self.set[1])])
return lt
class RuleVoisin(Rule):
"""
p1 and p2 are neighbors
"""
def __init__(self, p1, p2):
Rule.__init__(self)
self.set = [p1, p2]
self.name = "voisin"
def genere(self):
"""
overrides method ``genere``
"""
lt = []
for i in range(0, 4):
lt.append([(i, self.set[0]), (i + 1, self.set[1])])
lt.append([(i + 1, self.set[0]), (i, self.set[1])])
return lt
class RuleAvant(Rule):
"""
p1 before p2
"""
def __init__(self, p1, p2):
Rule.__init__(self)
self.set = [p1, p2]
self.name = "avant"
def genere(self):
"""
overrides method ``genere``
"""
lt = []
for j in range(1, 5):
for i in range(0, j):
lt.append([(i, self.set[0]), (j, self.set[1])])
return lt
class RuleEnsemble(Rule):
"""
permutation of the elements of a category
"""
def __init__(self, sets, categorie):
Rule.__init__(self)
self.set = [(s, categorie) for s in sets]
self.name = "ensemble"
def genere(self):
"""
overrides method ``genere``
"""
lt = []
per = permutation(5)
for p in per:
tl = []
for i in range(0, len(p)):
tl.append((i, self.set[p[i]]))
lt.append(tl)
return lt
class Enigma:
"""
this class solves the enigma
"""
def __init__(self, display=True):
"""
we describe the enigma using the classes we defined above
@param display if True, use print to print some information
"""
self.regle = []
self.regle.append(RulePosition(self.find("lait"), 2))
self.regle.append(RulePosition(self.find("norvegien"), 0))
self.regle.append(
RuleEquivalence(
self.find("Pall Mall"),
self.find("oiseaux")))
self.regle.append(
RuleEquivalence(
self.find("anglais"),
self.find("rouge")))
self.regle.append(
RuleEquivalence(
self.find("suedois"),
self.find("chiens")))
self.regle.append(
RuleEquivalence(
self.find("danois"),
self.find("the")))
self.regle.append(
RuleEquivalence(
self.find("vert"),
self.find("cafe")))
self.regle.append(
RuleEquivalence(
self.find("jaune"),
self.find("Dunhill")))
self.regle.append(
RuleEquivalence(
self.find("biere"),
self.find("Bluemaster")))
self.regle.append(
RuleEquivalence(
self.find("allemand"),
self.find("Prince")))
self.regle.append(
RuleVoisin(
self.find("Dunhill"),
self.find("cheval")))
self.regle.append(
RuleVoisin(
self.find("norvegien"),
self.find("bleu")))
self.regle.append(RuleVoisin(self.find("Blend"), self.find("eau")))
self.regle.append(RuleVoisin(self.find("Blend"), self.find("chats")))
self.regle.append(RuleAvant(self.find("vert"), self.find("blanc")))
self.regle.append(RuleEnsemble(ttcouleur, 0))
self.regle.append(RuleEnsemble(ttnationalite, 1))
self.regle.append(RuleEnsemble(ttboisson, 2))
self.regle.append(RuleEnsemble(ttcigare, 3))
self.regle.append(RuleEnsemble(ttanimal, 4))
for r in self.regle:
r.clauses = r.genere()
r.utilise = False
self.count = 0
def find(self, p):
"""
finds a clause in the different sets of clause (houses, colors, ...)
@param p clause
@return tuple (clause, position)
"""
for i in range(0, len(ensemble)):
if p in ensemble[i]:
return (p, i)
return None
def __str__(self):
"""
usual
"""
if "solution" not in self.__dict__ or self.solution is None or len(
self.solution) == 0:
if self.count > 0:
s = "solution impossible apres " + \
str(self.count) + " iterations \n"
else:
s = ""
for r in self.regle:
s += str(r) + "\n"
return s
else:
sr = ["solution, iteration " + str(self.count)]
matrix = [list(" " * 5) for _ in range(0, 5)]
for row in self.solution:
i = row[0]
j = row[1][1]
s = row[1][0]
matrix[i][j] = s + " " * (10 - len(s))
for row in matrix:
sr.append(", ".join(row))
classic = "\n".join(sr[1:])
html = classic.replace(",",
"</td><tr>").replace("\n",
"</td></tr>\n<tr><td>")
return sr[0] + "\n" + "\n".join([
classic,
"<table>",
"<tr><td>" + html + "</td></tr>",
"</table>"])
def solve(self, solution=None, logf=print): # solution = [ ]) :
"""
Solves the enigma with a depth-first exploration,
the method is recursive
@param      solution        empty list at the beginning, filled by the recursive calls
@return solution
"""
if solution is None:
solution = []
self.count += 1
if self.count % 10 == 0:
logf(
"*",
self.count,
" - properties in place : ",
len(solution) -
1)
if len(solution) == 25:
# we know the solution must contain 25 clauses,
# if we are here then the problem is solved unless some
# incompatibility remains
for r in self.regle:
cl = r.combine_cross_sets([solution], r.clauses)
if cl is None or len(cl) == 0:
# the solution is incompatible with a solution
return None
self.solution = solution
return solution
# we are looking for the rule which generates the least possible clauses
# in order to reduce the number of possibilities as much as possible
# the search could be represented as a tree, we avoid creating too
# many paths
best = None
rule = None
for r in self.regle:
cl = r.combine_cross_sets([solution], r.clauses)
if cl is None:
# the solution is incompatible with a solution
return None
# we check rule r is bringing back some results
for c in cl:
if len(c) > len(solution):
break
else:
cl = None
if cl is not None and (best is None or len(best) > len(cl)):
best = cl
rule = r
if best is None:
# the solution is incompatible with a solution
return None
rule.utilise = True
# we test all clauses
for c in best:
r = self.solve(c, logf=logf)
if r is not None:
# we found
return r
rule.utilise = False # impossible
return None
if __name__ == "__main__":
en = Enigma()
print(en)
print("-----------------------------\n")
en.solve()
print("-----------------------------\n")
print(en)
```
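A small sanity check for the `permutation` helper above, which the docstring compares to `itertools.permutations`. It assumes the module above is in scope (for example pasted into the same session).

```python
# Minimal sketch: permutation(n) generates the same set of permutations
# as itertools.permutations, only the ordering may differ.
from itertools import permutations

assert sorted(permutation(3)) == sorted(list(p) for p in permutations(range(3)))
print(len(permutation(4)))   # 24
```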
#### File: ensae_teaching_cs/special/propagation_epidemic.py
```python
import random
import copy
import os
from collections import Counter
from pyquickhelper.loghelper import noLOG
from ..helpers.pygame_helper import wait_event, empty_main_loop
class Point:
"""
Defines a point.
"""
def __init__(self, x, y):
"""
constructor
@param x x
@param y y
"""
self.x, self.y = float(x), float(y)
def norm(self):
"""
return the norm l2
"""
return (self.x ** 2 + self.y ** 2) ** 0.5
def __add__(self, p):
"""
addition
"""
return Point(self.x + p.x, self.y + p.y)
def __sub__(self, p):
"""
subtraction
"""
return Point(self.x - p.x, self.y - p.y)
def __mul__(self, p):
"""
multiplication by a scalar
"""
return Point(self.x * p, self.y * p)
def __div__(self, p):
"""
division by a scalar
"""
return Point(self.x / p, self.y / p)
def __iadd__(self, p):
"""
addition inplace
"""
self.x += p.x
self.y += p.y
return self
def __isub__(self, p):
"""
subtraction inplace
"""
self.x -= p.x
self.y -= p.y
return self
def __imul__(self, p):
"""
multiplication by a scalar inplace
"""
self.x *= p
self.y *= p
return self
def __idiv__(self, p):
"""
division by a scalar inplace
"""
self.x /= p
self.y /= p
return self
class Rect:
"""
Defines a rectangle.
"""
def __init__(self, a, b):
"""
constructor
@param a Point
@param b Point
"""
self.a = a
self.b = b
def limit(self, pos):
"""
clips point *pos* so that it stays inside the rectangle
and returns True if the point had to be moved
"""
r = False
if pos.x < self.a.x:
pos.x = self.a.x
r = True
if pos.y < self.a.y:
pos.y = self.a.y
r = True
if pos.x > self.b.x:
pos.x = self.b.x
r = True
if pos.y > self.b.y:
pos.y = self.b.y
r = True
return r
class Person:
"""
defines a person for the simulation
colors
* 0: healthy (sain)
* 1: sick (malade)
* 2: dead (mort)
* 3: recovered (gueris)
A person moves by drawing a random gaussian vector added to
its current acceleration.
"""
colors = {0: (255, 255, 255), 1: (0, 255, 0),
2: (0, 0, 0), 3: (50, 50, 200)}
def __init__(self, position, borne_pos=None, borne_acc=None,
alea_acc=5, sick_vit=0.25, rayon=10, nb_day=70,
prob_die=0.5, prob_cont=0.5):
"""
constructor
@param position position
@param borne_pos upper bound for the position (rectangle)
@param borne_acc upper bound for the acceleration (rectangle)
@param alea_acc sigma to draw random acceleration
@param      sick_vit        when people are sick, they go slower, this multiplies
the acceleration by a factor
@param      rayon           radius below which a sick person is contagious to its neighbours
@param      nb_day          number of days a person stays sick, after which the person
either recovers or dies
@param prob_die probability to die at each iteration
@param prob_cont probability to transmit the disease to a neighbour
"""
self.pos = position
self.vit = Point(0, 0)
self.acc = Point(0, 0)
self.state = 0
self.alea_acc = alea_acc
self.borne_pos = borne_pos
self.borne_acc = borne_acc
self.sick_vit = sick_vit
self.rayon = rayon
self.nb_day = nb_day
self.prob_die = prob_die
self.prob_cont = prob_cont
# memorize the day the person got sick
self._since = 0
def __str__(self):
"""
usual
"""
return str(self.__dict__)
def distance(self, p):
"""
return the distance between this person and another one
"""
d = self.pos - p.pos
return d.norm()
def _get_new_acceleration(self):
"""
update the acceleration by adding a random gaussian vector
to the current acceleration, check that acceleration
is not beyond some boundary
"""
x = random.gauss(0, self.alea_acc)
y = random.gauss(0, self.alea_acc)
res = Point(x, y)
if self.borne_acc is not None:
r = self.borne_acc.limit(res)
if r:
self.acc = Point(0, 0)
self.vit = Point(0, 0)
return res
def state_evolution(self, population):
"""
update the state of the person: healthy --> sick --> cured or dead
@param population sets of other persons
The function updates the state of the person.
One of the steps involves looking over the entire population to check
whether some sick people are close enough to transmit the disease.
"""
if self.state in [2, 3]:
return
elif self.state == 1:
if self._since < self.nb_day:
self._since += 1
else:
k = random.random()
if k < self.prob_die:
self.state = 2
else:
self.state = 3
self._since = 0
elif self.state == 0:
alls = []
for p in population:
if p.state != 1:
continue
d = self.distance(p)
if d <= self.rayon:
alls.append(p)
for k in alls:
p = random.random()
if p <= self.prob_cont:
self.state = 1
break
else:
raise Exception("impossible")
def evolution(self, dt, population):
"""
update the population, random acceleration
@param dt time delta (only used to update the position)
@param population other set of people
The function updates the state of the person,
draws a new acceleration and updates the position.
"""
self.state_evolution(population)
if self.state == 1:
dt *= self.sick_vit
elif self.state == 2:
dt = 0
self.pos += self.vit * dt
self.vit += self.acc * dt
self.acc = self._get_new_acceleration()
if self.borne_pos is not None:
r = self.borne_pos.limit(self.pos)
if r:
self.acc = Point(0, 0)
self.vit = Point(0, 0)
class EpidemicPopulation:
"""
defines a population
"""
def __init__(self, cote=500, nb=(100, 1), **params):
"""
constructor
@param      cote        size of the zone in which people move
@param      nb          tuple number of people (healthy, sick)
@param      params      other parameters
Draws a population.
"""
if cote is None:
pass
else:
self.cote = cote
self.gens = []
for i in range(0, nb[0]):
p = Person(Point(random.randint(0, cote), random.randint(0, cote)),
Rect(Point(0, 0), Point(cote, cote)),
**params)
self.gens.append(p)
for i in range(0, nb[1]):
p = Person(Point(random.randint(0, cote), random.randint(0, cote)),
Rect(Point(0, 0), Point(cote, cote)),
**params)
p.state = 1
self.gens.append(p)
def __getitem__(self, i):
"""
usual
"""
return self.gens[i]
def __iter__(self):
"""
usual
"""
return self.gens.__iter__()
def __len__(self):
"""
usual
"""
return len(self.gens)
def count(self):
"""
return the distribution of healthy, sick, cured people
"""
return Counter(map(lambda p: p.state, self))
def evolution(self, dt=0.5):
"""
new iteration
@param dt dt
@return nb1,nb2
We walk through everybody and call
:meth:`evolution <ensae_teaching_cs.special.propagation_epidemic.Person.evolution>`.
"""
# work on a deep copy of the population so that each person is updated
# based on the previous state of the whole population
pop = copy.deepcopy(self)
for p in self.gens:
p.evolution(dt, pop)
return self.count()
def display_person(self, screen, pygame):
"""
display a person on a pygame screen
@param self Person
@param screen screen
@param pygame module pygame
"""
c = Person.colors[self.state]
pygame.draw.rect(screen, c, pygame.Rect(
self.pos.x - 4, self.pos.y - 4, 8, 8))
def display_population(self, screen, pygame, font, back_ground):
"""
display
@param self Person
@param screen screen
@param font font (pygame)
@param back_ground back ground color
@param pygame module pygame
"""
screen.fill(back_ground)
for p in self.gens:
display_person(p, screen, pygame)
c = self.count()
text = "vie: %d" % c.get(0, 0)
text = font.render(text, True, (255, 255, 255))
screen.blit(text, (self.cote, 100))
text = "malade: %d" % c.get(1, 0)
text = font.render(text, True, (255, 255, 255))
screen.blit(text, (self.cote, 135))
text = "mort: %d" % c.get(2, 0)
text = font.render(text, True, (255, 255, 255))
screen.blit(text, (self.cote, 170))
text = "gueris: %d" % c.get(3, 0)
text = font.render(text, True, (255, 255, 255))
screen.blit(text, (self.cote, 205))
def pygame_simulation(pygame, first_click=False, folder=None,
iter=1000, cote=600, nb=(200, 20), flags=0,
**params):
"""
Runs a graphic simulation. The user can see a :epkg:`pygame`
screen showing the evolution of population.
A healthy person is shown in white, a sick one in green,
a healed one in blue, a dead one in black. The function can save an image for
every iteration. They can be merged into a video with
function @see fn make_video.
@param pygame module pygame (avoids importing in this file)
@param first_click starts the simulation after a first click
@param folder to save the simulation, an image per simulation
@param iter number of iterations to run
@param cote @see cl EpidemicPopulation
@param nb @see cl EpidemicPopulation
@param params @see cl EpidemicPopulation
@param flags see `pygame.display.set_mode <https://www.pygame.org/docs/ref/display.html#pygame.display.set_mode>`_
@param fLOG logging function
The simulation looks like this:
.. raw:: html
<video autoplay="" controls="" loop="" height="400">
<source src="http://www.xavierdupre.fr/enseignement/complements/epidemic.mp4" type="video/mp4" />
</video>
To run the simulation::
from ensae_teaching_cs.special.propagation_epidemic import pygame_simulation
import pygame
pygame_simulation(pygame)
"""
pygame.init()
size = cote + 200, cote
screen = pygame.display.set_mode(size, flags)
font = pygame.font.Font("freesansbold.ttf", 30)
back_ground = (128, 128, 128)
pop = EpidemicPopulation(cote, nb)
display_population(pop, screen, pygame, font, back_ground)
pygame.display.flip()
if first_click:
wait_event(pygame)
for i in range(0, iter):
empty_main_loop(pygame)
nb = pop.evolution()
display_population(pop, screen, pygame, font, back_ground)
pygame.display.flip()
pygame.event.peek()
if folder is not None:
image = os.path.join(folder, "image_%04d.png" % i)
pygame.image.save(screen, image)
pygame.time.wait(50)
if 1 not in nb or nb[1] == 0:
break
if first_click:
wait_event(pygame)
def numerical_simulation(nb=(200, 20), cote=600, iter=1000, fLOG=noLOG, **params):
"""
Run a simulation, @see cl EpidemicPopulation.
@param iter number of iterations to run
@param cote @see cl EpidemicPopulation
@param nb @see cl EpidemicPopulation
@param params @see cl EpidemicPopulation
@param fLOG to display status every 10 iterations
@return population count
"""
pop = EpidemicPopulation(cote, nb, **params)
lasti = None
for i in range(0, iter):
nb = pop.evolution()
lasti = i
if 1 not in nb or nb[1] == 0:
break
if i % 10 == 0:
fLOG("iteration", i, ":", nb)
r = pop.count()
return r, lasti
```
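A minimal sketch of calling the numerical simulation above; the import path is taken from the `pygame_simulation` docstring, and the argument values and the meaning of `nb` are assumptions based on the defaults shown, so treat it as illustrative only.

```python
# Hypothetical usage sketch, not part of the original file.
from ensae_teaching_cs.special.propagation_epidemic import numerical_simulation

# nb=(200, 20) is assumed to mean 200 healthy people and 20 sick ones at the start
counts, last_iteration = numerical_simulation(nb=(200, 20), cote=600, iter=500)
print(counts)          # dictionary state -> number of people when the epidemic stops
print(last_iteration)  # iteration at which no sick person remained (or iter - 1)
```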
#### File: ensae_teaching_cs/td_2a/edmonds_karp.py
```python
import copy
import collections
from pyquickhelper.loghelper import noLOG
class EdmondsKarpGraph:
"""
This class represents a directed graph using adjacency
matrix representation.
"""
def __init__(self, edges):
"""
The graph is defined as a list of tuples (n1, n2, capacity),
where capacity is the capacity of the edge from n1 to n2.
@param edges list of tuple (n1, n2, capacity)
"""
graph = {}
for n1, n2, capacity in edges:
if n1 not in graph:
graph[n1] = {}
graph[n1][n2] = capacity
self._graph = graph # residual graph
def bfs(self, graph, s, t, parent):
'''
Returns True if there is a path from source *s* to sink *t* in
residual graph. Also fills *parent* to store the path.
@param graph graph
@param s source node
@param t sink node
@param parent stores the path
@return boolean
'''
# Mark all the vertices as not visited
visited = {}
# Create a queue for BFS
queue = collections.deque()
# Mark the source node as visited and enqueue it
queue.append(s)
visited[s] = True
# Standard BFS Loop
while queue:
u = queue.popleft()
# Get all adjacent vertices of the dequeued vertex u.
# If an adjacent vertex has not been visited, mark it
# visited and enqueue it
for node, val in graph[u].items():
if (not visited.get(node, False)) and val > 0:
queue.append(node)
visited[node] = True
parent[node] = u
# If we reached sink in BFS starting from source, then return
# true, else false
return visited.get(t, False)
def edmonds_karp(self, source, sink, fLOG=noLOG, verbose=False,
update=None):
"""
Returns the maximum flow from *s* to *t* in the given graph.
@param source source of the flow
@param sink destination of the flow
@param fLOG logging function
@param verbose more information
@param update custom update function
@return maximum flow
The update function can take linked edges into account.
The default version is:
::
def update_default(graph, u, v, path_flow):
graph[u][v] -= path_flow
graph[v][u] += path_flow
"""
graph = copy.deepcopy(self._graph)
# Add symmetry.
add_edges = []
for n1, forward in graph.items():
for n2 in forward:
if n2 not in graph or n1 not in graph[n2]:
add_edges.append((n2, n1))
for n1, n2 in add_edges:
if n1 not in graph:
graph[n1] = {}
if n2 not in graph[n1]:
graph[n1][n2] = 0
if verbose:
ini = copy.deepcopy(graph)
fLOG("---------")
for k, v in sorted(graph.items()):
for kk, vv in sorted(v.items()):
if ini[k][kk] > 0:
fLOG(" {0} -> {1} : {2:03f}".format(k, kk, vv))
fLOG("---------")
# This array is filled by BFS and to store path
parent = {}
max_flow = 0 # There is no flow initially
def update_default(graph, u, v, path_flow):
graph[u][v] -= path_flow
graph[v][u] += path_flow
if update is None:
update = update_default
# Augment the flow while there is path from source to sink
iteration = 0
while self.bfs(graph, source, sink, parent):
iteration += 1
if fLOG:
fLOG("[edmonds_karp] max_flow={0}".format(max_flow))
# Find minimum residual capacity of the edges along the
# path filled by BFS. Or we can say find the maximum flow
# through the path found.
path_flow = float("Inf")
s = sink
while s != source:
path_flow = min(path_flow, graph[parent[s]][s])
s = parent[s]
# Add path flow to overall flow
max_flow += path_flow
# update residual capacities of the edges and reverse edges
# along the path
v = sink
while v != source:
u = parent[v]
update(graph, u, v, path_flow)
v = parent[v]
if iteration == 0:
raise ValueError("No path can increase max_flow.")
if verbose:
fLOG("---------")
for k, v in sorted(graph.items()):
for kk, vv in sorted(v.items()):
if ini[k][kk] != vv and ini[k][kk] > 0:
fLOG(
" {0} -> {1} : {2:03f} -- ini {3:03f}".format(k, kk, vv, ini[k][kk]))
fLOG("---", max_flow)
if fLOG:
fLOG("[edmonds_karp] max_flow={0}".format(max_flow))
return max_flow
```
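A small usage sketch for the `EdmondsKarpGraph` class defined above; the graph below is illustrative, not taken from the repository.

```python
# Hypothetical example, not part of the original file.
edges = [('s', 'a', 3), ('s', 'b', 2),
         ('a', 'b', 1), ('a', 't', 2),
         ('b', 't', 3)]
graph = EdmondsKarpGraph(edges)
print(graph.edmonds_karp('s', 't'))  # maximum flow from 's' to 't': 5
```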
#### File: ensae_teaching_cs/td_2a/homomorphic.py
```python
import random
class HomomorphicInt:
"""
Implements an "homomorphic integer".
"""
__slots__ = ['V', 'N', 'P', 'Q', 'E']
@staticmethod
def pgcd(a, b):
"""
Computes the :epkg:`PGCD`.
"""
while a != b:
d = abs(a - b)
c = min(a, b)
a, b = d, c
return a
@staticmethod
def lcm(a, b):
"""
Computes the least common multiple
(:epkg:`PPCM`).
"""
p = HomomorphicInt.pgcd(a, b)
return a * b // p
@staticmethod
def find_e(p, q):
"""
Finds one exponent for the :epkg:`RSA` encryption.
"""
c = HomomorphicInt.pgcd(p - 1, q - 1)
qn = (p - 1) * (q - 1) // c
i = 0
while True:
h = random.randint(2, qn - 1)
pg = HomomorphicInt.pgcd(h, qn)
if pg == 1:
e = HomomorphicInt(h, (p - 1) // c, q - 1, (h, h))
try:
ei = e.inv().V
except ZeroDivisionError:
i += 1
continue
h2 = random.randint(2, p * q - 1)
e2 = HomomorphicInt(h2, p, q, (h2, h2))
try:
ei2 = e2.inv().V
except ZeroDivisionError:
i += 1
continue
return (h, ei, h2, ei2)
i += 1
if i > 100:
raise ValueError(
"Unable to find a number prime with (p-1)(q-1).")
def __init__(self, value, p=673, q=821, e=None):
"""
@param value initial value
@param p p for RSA
@param q q for RSA
@param e e for RSA (e, and inverse e)
Other prime numbers can be found at
`The First 100,008 Primes <https://primes.utm.edu/lists/small/100000.txt>`_.
"""
self.N = p * q
self.P = p
self.Q = q
if self.N <= 2:
raise ValueError("p*q must be > 2")
self.V = value % self.N
if e is None:
self.E = HomomorphicInt.find_e(self.P, self.Q)
elif not isinstance(e, tuple):
raise TypeError("e must a tuple.")
else:
self.E = e
def new_int(self, v):
"""
Returns a @see cl HomomorphicInt with the same encrypted parameters.
"""
return HomomorphicInt(v, self.P, self.Q, self.E)
def __repr__(self):
"""
Usual
"""
return 'HomomorphicInt({},{},{},{})'.format(self.V, self.P, self.Q, self.E).replace(" ", "")
def __pow__(self, n):
"""
Power operator.
"""
if n == 0:
return HomomorphicInt(1, self.P, self.Q, self.E)
s = self.V
while n > 1:
s *= self.V
s %= self.N
n -= 1
return HomomorphicInt(s, self.P, self.Q, self.E)
def __add__(self, o):
"""
Addition.
"""
if self.N != o.N:
raise ValueError("{0} != {1}".format(self.N, o.N))
return HomomorphicInt(self.V + o.V, self.P, self.Q, self.E)
def __sub__(self, o):
"""
Subtraction.
"""
if self.N != o.N:
raise ValueError("{0} != {1}".format(self.N, o.N))
return HomomorphicInt(self.V - o.V, self.P, self.Q, self.E)
def __mul__(self, o):
"""
Multiplication.
"""
if self.N != o.N:
raise ValueError("{0} != {1}".format(self.N, o.N))
return HomomorphicInt(self.V * o.V, self.P, self.Q, self.E)
def inv(self):
"""
Inversion. This only works in all cases if *n* is a prime number.
We use :math:`a^{-1} \\equiv a^{n-2} \\mod n`.
The implementation can be improved (use binary decomposition) and cached.
"""
s = self.V
for i in range(1, self.N - 2):
s *= self.V
s %= self.N
if ((self.V * i) % self.N) == 1:
return HomomorphicInt(i, self.P, self.Q, self.E)
if ((s * self.V) % self.N) != 1:
raise ZeroDivisionError(
"Inverse of {0} does not exist.".format(self.V))
return HomomorphicInt(s, self.P, self.Q, self.E)
def __div__(self, o):
"""
Division, implies to find the inverse (so very costly).
"""
if self.N != o.N:
raise ValueError("{0} != {1}".format(self.N, o.N))
i = o.inv()
return HomomorphicInt(self.V * i.V, self.P, self.Q, self.E)
def crypt_mult(self):
"""
Crypt a number and preserve multiplication.
We use `RSA <https://fr.wikipedia.org/wiki/Chiffrement_RSA>`_.
"""
return self ** self.E[0]
def decrypt_mult(self):
"""
Decrypt a number and preserve multiplication.
"""
return self ** self.E[1]
def crypt_add(self):
"""
Encrypt a number and preserve addition (a simple permutation).
"""
return HomomorphicInt(self.V * self.E[2], self.P, self.Q, self.E)
def decrypt_add(self):
"""
Decrypt a number and preserve addition.
"""
return HomomorphicInt(self.V * self.E[3], self.P, self.Q, self.E)
```
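A short sketch of the multiplicative homomorphism implemented above; the values are illustrative, and the brute-force modular arithmetic makes this take a few seconds with the default primes.

```python
# Hypothetical example, not part of the original file.
a = HomomorphicInt(42)            # picks random RSA exponents for p=673, q=821
b = a.new_int(13)                 # shares the same (p, q, e) parameters as a
ca, cb = a.crypt_mult(), b.crypt_mult()
decrypted = (ca * cb).decrypt_mult()
print(decrypted.V)                # 546 == (42 * 13) % (673 * 821)
```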
#### File: ensae_teaching_cs/tests/american_cities.py
```python
import os
import pandas
from pyquickhelper.loghelper import fLOG
from ..faq.faq_matplotlib import graph_cities
from ..special import tsp_kruskal_algorithm, distance_haversine
def american_cities(df_or_filename, nb_cities=-1, img=None, fLOG=fLOG):
"""
Computes the :epkg:`TSP` for american cities.
@param df_or_filename dataframe
@param nb_cities number of cities to keep
@param img image to produce
@param fLOG logging function
@return dataframe (results)
"""
def haversine(p1, p2):
return distance_haversine(p1[0], p1[1], p2[0], p2[1])
if isinstance(df_or_filename, str):
df = pandas.read_csv(df_or_filename)
else:
df = df_or_filename
df["Longitude"] = -df["Longitude"]
df = df[df.Latitude < 52]
df = df[df.Longitude > -130].copy()
fLOG(df.columns)
df = df.dropna()
if nb_cities > 0:
df = df[:nb_cities].copy()
fLOG(df.shape)
points = [(row[1], row[2], row[3])
for row in df.itertuples(index=False)]
fLOG("number of cities:", len(points))
trip = tsp_kruskal_algorithm(
points, distance=haversine, fLOG=fLOG, max_iter=10)
# trip
dftrip = pandas.DataFrame(
trip, columns=["Latitude", "Longitude", "City"])
# graph
for i in range(0, dftrip.shape[0]):
if i % 10 != 0:
dftrip.loc[i, "City"] = ""
if img is not None:
import matplotlib.pyplot as plt
fig, ax = graph_cities(dftrip, markersize=3, linked=True, fLOG=fLOG,
fontcolor="red", fontsize='16', loop=True, figsize=(32, 32))
assert ax is not None
fig.savefig(img)
assert os.path.exists(img)
plt.close('all')
fLOG("end")
return dftrip
```
#### File: data/court_chemin/load_distance_matrix_dot.py
```python
import urllib
import os
import os.path
def charge_donnees(file="matrix_distance_7398.txt"):
if os.path.exists(file):
# if the file exists (it has already been downloaded once)
f = open(file, "r")
text = f.read()
f.close()
else:
# if the file does not exist
link = "http://www.xavierdupre.fr/enseignement/td_python/" + \
"python_td_minute/data/court_chemin/" + file
url = urllib.urlopen(link)
text = url.read()
# save the data so it does not have to be downloaded a second time
f = open(file, "w")
f.write(text)
f.close()
lines = text.split("\n")
lines = [l.split("\t") for l in lines if len(l) > 1]
return lines
def conversion_en_dictionnaire(lines):
res = {}
for a, b, c in lines:
c = int(c)
res[a, b] = c
res[b, a] = c
return res
def graphviz_script(mat_dict):
script = ["graph {"]
vertex = {}
villes = [_[0] for _ in mat_dict.keys()]
for v in villes:
if v not in vertex:
vertex[v] = len(vertex)
for k, v in vertex.iteritems():
script.append("%d [label=\"%s\"];" % (v, k))
for k, v in mat_dict.iteritems():
i1 = vertex[k[0]]
i2 = vertex[k[1]]
if i1 < i2 and v < 15000:
# drop some edges, otherwise drawing the graph takes too long
script.append("%d -- %d [label=\"%skm\"];" % (i1, i2, v / 1000))
script.append("}")
return "\n".join(script)
if __name__ == "__main__":
matrice_line = charge_donnees()
mat = conversion_en_dictionnaire(matrice_line)
f = open("graph.gv", "w")
f.write(graphviz_script(mat))
f.close()
print("nombre d'arcs ", len(mat))
os.system("dot -Tpng -o graph.png graph.gv")
```
#### File: data/lworld/lworld.py
```python
def build_graph():
relation = """
yolanda candace bette coleman <NAME> <NAME>
a<NAME> bette nadia
<NAME> eric
<NAME> helena isabella
<NAME> helena <NAME>
winnie helena catherine
amy jodi bette alice sean
april alice andrew
alice tasha eva-papi kit benjamin
hazel angus kit
gregg alice phyllis leonard
joyce phyllis
tayo alice lisa
uta alice nina brooke heather <NAME>
paige sh<NAME>
<NAME> carmen eva-papi
<NAME>
carmen <NAME> melissa
heather <NAME> l<NAME> alice tayo
lara d<NAME>
lara claybourne <NAME>
sh<NAME>
lacey shane paige
<NAME> gene
<NAME>
<NAME>-<NAME>
grace moira-max
<NAME>
becky tim
helena isabella
<NAME> helena winnie
helena catherine
<NAME>
"""
relation = [_.split() for _ in relation.split("\n")]
arcs = []
for rel in relation:
for i in range(1, len(rel)):
a = [rel[i - 1], rel[i]]
a.sort()
arcs.append(a)
arcs.sort()
temp = arcs
arcs = []
for t in temp:
if len(arcs) == 0 or t != arcs[-1]:
arcs.append(t)
for a in arcs:
print(",".join(a))
noeuds = {}
for a, b in arcs:
noeuds[a] = min(len(noeuds), noeuds.get(a, 100000))
noeuds[b] = min(len(noeuds), noeuds.get(b, 100000))
import sys
sys.path.append(r"D:\Dupre\_data\program\hal\hal_Python")
import hal_python as HAL
HAL.Begin()
vertices = [(b, a) for a, b in noeuds.iteritems()]
edges = [(noeuds[a], noeuds[b]) for a, b in arcs]
im = HAL.ArcGraphDraw(vertices, edges)
im.Display()
HAL.Pause()
"""
char *argv2 [6] = { "_graphviz_draw.exe",
".",
"../_hal_data/hal_graph/tmp_DrawGraph.graph",
"tmp_DrawGraph.png",
"png",
"neato"} ;
"""
if __name__ == "__main__":
build_graph()
```
#### File: _todo/programme/exemple_fourier.py
```python
import pygame
import pygame.mixer
import pygame.sndarray
import FFT
import math
import numpy as Numeric
import string
import copy
import pylab
import numpy
pygame.mixer.init ()
pygame.init ()
fourier = None
indice = None
def get_sound ():
"""charge le son sound010.wav"""
s = pygame.mixer.Sound ("sound010.wav")
t = pygame.sndarray.array (s)
return s
def play_sound(s):
"""joue un son"""
s.play ()
def convert_array(t, s):
"""joue un son decrit dans un tableau a une dimension"""
s = pygame.sndarray.array (s)
for i in range (0, len (s)) :
s [i]= t [i]
#tt = Numeric.array ([ [x, x] for x in t] )
#print tt [0:10]
s = pygame.sndarray.make_sound (s)
return s
def array_sound(s):
"""convertit un son en un tableau d'entiers"""
a = pygame.sndarray.array(s)
t = Numeric.array([i for i in xrange(0,len(a))])
for i in xrange(0,len(a)): t [i] = a [i][0]
return t
def dessine_son(mem,t,fourier,ind,a,b):
"""dessine une partie du son, limite la taille a 512"""
m = len (mem)
if m > 256 : m = 256
x = [ i for i in xrange (ind,ind+m) ]
y1 = [ mem[i] for i in xrange (ind,ind+m) ]
y2 = [ t[i] for i in xrange (ind,ind+m) ]
pylab.figure (1)
p1 = pylab.plot (x,y1)
p2 = pylab.plot (x,y2)
pylab.title ("Fourier")
pylab.xlabel ("frequence")
pylab.ylabel ("amplitude")
pylab.legend ( ("son", "son + filtre"))
m = len (fourier)
if m > 256 : m = 256
#x = [ i for i in xrange (0,m) ]
pylab.figure (2)
x = [ i for i in xrange (0,m) ]
y1 = [ abs(fourier[i]) for i in xrange (0,m) ]
y2 = []
for i in x :
if a <= i <= b : y2.append (450000.0)
else : y2.append (0.0)
p3 = pylab.plot (x,y1)
p4 = pylab.plot (x,y2)
pylab.legend ( ("fourrier", "filtre"))
pylab.show()
def filtre_son_extrait(t,a,b):
"""calcul de la transformee de Fourier, application du filtre [a,b],
recomposition du signal"""
fft = FFT.fft (t)
global fourier
if fourier == None and indice != None : fourier = copy.copy(fft)
for i in xrange(0,len(t)):
if a <= i <= b:
pass
else:
fft [i] = complex(0,0)
tt = FFT.inverse_fft(fft)
for i in xrange(0,len(t)):
t [i] = int(tt [i].real)
def filtre_son(t,a,b,div = 256):
"""filtre un son par tranche de div frequences, ne garde que les
frequences comprises entre a et b"""
global indice
nb = len (t) / div
for i in xrange (0,nb):
if i == nb / 2 : indice = i * div
ext = t [i * div : (i+1) * div]
filtre_son_extrait (ext,a,b)
def essai():
print "chargement du son"
s = get_sound ()
print "duree : ", s.get_length (), " secondes"
print "musique"
play_sound (s)
pygame.time.delay (6000)
t = array_sound (s)
mem = copy.copy(t)
print "nombre de donnees ", len (t)
print "duree d'une donnee ", s.get_length () * 1000 / len (t), "millisecondes"
rep = ""
if rep != "n" :
#rep = string.split (rep, ",")
#a = int (rep [0])
#b = int (rep [1])
a,b = 10,100
print "filtrage [%d,%d]" % (a,b)
filtre_son (t,a,b)
print "dessin des premiers instants, son filtre"
dessine_son (mem,t,fourier,indice, a,b)
print "son filtre"
s = convert_array (t, s)
play_sound (s)
pygame.time.delay (6000)
essai ()
```
#### File: _todo/programme/filedistance.py
```python
def get_lines (file) :
"""retourne toutes les lignes d'un fichier, nettoie les fins de lignes et les espaces"""
f = open (file, "r")
li = f.readlines ()
f.close ()
return [ l.strip ("\r\n") for l in li ]
def distance (line1, line2) :
"""construit une distance entre deux tableaux de lignes"""
d = { (-1,-1):(0,(-1,-1), "") }
for i in xrange (0, len (line1)) :
d [ i,-1 ] = (i+1, (i-1,-1), "+ " + line1 [i])
for j in xrange (0, len (line2)) :
d [ -1,j ] = (j+1, (-1,j-1), "- " + line2 [j])
for i in xrange (0, len (line1)) :
l1 = line1 [i]
for j in xrange (0, len (line2)) :
l2 = line2 [j]
c = abs (cmp (l1, l2))
i1 = d [i-1,j][0] + 1
i2 = d [i,j-1][0] + 1
i3 = d [i-1,j-1][0] + 2*c
if i1 <= min (i2, i3) :
d [i,j] = (i1, (i-1,j), "+ " + l1)
elif i2 <= min (i1, i3) :
d [i,j] = (i2, (i,j-1), "- " + l2)
else :
d [i,j] = (i3, (i-1,j-1), " " + l1)
last = (len (line1)-1, len (line2)-1)
pos = [d [last]]
pn = pos [0][1]
while pn != (-1,-1) :
p = pos [len (pos)-1]
pn = p [1]
pos.append (d [pn])
pos.pop ()
pos.reverse ()
return [ p [2] for p in pos ]
def distance_file (file1, file2) :
line1 = get_lines (file1)
line2 = get_lines (file2)
return distance (line1, line2)
if __name__ == "__main__" :
file1 = "filedistance.py"
file2 = "filedistance2.py"
res = distance_file (file1, file2)
for r in res :
print r
```
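A tiny sketch of the edit script produced by `distance` above; note the convention used by the module, where '+' marks lines kept from the first file and '-' lines from the second. The inputs are illustrative, and the module itself is Python 2 code (xrange, cmp), so this sketch assumes a Python 2 interpreter.

```python
# Hypothetical example, not part of the original file.
lines_a = ["a", "b", "c"]
lines_b = ["a", "x", "c"]
for op in distance(lines_a, lines_b):
    print(op)
# prints the matched line "a", then "- x", "+ b", then the matched line "c"
```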
#### File: _unittests/ut_automation/test_publish.py
```python
import sys
import os
import unittest
from pyquickhelper.loghelper import noLOG
from pyquickhelper.pycode import ExtTestCase
from ensae_teaching_cs.automation import publish_teachings_to_web
class TestPublish(ExtTestCase):
def test_publish(self):
if sys.platform.startswith("win"):
letter = "d" if os.path.exists("d:") else "c"
location = letter + ":\\jenkins\\pymy\\%s\\%s%s\\dist\\%s"
else:
location = "/var/lib/jenkins/workspace/%s/%s%s/dist/%s"
rootw = "/www/htdocs/app/%s/%s"
google_id = "NOGOODID"
suffix = ("_UT_%d%d_std" % sys.version_info[:2],)
projects = publish_teachings_to_web("nologin", location=location, exc=False,
suffix=suffix, transfer=False,
fLOG=noLOG, google_id=google_id,
rootw=rootw)
n = 0
for _ in projects:
if "ensae_teaching_cs" not in _["local"]:
continue
self.assertIn("/helpsphinx", _["root_web"])
n += 1
self.assertGreater(n, 1)
if __name__ == "__main__":
unittest.main()
```
#### File: _unittests/ut_dnotebooks/test_label.py
```python
import os
import unittest
from pyquickhelper.loghelper import fLOG
class TestLabel(unittest.TestCase):
def test_label_bom(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
fold = os.path.abspath(os.path.dirname(__file__))
fhel = os.path.join(
fold,
"..",
"..",
"_doc",
"sphinxdoc",
"build3",
"html",
"specials",
"algorithm_culture.html")
fhel = os.path.normpath(fhel)
if os.path.exists(fhel):
with open(fhel, "r", encoding="utf8") as f:
content = f.read()
self.assertTrue(".. _l-algoculture:</p>" not in content)
else:
fLOG("unable to test", fhel)
if __name__ == "__main__":
unittest.main()
```
#### File: _unittests/ut_dnotebooks/test_LONG_2A_notebook_eleves_2017.py
```python
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, add_missing_development_version
import ensae_teaching_cs
class TestNotebookEleves2017(unittest.TestCase):
def setUp(self):
add_missing_development_version(["pymyinstall", "pyensae", "pymmails", "jyquickhelper"],
__file__, hide=True)
def get_replacements(self):
return {"https://archive.ics.uci.edu/ml/machine-learning-databases/00222/":
"http://www.xavierdupre.fr/enseignement/complements/"}
def test_notebook_runner_eleves(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
from ensae_teaching_cs.automation.notebook_test_helper import ls_notebooks, execute_notebooks
temp = get_temp_folder(__file__, "temp_notebook_eleves_2017")
keepnote = ls_notebooks("notebook_eleves/2017-2018")
self.assertTrue(len(keepnote) > 0)
execute_notebooks(temp, keepnote, (lambda i, n: True), fLOG=fLOG,
replacements=self.get_replacements(), dump=ensae_teaching_cs)
if __name__ == "__main__":
unittest.main()
```
#### File: _unittests/ut_faq/test_LONG_faq_matplotlib_video.py
```python
import sys
import os
import unittest
import warnings
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, fix_tkinter_issues_virtualenv
from ensae_teaching_cs.tests.american_cities import american_cities
class TestLONGFaqMatplotlibVideo(unittest.TestCase):
def test_all_american_cities(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
if sys.version_info[:2] <= (3, 4):
warnings.warn(
"Issue with Python 3.4, bug probably related to wrong pointers")
return
fix_tkinter_issues_virtualenv()
temp = get_temp_folder(__file__, "temp_LONG_matplotlib_video")
name = os.path.join(temp, "..", "data", "american_cities.txt")
img = os.path.join(temp, "img.png")
res = american_cities(name, -1, img, fLOG)
assert res is not None
if __name__ == "__main__":
unittest.main()
```
#### File: _unittests/ut_homeblog/test_filename_helper.py
```python
import os
import unittest
from pyquickhelper.loghelper import fLOG
from ensae_teaching_cs.homeblog import music_statistics
class TestFilenameHelper(unittest.TestCase):
def test_TableFormulaCore_Excel(self):
fLOG(__file__, self._testMethodName,
OutputPrint=__name__ == "__main__")
this = os.path.abspath(os.path.dirname(__file__))
r = music_statistics(this)
assert isinstance(r, dict)
if __name__ == "__main__":
unittest.main()
```
#### File: _unittests/ut_ml/test_gini.py
```python
import unittest
import numpy
from pyquickhelper.pycode import ExtTestCase
from ensae_teaching_cs.ml.gini import gini
class TestGini(ExtTestCase):
def test_gini(self):
Y = numpy.array([1, 1, 1, 1, 1, 1])
g = gini(Y)
self.assertEqual(g, 0.5)
Y = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
g = gini(Y)
self.assertEqual(g, 0.9)
Y = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
g = gini(Y, X=numpy.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 11]))
self.assertEqual(g, 0.7)
if __name__ == "__main__":
unittest.main()
```
#### File: _unittests/ut_module/test_statnb_helper.py
```python
import os
import unittest
import pandas
from pyquickhelper.helpgen.stat_helper import enumerate_notebooks_link
from pyquickhelper.pycode import ExtTestCase
class TestHelpGenStatHelper(ExtTestCase):
def test_format_history(self):
this = os.path.abspath(os.path.dirname(__file__))
nb_folder = os.path.join(this, "..", "..", "_doc", "notebooks")
self.assertTrue(os.path.exists(nb_folder))
nb_doc = os.path.join(this, "..", "..", "_doc", "sphinxdoc", "source")
self.assertTrue(os.path.exists(nb_doc))
nb = 0
counts = {'title': 0}
nbfound = set()
rows = []
for ind, r in enumerate(enumerate_notebooks_link(nb_folder, nb_doc)):
rl = list(r)
rl[0] = None if r[0] is None else os.path.split(r[0])[-1]
rl[1] = os.path.split(r[1])[-1]
nb += 1
m = rl[2]
counts[m] = counts.get(m, 0) + 1
self.assertTrue(r[-2] is None or isinstance(r[-2], str))
self.assertTrue(r[-1] is None or isinstance(r[-1], str))
if r[-1] is not None:
counts["title"] += 1
nbfound.add(rl[1])
rows.append(rl[:2] + rl[-2:] + [r[1].split("_doc")[-1]])
if __name__ != "__main__" and ind > 30:
break
self.assertGreater(counts.get("ref", 0), 0)
# self.assertTrue(counts.get(None, 0) > 0)
self.assertNotEmpty(counts["title"])
self.assertGreater(len(nbfound), 5)
# self.assertIn("graph4exos.ipynb", nbfound)
# self.assertTrue(counts.get("refn", 0) > 0)
# self.assertTrue(counts.get("toctree", 0) > 0)
df = pandas.DataFrame(data=rows, columns=[
"rst", "ipynb", "link", "title", "path"])
name = os.path.join(os.path.dirname(__file__), "temp_notebook_rst.txt")
df = df[df.rst != "all_notebooks.rst"]
df.sort_values("ipynb").to_csv(name, sep="\t", index=False)
self.assertTrue(name)
if __name__ == "__main__":
unittest.main()
```
#### File: _unittests/ut_td_1a/test_edit_distance.py
```python
import unittest
from pyquickhelper.loghelper import fLOG
from ensae_teaching_cs.td_1a import edit_distance
class TestEditDistance(unittest.TestCase):
def test_edit_distance(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
d, p = edit_distance("distance", "dizstnce")
self.assertEqual(d, 2)
self.assertEqual(p, [(-1, -1), (0, 0), (1, 1), (1, 2),
(2, 3), (3, 4), (4, 4), (5, 5), (6, 6), (7, 7)])
def test_edit_distance_bug(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
s1 = ""
s2 = "*"
d, p = edit_distance(s1, s2)
fLOG(d, p)
s1 = ""
s2 = ("*** H *** H/ *** H *** H/ *** H *** H/ *** H *** H/ *** H *** H/ *** H ***" +
" H/ *** H *** H/ *** H *** H/ *** H *** H/ *** H *** H/ *** H *** H/ *** H *** H")
d, p = edit_distance(s1, s2)
fLOG(d, p)
if __name__ == "__main__":
unittest.main()
```
#### File: _unittests/ut_td_2a/test_parallel.py
```python
import unittest
import numpy
from pyquickhelper.loghelper import fLOG
from ensae_teaching_cs.td_2a import ParallelThread
class TestParallel(unittest.TestCase):
def test_parallel(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
def inv(m):
return numpy.linalg.inv(m)
nps = [[numpy.random.random((5, 5))] for i in range( # pylint: disable=E1101
0, 1000)] # pylint: disable=E1101
mm = ParallelThread.parallel(inv, nps, 10)
fLOG(len(mm))
self.assertEqual(len(mm), 1000)
if __name__ == "__main__":
unittest.main()
```
#### File: _unittests/ut_td_2a/test_serialize.py
```python
import os
import unittest
import io
import pandas
from ensae_teaching_cs.td_2a import load_object, dump_object
class TestSerialization(unittest.TestCase):
def test_serialize(self):
temp = os.path.abspath(os.path.dirname(__file__))
temp = os.path.join(temp, "temp_serialization")
if not os.path.exists(temp):
os.mkdir(temp)
df = pandas.DataFrame([{"name": "xavier", "school": "ENSAE"},
{"name": "antoine", "school": "ENSAE"}])
outfile = os.path.join(temp, "out_df.bin")
if os.path.exists(outfile):
os.remove(outfile)
dump_object(df, outfile)
assert os.path.exists(outfile)
df2 = load_object(outfile)
assert df.values.tolist() == df2.values.tolist()
s = io.BytesIO()
dump_object(df, s)
s.seek(0)
df3 = load_object(s)
assert df.values.tolist() == df3.values.tolist()
if __name__ == "__main__":
unittest.main()
```
|
{
"source": "jeromemaleski/raster-vision",
"score": 3
}
|
#### File: core/rv_pipeline/object_detection_config.py
```python
from enum import Enum
from rastervision.pipeline.config import register_config, Config, Field
from rastervision.core.rv_pipeline import RVPipelineConfig
from rastervision.core.data.label_store import ObjectDetectionGeoJSONStoreConfig
from rastervision.core.evaluation import ObjectDetectionEvaluatorConfig
class ObjectDetectionWindowMethod(Enum):
"""Enum for window methods
Attributes:
chip: the default method
"""
chip = 'chip'
@register_config('object_detection_chip_options')
class ObjectDetectionChipOptions(Config):
neg_ratio: float = Field(
1.0,
description=
('The ratio of negative chips (those containing no bounding '
'boxes) to positive chips. This can be useful if the statistics '
'of the background are different in positive chips. For example, '
'in car detection, the positive chips will always contain roads, '
'but no examples of rooftops since cars tend not to be near rooftops.'
))
ioa_thresh: float = Field(
0.8,
description=
('When a box is partially outside of a training chip, it is not clear if (a '
'clipped version) of the box should be included in the chip. If the IOA '
'(intersection over area) of the box with the chip is greater than ioa_thresh, '
'it is included in the chip.'))
window_method: ObjectDetectionWindowMethod = ObjectDetectionWindowMethod.chip
@register_config('object_detection_predict_options')
class ObjectDetectionPredictOptions(Config):
merge_thresh: float = Field(
0.5,
description=
('If predicted boxes have an IOA (intersection over area) greater than '
'merge_thresh, then they are merged into a single box during postprocessing. '
'This is needed since the sliding window approach results in some false '
'duplicates.'))
score_thresh: float = Field(
0.5,
description=
('Predicted boxes are only output if their score is above score_thresh.'
))
@register_config('object_detection')
class ObjectDetectionConfig(RVPipelineConfig):
chip_options: ObjectDetectionChipOptions = ObjectDetectionChipOptions()
predict_options: ObjectDetectionPredictOptions = ObjectDetectionPredictOptions(
)
def build(self, tmp_dir):
from rastervision.core.rv_pipeline.object_detection import ObjectDetection
return ObjectDetection(self, tmp_dir)
def get_default_label_store(self, scene):
return ObjectDetectionGeoJSONStoreConfig()
def get_default_evaluator(self):
return ObjectDetectionEvaluatorConfig()
```
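A brief sketch of overriding the chip and predict options defined above. It assumes the pydantic-style keyword construction that `register_config`/`Field` suggest, so treat it as an illustration rather than the library's documented API.

```python
# Hypothetical configuration sketch, not taken from the Raster Vision docs.
chip_opts = ObjectDetectionChipOptions(neg_ratio=2.0, ioa_thresh=0.9)
predict_opts = ObjectDetectionPredictOptions(merge_thresh=0.25, score_thresh=0.6)
print(chip_opts.window_method)  # ObjectDetectionWindowMethod.chip (the default)
```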
|
{
"source": "jeromemartin/centreon-sdk-python",
"score": 2
}
|
#### File: configuration/common/centreonnotifyobject.py
```python
import centreonapi.webservice.configuration.common as common
from centreonapi.webservice.configuration.common import CentreonObject
from centreonapi.webservice.configuration.contact import Contact, ContactGroup
class CentreonNotifyObject(CentreonObject):
def __init__(self):
super(CentreonNotifyObject, self).__init__()
self.contacts = dict()
self.contactgroups = dict()
def getcontact(self):
"""
:return: state (True/False), contacts or error message
"""
state, cs = self.webservice.call_clapi(
'getcontact',
self._clapi_action,
self.reference)
if state:
if len(cs['result']) > 0:
for c in cs['result']:
c_obj = Contact(c)
self.contacts[c_obj.name] = c_obj
return state, self.contacts
else:
return state, None
else:
return state, cs
def addcontact(self, contacts):
c = "|".join(common.build_param(contacts, Contact))
return self.webservice.call_clapi(
'addcontact',
self._clapi_action,
self._prepare_values(c))
def setcontact(self, contacts):
c = "|".join(common.build_param(contacts, Contact))
return self.webservice.call_clapi(
'setcontact',
self._clapi_action,
self._prepare_values(c))
def deletecontact(self, contacts):
c = "|".join(common.build_param(contacts, Contact))
return self.webservice.call_clapi(
'delcontact',
self._clapi_action,
self._prepare_values(c))
def getcontactgroup(self):
state, cgs = self.webservice.call_clapi(
'getcontactgroup',
self._clapi_action,
self.reference)
if state:
if len(cgs['result']) > 0:
for c in cgs['result']:
cg_obj = ContactGroup(c)
self.contactgroups[cg_obj.name] = cg_obj
return state, self.contactgroups
else:
return state, None
else:
return state, cgs
def addcontactgroup(self, contactgroups):
c = "|".join(common.build_param(contactgroups, ContactGroup))
return self.webservice.call_clapi(
'addcontactgroup',
self._clapi_action,
self._prepare_values(c))
def setcontactgroup(self, contactgroups):
c = "|".join(common.build_param(contactgroups, ContactGroup))
return self.webservice.call_clapi(
'setcontactgroup',
self._clapi_action,
self._prepare_values(c))
def deletecontactgroup(self, contactgroups):
c = "|".join(common.build_param(contactgroups, ContactGroup))
return self.webservice.call_clapi(
'delcontactgroup',
self._clapi_action,
self._prepare_values(c))
```
|
{
"source": "JeromeMberia/MyBlog",
"score": 3
}
|
#### File: MyBlog/tests/test_post.py
```python
import unittest
from app.models import Post
class PostModelTest(unittest.TestCase):
def setUp(self):
self.new_post = Post(id = 1, title ='cool' ,content='It boring' ,user_id= 2)
def test_instance(self):
self.assertTrue(isinstance(self.new_post,Post))
def test_variables(self):
self.assertEquals(self.new_post.id, 1)
self.assertEquals(self.new_post.title, 'cool')
self.assertEquals(self.new_post.content, 'It boring')
self.assertEquals(self.new_post.user_id, 2)
```
|
{
"source": "JeromeMberia/Taarifa_Leo",
"score": 3
}
|
#### File: app/main/view.py
```python
from flask import render_template,request,redirect,url_for
from . import main
from ..requests import get_source,get_article,get_category
from ..models import Article
from ..models import Source
# Views
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
source = get_source()
title = 'News Live'
return render_template('index.html', title = title, source = source)
@main.route('/article/<article_id>')
def article(article_id):
article = get_article(article_id)
title = f'{article_id}'
return render_template('article.html',id = article_id,title = title,article = article)
@main.route('/category/<cat_name>')
def category(cat_name):
category = get_category(cat_name)
print (category)
title = f'{cat_name}'
return render_template('category.html',title = title, category = category)
```
|
{
"source": "JeromeMutgeert/da-faster-rcnn",
"score": 3
}
|
#### File: da-faster-rcnn/TargetDataLoaderProcess/data_loader.py
```python
import aiohttp
import asyncio
import async_timeout
import os
import numpy as np
import time
import cv2
# Settings:
Buffer = 100
BackBuffer = 20
Max_Query = 12
Timeout = 40
url_base = "https://test.yisual.com/images/media/download/picturethis/"
headers = {"api-key": "ccea03e0e3a08c428870393376e5cf7b7be7a55c", "api-secret": os.environ["SECRET"]}
cacheLoc = "/media/jerome/DATA/Study_d/ThesisD/TargetData/"
# dummy_im_id = "5461e5219da59bde29aed195"
# dummy_url = url_base + dummy_im_id
# counter txt's interface:
def update_fetched(fetched):
with open("fetched_temp.txt",'w') as f:
f.write(str(fetched))
f.flush()
os.fsync(f.fileno())
# atomic:
os.rename("fetched_temp.txt","fetched.txt")
def get_read():
global read
with open("read.txt",'r') as f:
numstr = f.read()
read = int(numstr)
return read
def to_filename(im_id):
return "target_{}.jpg".format(im_id)
def append_log(msg):
with open("log.txt",'a') as f:
f.write(str(time.time()) + ' :\t')
f.write(str(msg) + '\n')
async def download_coroutine(session, im_id, im_num):
# im_id = "5c59addcb71ee102f1e439ba"
cache = cacheLoc + im_id + '.jpg'
filename = to_filename(im_num)
if os.path.exists(cache):
# copy from cache:
os.system('cp {} ./{}'.format(cache,filename))
return
url = url_base + im_id
im = None
problematic = False
while type(im) == type(None):
try:
with async_timeout.timeout(Timeout):
async with session.get(url,headers=headers) as response:
with open(filename, 'wb') as f_handle:
while True:
chunk = await response.content.read(1024)
if not chunk:
# print('done')
break
f_handle.write(chunk)
f_handle.flush()
os.fsync(f_handle.fileno())
res = await response.release()
# Verify if download was succesfull:
im = cv2.imread(filename)
if type(im) == type(None):
problematic = True
append_log("{} {} Incorrect download.".format(im_num,im_id))
print("{} {} Incorrect download.".format(im_num,im_id))
except:
problematic = True
append_log("Downloading timed out, retrying {} {}".format(im_num,im_id))
print("Downloading timed out, retrying {} {}".format(im_num,im_id))
if problematic:
append_log("Succeeded! {} {}".format(im_num,im_id))
# Finally:
if os.path.exists(cacheLoc):
os.system('cp {} {}'.format(filename,cache))
return res
async def get_batch(loop,im_ids,im_nums):
async with aiohttp.ClientSession(loop=loop) as session:
tasks = [download_coroutine(session, im_id, im_num) for im_id,im_num in zip(im_ids,im_nums)]
await asyncio.gather(*tasks)
if __name__ == "__main__":
# init gobals
present = []
fetched = 0
read = 0
removed = 0
idle_count = 0
# init/reset protocol files:
update_fetched(0)
with open("read.txt",'w') as f:
f.write('0')
f.flush()
os.fsync(f.fileno())
ids = []
with open("ids.txt",'r') as f:
ids = [i.strip() for i in f.readlines()]
def shuffle(ids, epoch):
np.random.shuffle(ids)
filename = "ids_ep{}.txt".format(epoch)
with open(filename,'w') as f:
f.write('\n'.join(ids))
f.flush()
os.system('cp {} ids_current.txt'.format(filename))
def id_generator():
i = 0
epoch = 0
shuffle(ids,epoch)
while True:
yield ids[i]
i += 1
if i == len(ids):
i = 0
epoch += 1
shuffle(ids,epoch)
print("Loaded epoch {}".format(epoch))
id_gen = id_generator()
append_log("Starting")
while True:
# update read
read = get_read()
# print(fetched,read,removed)
# refill:
if (fetched - read) < Buffer:
# TODO: determine next imgs:
load_N = read + Buffer - fetched
load_N = min(load_N,Max_Query)
im_nums = [str(i) for i in range(fetched,fetched+load_N)]
im_ids = [next(id_gen) for _ in range(fetched,fetched+load_N)]
loop = asyncio.get_event_loop()
loop.run_until_complete(get_batch(loop,im_ids,im_nums))
# done fetching
fetched += load_N
present.extend(im_nums)
# broadcast
update_fetched(fetched)
idle_count = 0
else:
# we're all set
# Check for exitting:
stop = False
idle_count += 1
if idle_count > 1000: # about 3 mins idle
append_log("Idle time-out. Exiting.")
stop = True
if (fetched - read) > Buffer: # read.txt has decreased:
append_log("Read.txt has decreased. Exiting.")
stop = True
if stop:
for im_num in present:
os.remove(to_filename(im_num))
exit()
# sleep a bit to avoid spinning.
time.sleep(.2)
# remove
while removed < (read - BackBuffer):
try:
im_num = present[0]
except:
append_log("Non-existing file reported as read. Exiting.")
exit()
present = present[1:]
try:
os.remove(to_filename(im_num))
except:
append_log("While removing: File not found: {}".format(to_filename(im_num)))
print("While removing: File not found: {}".format(to_filename(im_num)))
removed += 1
```
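A hedged sketch of what a consumer on the other side of the fetched.txt / read.txt protocol might look like; this consumer is not part of the file above, and the function names are hypothetical.

```python
# Hypothetical consumer-side sketch of the fetched.txt / read.txt protocol used above.
import os
import time

def wait_for_image(n):
    # block until the loader reports that image n has been fetched
    while True:
        try:
            with open("fetched.txt") as f:
                if int(f.read()) > n:
                    return "target_{}.jpg".format(n)
        except (IOError, ValueError):
            pass
        time.sleep(0.2)

def mark_read(n):
    # atomically publish how many images have been consumed, mirroring update_fetched()
    with open("read_temp.txt", "w") as f:
        f.write(str(n + 1))
        f.flush()
        os.fsync(f.fileno())
    os.rename("read_temp.txt", "read.txt")
```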
|
{
"source": "JeromeMutgeert/Detectron-DA-Faster-RCNN",
"score": 2
}
|
#### File: detectron/modeling/PADA.py
```python
import numpy as np
from detectron.core.config import cfg
# import matplotlib
# import matplotlib.pyplot as plt
from tools.analyse_detects import coco_classes, KL_div
def print_dist(dist,name):
order = np.argsort(dist)[::-1]
dist = dist[order]
classes = coco_classes[order]
print(name)
for w,c in list(zip(dist,classes))[:]:
print("p = ({:.3f}):{}".format(w,c))
class ClassWeightDB(object):
def __init__(self,weight_db=None,conf_matrix=None,ns=None,fg_acc=(None,),weighted_fg_acc=(None,)):
self.weight_db = weight_db
self.total_sum_softmax = None
self.class_weights = None
self.gt_ins_dist = None
self.starting_dist = None
self.avg_pada_weight = 0
# self.prepared = False
# self.maxes = None
self.conf_matrix = conf_matrix
self.conf_col_avgs = ns
self.fg_acc = fg_acc
self.weighted_fg_acc = weighted_fg_acc
def setup(self,roi_data_loader):
source_roidb = roi_data_loader._roidb
target_roidb = roi_data_loader._target_roidb
continuing = self.weight_db is not None
if continuing:
print("weight_db taken from checkpoint")
gt_ins_counts = np.bincount([cls_idx
for rois in source_roidb
for cls_idx, crowd in zip(rois['gt_classes'],rois['is_crowd'])
if not crowd],
minlength=cfg.MODEL.NUM_CLASSES)
n_instances = gt_ins_counts.sum()
self.gt_ins_dist = gt_ins_counts/float(n_instances) + np.finfo(float).eps
print_dist(self.gt_ins_dist,'gt_ins_dist')
if not continuing:
self.weight_db = np.concatenate([rois['sum_softmax'][None,:] for rois in target_roidb],axis=0)
else:
self.starting_dist = np.sum([rois['sum_softmax'] for rois in target_roidb],axis=0)
self.starting_dist /= self.starting_dist.sum()
print("ClassWeightDB initiated with weight db of shape {}".format(self.weight_db.shape))
self.total_sum_softmax = self.weight_db.sum(axis=0)
self.class_weights = self.total_sum_softmax / self.total_sum_softmax.max()
# w = self.class_weights
# print('absolute weights: mean,min,max,median:',w.mean(),w.min(),w.max(),np.median(w))
if not continuing:
self.starting_dist = self.class_weights/self.class_weights.sum()
# print_dist(self.starting_dist,name='starting_dist')
self.class_weights = self.class_weights / self.gt_ins_dist
self.class_weights /= self.class_weights.max()
w = self.class_weights
print('pada weights: mean,min,max,median:',w.mean(),w.min(),w.max(),np.median(w))
print_dist(self.class_weights,'Corrected pada weights')
avg_pada_weight = (self.class_weights * self.gt_ins_dist).sum()
print("Weighted avg pada weight (by gt dist):", avg_pada_weight)
self.avg_pada_weight = avg_pada_weight
# init confusion matrix
nclasses = len(self.class_weights)
if self.conf_matrix is None:
self.conf_matrix = np.eye(nclasses)
ns = [1000] * nclasses if self.conf_col_avgs is None else self.conf_col_avgs
self.conf_col_avgs = [(c,RollingAvg(2000, avg_init=self.conf_matrix[:, c], n_init=ns[c])) for c in range(nclasses)]
# if self.fg_acc is None:
self.fg_acc = RollingAvg(10000,*self.fg_acc)
self.weighted_fg_acc = RollingAvg(10000,*self.weighted_fg_acc)
def update_class_weights(self,im_idx,sum_softmax):
# Update the weight_db, and apply the diff to the total_sum_softmax
prev_sum_softmax = self.weight_db[im_idx].copy()
self.weight_db[im_idx] = sum_softmax
# print('NormalizedMeanSquaredUpdate:',((prev_sum_softmax - sum_softmax)**2).mean()/prev_sum_softmax.sum(),prev_sum_softmax.sum(),sum_softmax.sum(),im_idx)
self.total_sum_softmax += sum_softmax - prev_sum_softmax
# map the sum_softmax'es to the expected gt space:
gt_sum_softmax = np.matmul(self.conf_matrix,self.total_sum_softmax[:,None])[:,0]
gt_sum_softmax[0] = 0.0 # discard confusion with bg
# correct for source gt dist and normalize (so that class_weights * gt_ins_dist \propto the target gt dist)
self.class_weights = gt_sum_softmax / self.gt_ins_dist
self.class_weights /= self.class_weights.max()
# update avg_pada_weight
self.avg_pada_weight = (self.class_weights * self.gt_ins_dist).sum()
def update_confusion_matrix(self,probs,labels):
nrois, nclasses = probs.shape
sel = labels > -1 # labels of -1 are instances that are not supervised
probs = probs[sel,:]
labels = labels[sel]
nrois = len(labels)
one_hot_labels = np.zeros((nclasses,nrois),dtype=np.float32)
one_hot_labels[labels,np.arange(nrois)] = 1.0 #maxes
# print(one_hot_labels.shape)
# compose the confusion matrix for the current predictions
pij = np.matmul(one_hot_labels,probs)
# normalize over first dim, so each column is a distribution.
total_weights = pij.sum(axis=0)
zeroed_cls = np.where(total_weights == 0.0)
total_weights[zeroed_cls] = -1
pij /= total_weights[None,:] # normalisation such that pij[i,j] = P(gt=i|pred=j)
# update each column with
for (c,col),w in zip(self.conf_col_avgs,total_weights):
if w > 0:
self.conf_matrix[:,c] = col.update_and_get(pij[:,c],weight=w)
sel = labels > 0 # only confuse fg classes.
# maxes = maxes[sel]
probs = probs[sel,:]
labels = labels[sel]
# nrois = len(labels)
corrects = probs.argmax(axis=1) == labels
fg_accuracy = corrects.sum() / float(len(labels))
# print('Foreground accuracy: {} ({}/{})'.format(fg_accuracy,correct,len(labels)))
self.fg_acc.update_and_get(fg_accuracy,len(labels))
weights = self.class_weights[labels]
wsum = weights.sum()
w_fg_accuracy = (corrects * weights).sum() / wsum
self.weighted_fg_acc.update_and_get(w_fg_accuracy,wsum)
print('fg instances: {} ({})'.format(len(labels),wsum))
def get_avg_pada_weight(self):
return self.avg_pada_weight
def get_dist(self):
current_dist = self.class_weights * self.gt_ins_dist
current_dist /= current_dist.sum()
return current_dist
def get_KL_to_init(self):
return KL_div(self.get_dist(),self.starting_dist)
def get_state(self):
return \
self.weight_db, \
self.conf_matrix, \
np.array([avg.n for _,avg in self.conf_col_avgs]), \
np.array([self.fg_acc.get(), self.fg_acc.n]), \
np.array([self.weighted_fg_acc.get(), self.weighted_fg_acc.n])
class RollingAvg(object):
def __init__(self, max_sample_size, avg_init=None, n_init=None):
self.n = 0
self.max_n = max_sample_size
self.sum = 0.0
self.avg = 0.0
if avg_init is not None:
if n_init is not None:
self.n = n_init
else:
self.n = self.max_n
self.sum = avg_init * self.n
if self.n != 0:
self.avg = self.sum / self.n
def update_and_get(self,sample,weight = 1):
if (self.n + weight) < self.max_n:
self.n += weight
self.sum += sample * weight
elif self.n < self.max_n:
diff = (self.max_n - self.n)
self.sum += sample * diff
self.n = self.max_n
weight = weight - diff
if self.n >= self.max_n:
self.sum = self.sum * (self.n - weight) / self.n + sample * weight
self.avg = self.sum / self.n
return self.avg
def get(self):
return self.avg
class DAScaleFading(object):
"""Fading-in the adversarial objective according the way of DANN:
http://sites.skoltech.ru/compvision/projects/grl/files/paper.pdf
The formula for the weight given the progression p from 0 to 1 is:
2 / (1 + exp(-gamma * p)) - 1
where gamma is chosen by the authors as 10 and kept fixed across experiments."""
def __init__(self,max_iter,gamma=10.0):
self.max_iter = float(max_iter)
self.gamma = float(gamma)
self.it = 0
self.weight = 0.
self.set_iter(self.it)
def set_iter(self,it):
self.it = it
self.weight = 2 / (1 + np.exp(-self.gamma * float(it) / self.max_iter)) - 1
def get_weight(self):
return self.weight
```
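A small sketch of the two helper classes defined at the end of the file; the numbers are illustrative only.

```python
# Hypothetical example, not part of the original file.
fade = DAScaleFading(max_iter=70000, gamma=10.0)
fade.set_iter(35000)
print(fade.get_weight())   # ~0.987: the DANN fading factor halfway through training

avg = RollingAvg(max_sample_size=3)
for x in [1.0, 2.0, 3.0, 4.0]:
    avg.update_and_get(x)
print(avg.get())           # ~2.67: average over roughly the last 3 samples' worth of weight
```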
#### File: Detectron-DA-Faster-RCNN/tools/analyse_detects.py
```python
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import argparse
# import cv2 # NOQA (Must import before importing caffe2 due to bug in cv2)
import os
# import pprint
# import sys
# import time
# from caffe2.python import workspace
# from detectron.core.config import assert_and_infer_cfg
# from detectron.core.config import cfg
# from detectron.core.config import merge_cfg_from_file
# from detectron.core.config import merge_cfg_from_list
# from detectron.core.test_engine import run_inference
from detectron.datasets.dummy_datasets import get_coco_dataset
from detectron.utils.io import load_object
from detectron.utils.logging import setup_logging
# import detectron.utils.c2 as c2_utils
# c2_utils.import_detectron_ops()
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
# cv2.ocl.setUseOpenCL(False)
def parse_args():
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
# parser.add_argument(
# '--vis', dest='vis', help='visualize detections', action='store_true'
# )
# parser.add_argument(
# '--multi-gpu-testing',
# dest='multi_gpu_testing',
# help='using cfg.NUM_GPUS for inference',
# action='store_true'
# )
parser.add_argument(
'--class_weights',
dest='class_weights',
help='class distribution file by summed softmax outputs (of the source set)',
default=None,
type=str,
nargs=1
)
parser.add_argument(
'--voc_class_weights',
dest='voc_class_weights',
help='class distribution file by summed softmax outputs for the target set',
default=None,
type=str,
nargs=3
)
parser.add_argument(
'--target_class_weights',
dest='target_class_weights',
help='class distribution file by summed softmax outputs for the target set',
default=None,
type=str,
nargs=1
)
parser.add_argument(
'--target_divergence',
dest='target_divergence',
help='the desired KL-divergence between source and voc',
default=None,
type=float,
nargs=1
)
parser.add_argument(
'--features',
dest='feats',
help='feature vector collection for t-sne visualisation',
default=None,
type=str,
nargs=1
)
parser.add_argument(
'--do_val_set',
dest='do_val_set',
action='store_true'
)
# # This allows to set all fields of cfg. Use to set TEST.WEIGTHS and optionally override NUM_GPUS:
# parser.add_argument(
# 'opts',
# help='See detectron/core/config.py for all options',
# default=None,
# nargs=argparse.REMAINDER
# )
# if len(sys.argv) == 1:
# parser.print_help()
# sys.exit(1)
return parser.parse_args()
def plt_bars(counts,labels,counts2=None,reversed=True,ax=None,eps=np.finfo(float).eps,figsize=(13,7),log=None,**kwargs):
if ax is None:
fig,ax = plt.subplots(figsize=figsize)
order = counts.argsort()
if counts2 is not None:
ratios = -(counts2 * np.log(counts/(counts2 + eps) + eps) + (1 - counts2) * np.log((1 - counts)/(1 - counts2 + eps) + eps))
print(ratios[1:].sum())
order = ratios.argsort()
if reversed:
order = order[::-1]
x = np.arange(len(labels))
ax.bar(x,counts[order],**kwargs)
if counts2 is not None:
ax.bar(x,counts2[order],alpha=.4,**kwargs)
plt.xticks(x,labels[order],rotation=90)
plt.ylabel('Probability')
if log:
ax.set_yscale('log')
fig.subplots_adjust(bottom=.23)
coco_classes = np.array(get_coco_dataset().classes.values(),dtype=str)
def plot_dists(coco_dist, yisual_dist, source_name,target_name):
plt_bars(coco_dist,coco_classes,yisual_dist)
plt.title('Class distributions of detections: KL( {} || {} ) = {:.4f}'.format(target_name,source_name,KL_div(yisual_dist,coco_dist)))
plt.legend([source_name,target_name])
plt.show()
def plot_dist(coco_dist,name=None,log=None):
plt_bars(coco_dist,coco_classes,log=log)
if name is not None:
plt.legend([name])
plt.show()
def KL_div(target,source,eps=np.finfo(float).eps):
# We take KL(target||source), the distance of the source distribution from the pespective of the target distribution.
# assert len(target.shape) == 1 or len(source.shape) == 1
if len(target.shape) == len(source.shape):
return -(target * np.log(source/(target + eps) + eps)).sum()
elif len(target.shape) == 2:
assert len(source.shape) == 1
return -(target * np.log(source[None,:]/(target + eps) + eps)).sum(axis=1)
elif len(source.shape) == 2:
assert len(target.shape) == 1
target = target[None,:]
return -(target * np.log(source/(target + eps) + eps)).sum(axis=1)
else:
assert False,"Weird shapes received"
def get_dist(wts):
dist = wts.sum(axis=0)
dist /= dist.sum()
return dist
# def plot_dist(dist,dist2=None):
# fig,ax = plt.subplots()
# plt_bars(dist, coco_classes, dist2, ax=ax)
# plt.show()
if __name__ == '__main__':
# workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
logger = setup_logging(__name__)
args = parse_args()
logger.info('Called with args:')
logger.info(args)
assert args.class_weights is not None, 'class_weights file required'
print(str(args.class_weights[0]))
coco_wts = load_object(args.class_weights[0])
print(coco_wts.shape)
coco_dist = get_dist(coco_wts)
print("coco size: ",coco_wts.sum())
assert args.voc_class_weights is not None, 'voc dist files needed'
voc_files = args.voc_class_weights
if args.do_val_set:
voc_files = ['collecting/test/voc_2007_test/generalized_rcnn/class_weights.pkl',
'collecting/test/voc_2012_val/generalized_rcnn/class_weights.pkl']
voc_wts = np.concatenate([load_object(vocfile) for vocfile in voc_files], axis=0)
print(voc_wts.shape)
voc_dist = get_dist(voc_wts)
print('voc size:',voc_wts.sum())
assert args.target_class_weights is not None
# source_wts = wts
# wts = load_object(args.target_class_weights[0])
# print('voc weights overloaded with targets')
yisual_wts = load_object(args.target_class_weights[0])
yisual_dist = get_dist(yisual_wts)
print('yisual size:',yisual_wts.sum())
sets = [(coco_wts,'coco'), (voc_wts, 'voc'), (yisual_wts, 'yisual')]
pairs = [(sets[0],sets[1]),(sets[0],sets[2]),(sets[1],sets[2])]
for set1,set2 in pairs:
for s1,s2 in [(set1,set2),(set2,set1)]:
wts_source,source_name = s1
wts_target,target_name = s2
dist_source, dist_target = get_dist(wts_source), get_dist(wts_target)
kl_div = KL_div(dist_target,dist_source)
print("KL({}||{}) = {}".format(target_name,source_name,kl_div))
plot_dists(dist_source,dist_target,source_name,target_name)
# source_dist = get_dist(source_wts)
#
# # # plot_dists:
# plt_bars(source_dist,coco_classes,get_dist(wts))
# plt.title('Class distribution of detections')
# plt.legend(['coco train','yisual train'])
# plt.show()
#
# voc_dist = get_dist(wts)
# print(KL_div(voc_dist,source_dist))
# # print(KL_div(voc_dist,source_dist))
#
# detects = wts.sum(axis=1)
# voc_mean_detect = detects.mean()
# source_detects = source_wts.sum(axis=1)
# source_mean_detect = source_detects.mean()
# print('mean amount of detections per image: source: {} target: {}'.format(source_mean_detect,voc_mean_detect))
# Filtering VOC
def remove_from_dist(dist,subdists,portions):
aportions = (1 - portions)
return (dist[None,:]/aportions[:,None]) - (portions/aportions)[:,None] * subdists
# (equivalent scalar form: (dist - portion * subdist) / (1 - portion);
#  re-normalizing with new_dist / new_dist.sum() would be another numerically stable option)
def score_fn(proposed,prop_weight):
"""The function to be maximized by (greedy) subset selection"""
return np.log(KL_div(proposed,coco_dist)) - np.log(KL_div(yisual_dist,proposed))
# return KL_div(proposed,coco_dist)/divergences_coco[-1] - KL_div(yisual_dist,proposed)/divergences[-1] #+ prop_weight/total_weight
# return prop_weight**2 *KL_div(proposed,coco_dist)/KL_div(yisual_dist,proposed)
# normalise per-image:
im_weights = voc_wts.sum(axis=1)
total_weight = im_weights.sum()
min_weight = .55 * total_weight
im_dists = voc_wts / im_weights[:, None]
# scores = [ KL_div(yisual_dist,im_dist) - KL_div(im_dist,coco_dist) for im_dist in im_dists] #
# order = np.argsort(scores)[::-1]
N = int(len(im_weights)*1.15)
weights = [total_weight]
divergences = [KL_div(yisual_dist,voc_dist)]
divergences_coco = [KL_div(voc_dist,coco_dist)]
dists = [voc_dist[:]]
dists = np.empty((N+1,81),dtype=np.float32)
dists[0,:] = voc_dist[:]
shown = False
start_score = score_fn(voc_dist,total_weight)
# for i in order:
# im_dist = im_dists[i]
# im_weight = im_weights[i]
# voc_dist = remove_from_dist(voc_dist,im_dist,im_weight/total_weight)
# total_weight -= im_weight
# weights.append(total_weight)
# divergences.append(KL_div(yisual_dist,voc_dist))
# divergences_coco.append(KL_div(voc_dist,coco_dist))
# dists.append(voc_dist[:])
#
# if total_weight < weights[0]/2 and not shown: # if half-way:
# plot_dists(coco_dist,voc_dist,'voc_sub','coco')
# plot_dists(coco_dist,yisual_dist,'coco','yisual')
# plot_dists(voc_dist,yisual_dist,'voc_sub','yisual')
# shown = True
removed = np.full(len(im_weights), False, dtype=bool)
choices = []
remains = len(im_weights)
ns = [remains]
undos = 0
prev_i = -1
best_score = -np.infty
for n in range(N):
if n % 100 == 0:
print(n)
if undos != 0:
print('undos:',undos)
undos = 0
prop_dists = remove_from_dist(voc_dist,im_dists,im_weights/total_weight)
scores = (score_fn(prop_dists,total_weight - im_weights) - score_fn(voc_dist,total_weight)) / np.abs(im_weights) #normalized by im_weights
if total_weight < min_weight:
scores[~removed] = -np.infty
i = np.argmax(scores)
if i == prev_i:
break
prev_i = i
remains += 1 if removed[i] else -1
undos += removed[i]
if removed[i]:
print('undo ({})'.format(undos))
if remains == 0:
break
choices.append(i)
removed[i] = ~removed[i] #flip, on or off.
im_dist = im_dists[i]
im_weight = im_weights[i]
im_weights[i] = -im_weights[i] # flip, on or off. Removing with Negative im weight equals adding again.
# voc_dist = remove_from_dist(voc_dist,im_dist,im_weight/total_weight)
voc_dist = prop_dists[i]
voc_dist /= voc_dist.sum() # for numeric stability
total_weight -= im_weight
weights.append(total_weight)
divergences.append(KL_div(yisual_dist,voc_dist))
divergences_coco.append(KL_div(voc_dist,coco_dist))
# print(len(voc_dist[:]))
dists[n+1,:] = voc_dist[:]
ns.append(remains)
score = score_fn(voc_dist,total_weight)
if score > best_score:
best_score = score
best_dist = voc_dist
best_removed = removed[:]
else:
print('nope',n)
# if total_weight < weights[0]/2 and not shown: # if half-way:
# plot_dists(coco_dist,voc_dist,'coco','voc_sub')
# plot_dists(coco_dist,yisual_dist,'coco','yisual')
# plot_dists(voc_dist,yisual_dist,'voc_sub','yisual')
# shown = True
removed = best_removed
voc_dist = best_dist
# if args.do_val_set:
# np.save('voc_subset_val.npy',~removed)
# else:
# np.save('voc_subset.npy',~removed)
# Some score analysis for the log-KL-divergence-sum-score:
diff = best_score-start_score
factorsum = np.exp(diff)
coco_div_improve = KL_div(voc_dist,coco_dist)/KL_div(dists[0,:],coco_dist)
yisual_div_improve = KL_div(yisual_dist,dists[0,:])/KL_div(yisual_dist,voc_dist)
print('Score impovement: {} -> {}, diff: {} (log-space), {} (factor space), KL(voc||coco)) *= {}, KL(yisual||voc) /= {}'.format(
start_score,best_score,diff,factorsum,coco_div_improve,yisual_div_improve))
print(remains,float(remains)/len(im_weights))
plot_dists(coco_dist,voc_dist,'coco','voc_sub')
plot_dists(coco_dist,yisual_dist,'coco','yisual')
plot_dists(voc_dist,yisual_dist,'voc_sub','yisual')
plt.figure(figsize=(30,20))
plt.plot(weights,divergences)
plt.plot(weights,divergences_coco)
ns = (np.array(ns))*weights[0]/im_dists.shape[0]
plt.plot(ns,divergences)
plt.plot(ns,divergences_coco)
nops = np.linspace(weights[0],weights[-1],len(weights))
plt.plot(nops,ns/weights[0])
plt.plot(nops,divergences)
plt.plot(nops,divergences_coco)
plt.plot([weights[0],weights[-1]],[KL_div(yisual_dist,coco_dist)]*2)
plt.ylim(0,1)
plt.show()
# plt.figure(figsize=(30,20))
# nimgs = np.arange(len(weights))[::-1] + 1
# plt.plot(nimgs,divergences)
# plt.plot(nimgs,divergences_coco)
# plt.plot([nimgs[0],nimgs[-1]],[KL_div(yisual_dist,coco_dist)]*2)
# plt.show()
```
|
{
"source": "jerome-nexedi/Zope",
"score": 3
}
|
#### File: ZTUtils/tests/testZope.py
```python
from unittest import TestCase, makeSuite
import urllib
from ZTUtils.Zope import make_query, complex_marshal
from ZTUtils.Zope import make_hidden_input
from DateTime import DateTime
class QueryTests(TestCase):
def testMarshallLists(self):
'''Test marshalling lists'''
test_date = DateTime()
list_ = [1, test_date, 'str']
result = complex_marshal([('list',list_),])
assert result == [('list', ':int:list', 1),
('list', ':date:list', test_date),
('list', ':list', 'str')]
def testMarshallRecords(self):
'''Test marshalling records'''
test_date = DateTime()
record = {'arg1': 1, 'arg2': test_date, 'arg3': 'str'}
result = complex_marshal([('record',record),])
assert result == [('record.arg1', ':int:record', 1),
('record.arg2', ':date:record', test_date),
('record.arg3', ':record', 'str')]
def testMarshallListsInRecords(self):
'''Test marshalling lists inside of records'''
test_date = DateTime()
record = {'arg1': [1, test_date, 'str'], 'arg2': 1}
result = complex_marshal([('record',record),])
assert result == [('record.arg1', ':int:list:record', 1),
('record.arg1', ':date:list:record', test_date),
('record.arg1', ':list:record', 'str'),
('record.arg2', ':int:record', 1)]
def testMakeComplexQuery(self):
'''Test that make_query returns sane results'''
test_date = DateTime()
quote_date = urllib.quote(str(test_date))
record = {'arg1': [1, test_date, 'str'], 'arg2': 1}
list_ = [1, test_date, 'str']
date = test_date
int_ = 1
str_ = 'str'
query = make_query(date=test_date, integer=int_, listing=list_,
record=record, string=str_)
assert query == 'date:date=%s&integer:int=1&listing:int:list=1&listing:date:list=%s&listing:list=str&string=str&record.arg1:int:list:record=1&record.arg1:date:list:record=%s&record.arg1:list:record=str&record.arg2:int:record=1'%(quote_date,quote_date,quote_date)
def testMakeHiddenInput(self):
tag = make_hidden_input(foo='bar')
self.assertEqual(tag, '<input type="hidden" name="foo" value="bar">')
tag = make_hidden_input(foo=1)
self.assertEqual(tag, '<input type="hidden" name="foo:int" value="1">')
# Escaping
tag = make_hidden_input(foo='bar & baz')
self.assertEqual(tag, '<input type="hidden" name="foo" value="bar & baz">')
tag = make_hidden_input(foo='<bar>')
self.assertEqual(tag, '<input type="hidden" name="foo" value="<bar>">')
tag = make_hidden_input(foo='"bar"')
self.assertEqual(tag, '<input type="hidden" name="foo" value=""bar"">')
def test_suite():
return makeSuite(QueryTests)
```
|
{
"source": "JeromeParadis/django-dynasite",
"score": 2
}
|
#### File: django-dynasite/dynasite/cache.py
```python
from __future__ import unicode_literals
import hashlib
import re
import time
from django.conf import settings
from django.core.cache import caches
from django.utils.encoding import iri_to_uri, force_bytes, force_text
from django.utils.http import http_date
from django.utils.timezone import get_current_timezone_name
from django.utils.translation import get_language
from django.utils.cache import cc_delim_re, _i18n_cache_key_suffix
def _generate_cache_key(request, method, headerlist, key_prefix):
"""Returns a cache key from the headers given in the header list."""
ctx = hashlib.md5()
for header in headerlist:
value = request.META.get(header, None)
if value is not None:
ctx.update(force_bytes(value))
path = hashlib.md5(force_bytes(iri_to_uri(request.build_absolute_uri())))
cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
key_prefix, method, path.hexdigest(), ctx.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def _generate_cache_header_key(key_prefix, request):
"""Returns a cache key for the header cache."""
path = hashlib.md5(force_bytes(iri_to_uri(request.build_absolute_uri())))
cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
key_prefix, path.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def get_cache_key(request, key_prefix=None, method='GET', cache=None):
"""
Returns a cache key based on the request path and query. It can be used
in the request phase because it pulls the list of headers to take into
account from the global path registry and uses those to build a cache key
to check against.
If there is no headerlist stored, the page needs to be rebuilt, so this
function returns None.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
headerlist = cache.get(cache_key, None)
if headerlist is not None:
return _generate_cache_key(request, method, headerlist, key_prefix)
else:
return None
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
"""
Learns what headers to take into account for some request path from the
response object. It stores those headers in a global path registry so that
later access to that path will know what headers to take into account
without building the response object itself. The headers are named in the
Vary header of the response, but we want to prevent response generation.
The list of headers to use for cache key generation is stored in the same
cache as the pages themselves. If the cache ages some data out of the
cache, this just means that we have to build the response once to get at
the Vary header and so at the list of headers to use for the cache key.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
if response.has_header('Vary'):
headerlist = ['HTTP_'+header.upper().replace('-', '_')
for header in cc_delim_re.split(response['Vary'])]
cache.set(cache_key, headerlist, cache_timeout)
return _generate_cache_key(request, request.method, headerlist, key_prefix)
else:
# if there is no Vary header, we still need a cache key
# for the request.get_full_path()
cache.set(cache_key, [], cache_timeout)
return _generate_cache_key(request, request.method, [], key_prefix)
```
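The two entry points above mirror Django's own cache-key helpers. Below is a hedged sketch of how they are typically paired in a caching middleware; the middleware class and its wiring are hypothetical, not part of this repository:
```python
# Hypothetical middleware sketch: get_cache_key in the request phase,
# learn_cache_key in the response phase. Illustrative only.
from django.conf import settings
from django.core.cache import caches

from dynasite.cache import get_cache_key, learn_cache_key


class SimpleDynasiteCacheMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response
        self.cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]

    def __call__(self, request):
        # Request phase: returns None until the headerlist has been learned.
        key = get_cache_key(request, method=request.method, cache=self.cache)
        if key is not None:
            cached = self.cache.get(key)
            if cached is not None:
                return cached

        response = self.get_response(request)

        # Response phase: store the Vary headerlist and cache the response.
        if request.method == "GET" and response.status_code == 200:
            key = learn_cache_key(request, response, cache=self.cache)
            self.cache.set(key, response, settings.CACHE_MIDDLEWARE_SECONDS)
        return response
```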
#### File: django-dynasite/dynasite/context_processors.py
```python
from django.conf import settings
from django.template import RequestContext
from django.contrib.sites.models import Site
from django.contrib.sites.requests import RequestSite
from .models import get_current_site
def dynasite_context_processor(request):
try:
dynasite = { 'site': get_current_site(request) }
dynasite['site_url'] = 'http%s://%s' % ('s' if request.is_secure() else '', dynasite['site'].domain, )
except Exception:
dynasite = None
return { 'dynasite': dynasite }
```
#### File: django-dynasite/dynasite/models.py
```python
from django.conf import settings
from django.contrib.sites.models import Site
SITES_CACHE = {}
def get_current_site(request):
"""
Get current site by SITE_ID if defined or use host otherwise
"""
if hasattr(settings, 'SITE_ID') and settings.SITE_ID:
return Site.objects.get_current()
host = request.get_host()
if host == 'testserver' and hasattr(settings, 'DEFAULT_SITE_ID'):
return Site.objects.get(pk=settings.DEFAULT_SITE_ID)
# Get from host cache if exists
# -----------------------
if host in SITES_CACHE:
return SITES_CACHE[host]
# Try by raw hostname
# -----------------------
try:
site = Site.objects.get(domain=host)
SITES_CACHE[host] = site
return site
except Site.DoesNotExist:
pass
# Try by hostname without port
# -----------------------
shost = host.rsplit(':', 1)[0] # only host, without port
if shost != host:
if shost in SITES_CACHE:
return SITES_CACHE[shost]
try:
site = Site.objects.get(domain=shost)
SITES_CACHE[host] = site
return site
except Site.DoesNotExist:
pass
raise Site.DoesNotExist
```
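For illustration, a minimal sketch of calling `get_current_site` from a view; the view function and response text are hypothetical:
```python
# Hypothetical view using get_current_site; the view name is illustrative only.
from django.http import HttpResponse

from dynasite.models import get_current_site


def current_site_view(request):
    # Resolved via SITE_ID if set, otherwise by the request host (memoized in SITES_CACHE).
    site = get_current_site(request)
    return HttpResponse("Serving %s (%s)" % (site.name, site.domain))
```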
|
{
"source": "jeromepatel/food-recognition-benchmark-starter-kit",
"score": 3
}
|
#### File: food-recognition-benchmark-starter-kit/evaluator/utils.py
```python
import os
import signal
from contextlib import contextmanager
class TimeoutException(Exception): pass
@contextmanager
def time_limit(seconds):
def signal_handler(signum, frame):
raise TimeoutException("Prediction timed out!")
use_signals_in_timeout = True
if os.name == 'nt':
"""
Windows doesn't support signals, so
timeout decorators usually fall apart there.
Hence we force them not to use signals
whenever the timeout decorator is used.
"""
use_signals_in_timeout = False
if use_signals_in_timeout:
signal.signal(signal.SIGALRM, signal_handler)
signal.alarm(seconds)
try:
yield
finally:
if use_signals_in_timeout:
signal.alarm(0)
```
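A minimal usage sketch for the `time_limit` context manager above; the `slow_prediction` stand-in is hypothetical, and note that on Windows the signal-based alarm is skipped, so no timeout is actually enforced there:
```python
# Hypothetical usage of time_limit; slow_prediction is a stand-in for a model call.
import time

from evaluator.utils import TimeoutException, time_limit


def slow_prediction():
    time.sleep(5)  # pretend this is an expensive inference call


try:
    with time_limit(2):  # raise TimeoutException if the block exceeds 2 seconds (POSIX only)
        slow_prediction()
except TimeoutException as e:
    print(e)  # -> Prediction timed out!
```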
#### File: food-recognition-benchmark-starter-kit/utils/mmdet_inference.py
```python
import mmcv
import numpy as np
import torch
from mmcv.ops import RoIPool
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmdet.core import get_classes
from mmdet.datasets import replace_ImageToTensor
from mmdet.datasets.pipelines import Compose
from mmdet.models import build_detector
# import time
def inference(model, imgs):
# start = time.process_time()
imgs = [imgs]
cfg = model.cfg
device = 'cuda:0'
if isinstance(imgs[0], np.ndarray):
cfg = cfg.copy()
# set loading pipeline type
cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
test_pipeline = Compose(cfg.data.test.pipeline)
datas = []
data = dict(img_info=dict(filename=imgs[0]), img_prefix=None)
# build the data pipeline
data = test_pipeline(data)
datas.append(data)
data = collate(datas, samples_per_gpu=len(imgs))
# just get the actual data from DataContainer
data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]
data['img'] = [img.data[0] for img in data['img']]
# scatter to specified GPU
data = scatter(data, [device])[0]
# forward the model
with torch.no_grad():
results = model(return_loss=False, rescale=True, **data)
# your code here
# print(time.process_time() - start)
return results[0]
```
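A hedged sketch of driving the `inference` helper above. The config/checkpoint paths are placeholders, and `init_detector` from `mmdet.apis` is assumed to be available in the installed mmdet version:
```python
# Illustrative only: paths are placeholders and mmdet.apis.init_detector is assumed available.
from mmdet.apis import init_detector

from utils.mmdet_inference import inference

model = init_detector("configs/food_config.py", "checkpoints/food_model.pth", device="cuda:0")

# The helper accepts an image path or a numpy array (BGR) for a single image.
result = inference(model, "images/sample.jpg")
print(len(result))  # per-class detections; the exact structure depends on the model
```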
|
{
"source": "jeromerg/filoc",
"score": 3
}
|
#### File: filoc/backends/backend_pickle.py
```python
import os
import pickle
from typing import Dict, Any
from fsspec import AbstractFileSystem
from filoc.contract import PropsList, BackendContract, Constraints, Props
from filoc.utils import filter_and_coerce_loaded_file_content, coerce_file_content_to_write
class PickleBackend(BackendContract):
"""
filoc backend used to read data from Pickle files and write into them. This implementation is used when you call the filoc factory with the ``backend`` argument set to ``'pickle'``. Example:
.. code-block:: python
loc = filoc('/my/locpath/{id}/data.pickle', backend='pickle')
It is recommended to only read files that were written by filoc itself. If you want to read pickle files written by a third-party library, it is recommended to implement your own backend,
so that you can better handle the edge cases and emit clearer error messages.
"""
def __init__(self, is_singleton) -> None:
super().__init__()
self.is_singleton = is_singleton
def read(self, fs: AbstractFileSystem, path: str, path_props : Props, constraints: Constraints) -> PropsList:
"""(see BackendContract contract) """
with fs.open(path, 'rb') as f:
return filter_and_coerce_loaded_file_content(path, pickle.load(f), path_props, constraints, self.is_singleton)
def write(self, fs: AbstractFileSystem, path: str, props_list: PropsList) -> None:
"""(see BackendContract contract)"""
fs.makedirs(os.path.dirname(path), exist_ok=True)
with fs.open(path, 'wb') as f:
return pickle.dump(coerce_file_content_to_write(path, props_list, self.is_singleton), f)
```
#### File: filoc/filoc/filoc_io.py
```python
import logging
import os
import re
import uuid
from io import UnsupportedOperation
from typing import Dict, Any, List, Mapping, Optional, Set
from typing import Tuple
import fsspec
import parse
from fsspec import AbstractFileSystem
from fsspec.core import OpenFile
from filoc.contract import Constraints, Constraint
log = logging.getLogger('filoc')
# ---------
# Constants
# ---------
_re_natural = re.compile(r"(\d+)")
_re_path_placeholder = re.compile(r'({[^}]+})')
# -------
# Helpers
# -------
def sort_natural(li: List[str]) -> List[str]:
""" Perform natural sort of string containing numbers. Floating number are currently supported but cannot be compared to integers (missing dot separator)"""
# TODO: support mix of int and float
return sorted(li, key=lambda s: [int(part) if part.isdigit() else part for part in _re_natural.split(s)])
def coerce_nullable_mapping(d) -> Optional[Mapping[str, Any]]:
"""
Pass through Mapping or None instance, tries to call ``to_dict()`` method elsewhere
Args:
d:
Returns:
the coerced mapping of type ``Mapping[str, Any]`` or None
Raises:
TypeError: if ``d`` is neither an instance of Mapping, nor None, nor contains `to_dict()` method
"""
if d is None:
return d
if isinstance(d, Mapping):
return d
if getattr(d, "to_dict", None):
# especially valid for pandas Series
return d.to_dict()
raise TypeError(f"Expected instance of Mapping or implementing to_dict, got {type(d)}!")
def mix_dicts_and_coerce(dict1, dict2) -> Mapping[str, Any]:
"""
Coerce dict1 and dict2 to Mapping and mix both together
Args:
dict1: first dictionary argument (either Mapping or implements `to_dict()`)
dict2: second dictionary argument (either Mapping or implements `to_dict()`)
Returns:
The combined Mapping
"""
dict1 = coerce_nullable_mapping(dict1)
dict2 = coerce_nullable_mapping(dict2)
dict2 = None if dict2 is not None and len(dict2) == 0 else dict2
if dict1 and dict2:
result = dict()
result.update(dict1)
result.update(dict2)
return result
elif dict1:
return dict1
elif dict2:
return dict2
else:
return dict()
# TODO: Support path character escaping
# -------------------
# Class FilocIO
# -------------------
class FilocIO:
"""
Class providing access to files, given the locpath definition. The locpath is a format string whose placeholders
are variables used by FilocIO to map variables to paths and paths to variables.
"""
def __init__(
self, locpath: str,
writable: bool = False,
fs: AbstractFileSystem = None
) -> None:
super().__init__()
self.original_locpath = locpath
self.writable = writable
# split locpath to distinguish placeholders from constant parts
path_elts = _re_path_placeholder.split(locpath)
# Normalize the input path, by creating an fsspec OpenFile, then by getting the path property,
# which is normalized. But the placeholders within the locpath are not valid, so we replace them by
# a valid random string, build the OpenFile, get the normalized string, and replace the random
# string by the original placeholders.
path_elts_and_ersatz = [ (elt, str(uuid.uuid4()) if elt.startswith("{") else None) for elt in path_elts ]
some_valid_path = "".join([ersatz if ersatz else elt for elt, ersatz in path_elts_and_ersatz])
if fs is None:
open_file = fsspec.open(some_valid_path)
else:
open_file = OpenFile(fs, some_valid_path)
# now build the normalized locpath, by replacing ersatz string by the original placeholder strings
self.locpath = open_file.path
for elt, ersatz in path_elts_and_ersatz:
if ersatz:
self.locpath = self.locpath.replace(ersatz, elt)
self.fs = open_file.fs # type: AbstractFileSystem
self.path_parser = parse.compile(self.locpath) # type: parse.Parser
# Get the root folder: the last folder, that is not variable
self.root_folder = self.locpath.split("{")[0]
self.root_folder = self.fs.sep.join((self.root_folder + "dummy_to_ensure_subfolder").split(self.fs.sep)[:-1])
# parse library contains the _named_fields property, which provides us with the set of placeholder names
# noinspection PyProtectedMember
self.path_props = set(self.path_parser._named_fields) # type: Set[str]
# noinspection PyDefaultArgument
def parse_path_properties(self, path: str) -> Dict[str, Any]:
"""
Extract the ``self.locpath`` placeholder values contained in ``path``
Args:
path: path string
Returns:
A dictionary containing the "placeholder name" -> value mapping
"""
try:
return self.path_parser.parse(path).named
except Exception as e:
raise ValueError(f'Could not parse {path} with {self.locpath} parser: {e}')
def render_path(self, constraints : Optional[Constraints] = None, **constraints_kwargs : Constraint) -> str:
"""
Render the path defined by the provided placeholder values (``constraints``).
Args:
constraints: The placeholders values required by ``self.locpath``.
**constraints_kwargs: The placeholders values required by ``self.locpath``.
Returns:
The rendered path
Raises:
ValueError: If a placeholder value is missing
"""
constraints = mix_dicts_and_coerce(constraints, constraints_kwargs)
undefined_keys = self.path_props - set(constraints)
if len(undefined_keys) > 0:
raise ValueError('Required props undefined: {}. Provided: {}'.format(undefined_keys, constraints))
return self.locpath.format(**constraints) # result should be normalized, because locpath is
def render_glob_path(self, constraints : Optional[Constraints] = None, **constraints_kwargs : Constraint) -> str:
"""
Render a glob path defined by the provided placeholder values (``constraints``). The missing placeholders
are replaced by ``?*`` in the glob path.
Args:
constraints: The placeholders values defined in ``self.locpath``.
**constraints_kwargs: The placeholders values defined in ``self.locpath``.
Returns:
A glob path
"""
constraints = mix_dicts_and_coerce(constraints, constraints_kwargs)
provided_keys = set(constraints)
undefined_keys = self.path_props - provided_keys
defined_keys = self.path_props - undefined_keys
path_values = dict()
path_values.update({(k, constraints[k]) for k in defined_keys})
glob_path = self.locpath
for undefined_key in undefined_keys:
glob_path = re.sub(r'{' + undefined_key + r'(?::[^}]*)?}', '?*', glob_path)
# finally format
glob_path = glob_path.format(**path_values)
return glob_path # result should be normalized, because locpath is
def list_paths(self, constraints : Optional[Constraints] = None, **constraints_kwargs : Constraint) -> List[str]:
"""
Gets the list of all existing and valid paths fulfilling the provided constraints
Args:
constraints: The equality constraints applied to the ``self.locpath`` placeholders
**constraints_kwargs: The equality constraints applied to the ``self.locpath`` placeholders
Returns:
The list of valid and existing paths fulfilling the provided constraints
"""
constraints = mix_dicts_and_coerce(constraints, constraints_kwargs)
paths = self.fs.glob(self.render_glob_path(constraints))
return sort_natural(paths)
def list_paths_and_props(self, constraints : Optional[Constraints] = None, **constraints_kwargs : Constraint) -> List[Tuple[str, Dict[str, Any]]]:
"""
Gets the list of all existing and valid paths fulfilling the provided constraints, along with the list of associated placeholder values
Args:
constraints: The equality constraints applied to the ``self.locpath`` placeholders
**constraints_kwargs: The equality constraints applied to the ``self.locpath`` placeholders
Returns:
A list of tuples containing for each valid path, the path and the list of related placeholder values
"""
constraints = mix_dicts_and_coerce(constraints, constraints_kwargs)
paths = self.list_paths(constraints)
return [(p, self.parse_path_properties(p)) for p in paths]
def exists(self, constraints : Optional[Constraints] = None, **constraints_kwargs : Constraint) -> bool:
"""
Checks if the path defined by the provided placeholder values (``constraints``) exists
Args:
constraints: The equality constraints applied to the ``self.locpath`` placeholders
**constraints_kwargs: The equality constraints applied to the ``self.locpath`` placeholders
Returns:
True if the path exists, False elsewhere
"""
constraints = mix_dicts_and_coerce(constraints, constraints_kwargs)
return self.fs.exists(self.render_path(constraints))
def open(
self,
constraints : Constraints,
mode : str = "rb",
block_size : int = None,
cache_options : Optional[Dict] = None,
**kwargs
):
"""
Opens the path defined by the provided placeholder values (``constraints``)
Args:
constraints: The equality constraints applied to the ``self.locpath`` placeholders
mode: See builtin ``open()``
block_size: Some indication of buffering - this is a value in bytes
cache_options: Extra arguments to pass through to the cache.
**kwargs: Additional keyed arguments passed to ``fsspec.OpenFile.open(...)``
Returns:
a file-like object from the underlying fsspec filesystem
"""
is_writing = len(set(mode) & set("wa+")) > 0
if is_writing and not self.writable:
raise UnsupportedOperation('this filoc is not writable. Set writable flag to True to enable writing')
path = self.render_path(constraints)
dirname = os.path.dirname(path)
if is_writing:
self.fs.makedirs(dirname, exist_ok=True)
return self.fs.open(path, mode, block_size, cache_options, **kwargs)
# noinspection PyDefaultArgument
def delete(self, constraints : Optional[Constraints] = {}, dry_run=False):
"""
Delete the path defined by the provided placeholder values (``constraints``)
Args:
constraints: The equality constraints applied to the ``self.locpath`` placeholders
dry_run: If True, only simulates the deletion
"""
# TODO: Unit test to test deletion of folders
if not self.writable:
raise UnsupportedOperation('this filoc is not writable. Set writable flag to True to enable deleting')
path_to_delete = self.list_paths(constraints)
dry_run_log_prefix = '(dry_run) ' if dry_run else ''
log.info(f'{dry_run_log_prefix}Deleting {len(path_to_delete)} files with path_props "{constraints}"')
for path in path_to_delete:
log.info(f'{dry_run_log_prefix}Deleting "{path}"')
if dry_run:
continue
if self.fs.isfile(path):
self.fs.delete(path)
elif self.fs.isdir(path):
self.fs.rm(path, recursive=True)
else:
raise ValueError(f'path is neither a directory nor a file: "{path}"')
log.info(f'{dry_run_log_prefix}Deleted {len(path_to_delete)} files with path_props "{constraints}"')
```
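A short usage sketch of `FilocIO`, consistent with the docstrings above and with the test suite further down; the locpath and values are illustrative and a temporary directory is assumed:
```python
# Illustrative FilocIO round trip; the locpath and values are made up for the example.
import json
import tempfile

from filoc import FilocIO

tmp = tempfile.mkdtemp().replace("\\", "/")
loc = FilocIO(tmp + "/simid={simid:d}/epid={epid:d}/params.json", writable=True)

# open() renders the path from the placeholder values and creates missing folders on write
with loc.open({"simid": 1, "epid": 10}, "w") as f:
    json.dump({"a": 100}, f)

print(loc.exists({"simid": 1, "epid": 10}))   # True
print(loc.list_paths({"simid": 1}))           # every existing epid path for simid=1
print(loc.parse_path_properties(loc.render_path({"simid": 1, "epid": 10})))
# -> {'simid': 1, 'epid': 10}
```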
#### File: filoc/frontends/frontend_json.py
```python
import logging
from collections.abc import Mapping, Collection
from filoc.contract import SingletonExpectedError, TContents, PropsList, TContent, FrontendContract, \
FrontendConversionError, ReadOnlyPropsList
log = logging.getLogger('filoc')
class JsonFrontend(FrontendContract):
"""
JSON frontend implementation.
"""
def read_content(self, props_list: PropsList) -> TContent:
"""(see FrontendContract contract)"""
if len(props_list) != 1:
raise SingletonExpectedError(f'Expected singleton, got {len(props_list)} items to convert to content')
return props_list[0]
def read_contents(self, props_list: PropsList) -> TContents:
"""(see FrontendContract contract)"""
return props_list
def write_content(self, content: TContent) -> ReadOnlyPropsList:
"""(see FrontendContract contract)"""
if isinstance(content, Mapping):
return [content]
else:
raise FrontendConversionError(f'Expected instance of Mapping, got {type(content).__name__}')
def write_contents(self, contents: TContents) -> ReadOnlyPropsList:
"""(see FrontendContract contract)"""
if isinstance(contents, Collection):
return contents
else:
raise FrontendConversionError(f'Expected instance of Collection, got {type(contents).__name__}')
```
#### File: filoc/tests/test_FilocSingle.py
```python
import json
import os
import shutil
import tempfile
import time
import unittest
from pathlib import Path
from filoc import filoc_json, FilocIO
# noinspection PyMissingOrEmptyDocstring
from filoc.contract import SingletonExpectedError
def touch_file(file_path):
os.makedirs(os.path.dirname(file_path))
Path(file_path).touch()
# noinspection DuplicatedCode
# noinspection PyMissingOrEmptyDocstring
class TestFilocSingle(unittest.TestCase):
"""
TODO:
- Test cache behavior on delete of files (should currently fail --> TODO DEV Feature)
"""
def setUp(self):
self.maxDiff = None
self.test_dir = tempfile.mkdtemp().replace('\\', '/')
self.path_fmt = self.test_dir + r'/simid={simid:d}/epid={epid:d}/hyperparameters.json'
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_read_write_read(self):
wloc = FilocIO(self.path_fmt, writable=True)
with wloc.open({"simid": 1, "epid": 10}, "w") as f: json.dump({'a': 100}, f)
with wloc.open({"simid": 1, "epid": 20}, "w") as f: json.dump({'a': 200}, f)
with wloc.open({"simid": 2, "epid": 10}, "w") as f: json.dump({'a': 300}, f)
with wloc.open({"simid": 2, "epid": 20}, "w") as f: json.dump({'a': 400}, f)
loc = filoc_json(self.path_fmt)
p = loc.read_contents({'epid': 10})
self.assertEqual(len(p), 2)
self.assertEqual('[{"a": 100, "epid": 10, "simid": 1}, {"a": 300, "epid": 10, "simid": 2}]', json.dumps(p, sort_keys=True))
# change file
time.sleep(0.1) # ensures different timestamp
with wloc.open({"simid": 2, "epid": 10}, "w") as f:
json.dump({'a': 333}, f)
p = loc.read_contents({'epid': 10})
self.assertEqual(len(p), 2)
self.assertEqual('[{"a": 100, "epid": 10, "simid": 1}, {"a": 333, "epid": 10, "simid": 2}]', json.dumps(p, sort_keys=True))
def test_read_all(self):
wloc = FilocIO(self.path_fmt, writable=True)
with wloc.open({"simid": 1, "epid": 10}, "w") as f: json.dump({'a': 100}, f)
with wloc.open({"simid": 1, "epid": 20}, "w") as f: json.dump({'a': 200}, f)
with wloc.open({"simid": 2, "epid": 10}, "w") as f: json.dump({'a': 300}, f)
with wloc.open({"simid": 2, "epid": 20}, "w") as f: json.dump({'a': 400}, f)
loc = filoc_json(self.path_fmt)
p = loc.read_contents()
self.assertEqual(len(p), 4)
self.assertEqual('[{"a": 100, "epid": 10, "simid": 1}, {"a": 200, "epid": 20, "simid": 1}, {"a": 300, "epid": 10, "simid": 2}, {"a": 400, "epid": 20, "simid": 2}]', json.dumps(p, sort_keys=True))
def test_with_constraints_on_path_placeholders(self):
wloc = FilocIO(self.path_fmt, writable=True)
with wloc.open({"simid": 1, "epid": 10}, "w") as f: json.dump({'a': 100}, f)
with wloc.open({"simid": 1, "epid": 20}, "w") as f: json.dump({'a': 200}, f)
with wloc.open({"simid": 2, "epid": 10}, "w") as f: json.dump({'a': 300}, f)
with wloc.open({"simid": 2, "epid": 20}, "w") as f: json.dump({'a': 400}, f)
loc = filoc_json(self.path_fmt)
p = loc.read_contents({'epid': 10})
self.assertEqual(len(p), 2)
self.assertEqual('[{"a": 100, "epid": 10, "simid": 1}, {"a": 300, "epid": 10, "simid": 2}]', json.dumps(p, sort_keys=True))
def test_with_constraints_on_content_attributes(self):
wloc = FilocIO(self.path_fmt, writable=True)
with wloc.open({"simid": 1, "epid": 10}, "w") as f: json.dump({'a': 100}, f)
with wloc.open({"simid": 1, "epid": 20}, "w") as f: json.dump({'a': 200}, f)
with wloc.open({"simid": 2, "epid": 10}, "w") as f: json.dump({'a': 300}, f)
with wloc.open({"simid": 2, "epid": 20}, "w") as f: json.dump({'a': 400}, f)
loc = filoc_json(self.path_fmt)
p = loc.read_contents({'a': 300})
self.assertEqual(len(p), 1)
self.assertEqual('[{"a": 300, "epid": 10, "simid": 2}]', json.dumps(p, sort_keys=True))
def test_read_contents_with_cache(self):
print("write files")
wloc = FilocIO(self.path_fmt, writable=True)
with wloc.open({"simid": 1, "epid": 10}, "w") as f:
json.dump({'a': 100}, f)
with wloc.open({"simid": 1, "epid": 20}, "w") as f:
json.dump({'a': 200}, f)
with wloc.open({"simid": 2, "epid": 10}, "w") as f:
json.dump({'a': 300}, f)
with wloc.open({"simid": 2, "epid": 20}, "w") as f:
json.dump({'a': 400}, f)
loc = filoc_json(self.path_fmt, cache_locpath='.cache')
print("read_contents 1")
p = loc.read_contents({'epid': 10})
self.assertEqual(len(p), 2)
self.assertEqual('[{"a": 100, "epid": 10, "simid": 1}, {"a": 300, "epid": 10, "simid": 2}]',
json.dumps(p, sort_keys=True))
time.sleep(0.1) # small delay between the two writings to ensure that the file gets an new timestamp
print("change one file")
# change to file triggers cache refresh
with wloc.open({"simid": 2, "epid": 10}, "w") as f:
json.dump({'a': 333}, f)
# f.flush()
# os.fsync(f.fileno())
print("re read_contents 2")
p = loc.read_contents({'epid': 10})
self.assertEqual(len(p), 2)
self.assertEqual('[{"a": 100, "epid": 10, "simid": 1}, {"a": 333, "epid": 10, "simid": 2}]', json.dumps(p, sort_keys=True))
# act_assert()
# Trick to test: signature change does not take effect, because of cache
loc = filoc_json(self.path_fmt, cache_locpath='.cache')
print("re read_contents 3")
p = loc.read_contents({'epid': 10})
self.assertEqual(len(p), 2)
self.assertEqual('[{"a": 100, "epid": 10, "simid": 1}, {"a": 333, "epid": 10, "simid": 2}]',
json.dumps(p, sort_keys=True))
def test_write_contents(self):
wloc = filoc_json(self.path_fmt, writable=True)
wloc._write_props_list([
{"simid": 1, "epid": 10, 'a': 100},
{"simid": 1, "epid": 20, 'a': 200},
{"simid": 2, "epid": 10, 'a': 300},
{"simid": 2, "epid": 20, 'a': 400},
])
wloc = FilocIO(self.path_fmt)
with wloc.open({"simid": 1, "epid": 10}) as f:
c1 = json.load(f)
with wloc.open({"simid": 1, "epid": 20}) as f:
c2 = json.load(f)
with wloc.open({"simid": 2, "epid": 10}) as f:
c3 = json.load(f)
with wloc.open({"simid": 2, "epid": 20}) as f:
c4 = json.load(f)
self.assertEqual('{"a": 100}', json.dumps(c1, sort_keys=True))
self.assertEqual('{"a": 200}', json.dumps(c2, sort_keys=True))
self.assertEqual('{"a": 300}', json.dumps(c3, sort_keys=True))
self.assertEqual('{"a": 400}', json.dumps(c4, sort_keys=True))
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jeromerobert/gcvb",
"score": 2
}
|
#### File: gcvb/dashboard/index.py
```python
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import os
import io
import gcvb.db as db
from gcvb.util import str_to_ip
import gcvb.loader as loader
if __name__ == '__main__':
from app import app
from apps import runs, run, test, history
else:
from .app import app
from .apps import runs, run, test, history
import flask
cwd = os.getcwd()
url = dcc.Location(id='url', refresh=False)
navbar = dbc.NavbarSimple(
children=[
dbc.NavItem(dbc.NavLink("Runs", href="/runs")),
dbc.NavItem(dbc.NavLink("Last run", href="/run")),
dbc.NavItem(dbc.NavLink("Data", href="/data")),
],
brand="gcvb-dashboard",
brand_href="",
sticky="top",
)
content = html.Div(id="page-content")
app.layout=html.Div([url, navbar, content])
def _get_mimetype(base, test, filename):
base = int(base)
if base not in loader.loader.allowed_files:
flask.abort(404)
if test not in loader.loader.allowed_files[base]:
flask.abort(404)
if filename not in loader.loader.allowed_files[base][test]:
flask.abort(404)
return loader.loader.allowed_files[base][test][filename].get(
"mimetype", "text/plain"
)
@app.server.route("/files/<base>/<test>/<file>")
def serve_from_results(base, test, file):
return flask.send_file(
f"{cwd}/results/{base}/{test}/{file}", mimetype=_get_mimetype(base, test, file)
)
@app.server.route("/dbfiles/<base>/<test>/#")
def serve_from_db(base, test, filename):
return flask.send_file(
io.BytesIO(db.retrieve_file(int(base), test, filename)),
mimetype=_get_mimetype(base, test, filename)
)
@app.callback(Output('page-content', 'children'),
[Input('url', 'pathname')])
def display_page(pathname):
if not pathname:
return 'Bonjour'
page=pathname.split("/")
if page[1] == 'runs':
return runs.layout
if page[1] == 'run':
return run.layout
if page[1] == 'test':
return test.layout
if page[1] == 'history':
return history.layout
else:
return 'Bonjour'
def run_server(debug=False, bind_to="127.0.0.1:8050"):
host, port = str_to_ip(bind_to)
app.run_server(debug=debug, host=host, port=port)
if __name__ == '__main__':
app.run_server(debug=True)
```
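A minimal way to launch the dashboard through the `run_server` helper defined above; the bind address is just an example, and the import path assumes the package layout shown in the file header:
```python
# Example launcher; the bind address is illustrative.
from gcvb.dashboard.index import run_server

if __name__ == "__main__":
    run_server(debug=True, bind_to="0.0.0.0:8050")
```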
#### File: gcvb/gcvb/db.py
```python
import sqlite3
import os
import glob
import gzip
from collections import defaultdict
from . import util
import datetime
#SCRIPTS
creation_script="""
CREATE TABLE gcvb(id INTEGER PRIMARY KEY,
command_line TEXT,
yaml_file TEXT,
modifier TEXT,
creation_date TIMESTAMP);
CREATE TABLE run(id INTEGER PRIMARY KEY,
start_date TIMESTAMP,
end_date TIMESTAMP,
gcvb_id INTEGER,
config_id TEXT,
FOREIGN KEY(gcvb_id) REFERENCES gcvb(id));
CREATE TABLE test(id INTEGER PRIMARY KEY,
name TEXT,
start_date TIMESTAMP,
end_date TIMESTAMP,
run_id INTEGER,
FOREIGN KEY(run_id) REFERENCES run(id));
CREATE TABLE task(id INTEGER PRIMARY KEY,
step INTEGER,
parent INTEGER,
start_date TIMESTAMP,
end_date TIMESTAMP,
test_id INTEGER,
status INTEGER DEFAULT -3, -- >=0, exit_status | -1 running | -2 ready | -3 pending
FOREIGN KEY(test_id) REFERENCES test(id));
CREATE TABLE valid(id INTEGER PRIMARY KEY,
metric TEXT,
value REAL,
test_id INTEGER,
task_step INTEGER,
FOREIGN KEY(task_step) REFERENCES task(step),
FOREIGN KEY(test_id) REFERENCES test(id));
CREATE TABLE files(id INTEGER PRIMARY KEY,
filename TEXT,
file BLOB,
test_id INTEGER,
FOREIGN KEY(test_id) REFERENCES test(id));
CREATE TABLE yaml_cache(mtime REAL, filename TEXT, pickle BLOB);
"""
def now():
return datetime.datetime.now()
#GLOBAL
database="gcvb.db"
synchronous=None
def set_db(db_path):
global database, synchronous
database=db_path
def connect(file,f, *args, **kwargs):
global synchronous
conn=sqlite3.connect(file, timeout=50, detect_types=sqlite3.PARSE_DECLTYPES)
if synchronous is None:
# See https://www.sqlite.org/pragma.html#pragma_synchronous
# OFF is known to be needed with Lustre
synchronous = os.environ.get("GCVB_SYNC", "FULL")
conn.execute(f"PRAGMA synchronous={synchronous}")
conn.row_factory=sqlite3.Row
c=conn.cursor()
try:
res = f(c, *args, **kwargs) #supposed to contain execute statements.
except:
conn.rollback()
raise
else:
conn.commit()
finally:
conn.close()
return res
def get_exclusive_access():
"""Returns a sqlite3.Connection with exclusive access to the db.
Must be closed afterwards"""
conn=sqlite3.connect(database, timeout=50)
conn.execute(f"PRAGMA synchronous={synchronous}")
conn.row_factory=sqlite3.Row
conn.isolation_level = 'EXCLUSIVE'
conn.execute('BEGIN EXCLUSIVE')
return conn
def with_connection(f):
"""decorator for function needing to connect to the database"""
def with_connection_(*args, **kwargs):
return connect(database,f, *args, **kwargs)
return with_connection_
@with_connection
def create_db(cursor):
cursor.executescript(creation_script)
@with_connection
def new_gcvb_instance(cursor, command_line, yaml_file, modifier):
cursor.execute("INSERT INTO gcvb(command_line,yaml_file,modifier,creation_date) VALUES (?,?,?,?)",[command_line,yaml_file,modifier,now()])
return cursor.lastrowid
@with_connection
def get_last_gcvb(cursor):
cursor.execute("SELECT * from gcvb ORDER BY creation_date DESC LIMIT 1")
return cursor.fetchone()["id"]
@with_connection
def get_base_from_run(cursor, run_id):
cursor.execute("SELECT gcvb_id FROM run WHERE id=?",[run_id])
return cursor.fetchone()["gcvb_id"]
@with_connection
def add_run(cursor, gcvb_id, config_id):
cursor.execute("INSERT INTO run(gcvb_id,config_id) VALUES (?,?)",[gcvb_id,config_id])
return cursor.lastrowid
@with_connection
def add_tests(cursor, run, test_list, chain):
tests=[(t["id"],run) for t in test_list]
for t in test_list:
cursor.execute("INSERT INTO test(name,run_id) VALUES(?,?)",[t["id"],run])
t["id_db"]=cursor.lastrowid
step=0
parent = 0
for task in t["Tasks"]:
step += 1
predecessor = step - 1 if chain else parent
status = -3 if parent else -2 #Ready (-2) if parent is 0 else Pending (-3)
cursor.execute("INSERT INTO task(step,parent,test_id,status) VALUES(?,?,?,?)",
[step, predecessor, t["id_db"], status])
parent = step
for valid in task.get("Validations",[]):
step += 1
predecessor = step - 1 if chain else parent
cursor.execute("INSERT INTO task(step,parent,test_id) VALUES (?,?,?)",
[step,predecessor,t["id_db"]])
@with_connection
def start_test(cursor,run,test_id):
cursor.execute("""UPDATE test
SET start_date = ?
WHERE id = ? AND run_id = ?""",[now(), test_id, run])
@with_connection
def end_test(cursor, run, test_id):
cursor.execute("""UPDATE test
SET end_date = ?
WHERE id = ? AND run_id = ?""",[now(), test_id, run])
@with_connection
def start_task(cursor, test_id, step):
cursor.execute("""UPDATE task
SET start_date = ?, status = -1
WHERE step = ? AND test_id = ?""", [now(), step, test_id])
@with_connection
def end_task(cursor, test_id, step, exit_status):
cursor.execute("""UPDATE task
SET end_date = ?, status = ?
WHERE step = ? AND test_id = ?""", [now(), exit_status, step, test_id])
@with_connection
def start_run(cursor,run):
#update only if there is no start date already.
#Multiple launch scripts can be started, and we might not be the first.
cursor.execute("""UPDATE run
SET start_date = ?
WHERE id = ?
AND start_date IS NULL""",[now(), run])
@with_connection
def end_run(cursor,run):
#update only if every tests is completed.
#multiple scripts can be launched, we might not be the last.
cursor.execute("""SELECT count(*) FROM test
WHERE run_id = ?
AND end_date IS NULL""",[run])
count=cursor.fetchone()["count(*)"]
if not(count):
cursor.execute("""UPDATE run
SET end_date = ?
WHERE id = ?""",[now(), run])
@with_connection
def add_metric(cursor, run_id, test_id, step, name, value):
cursor.execute("INSERT INTO valid(metric,value,test_id,task_step) VALUES (?,?,?,?)",[name,value,test_id, step])
@with_connection
def get_last_run(cursor):
cursor.execute("SELECT * from run ORDER BY id DESC LIMIT 1")
res=cursor.fetchone()
if res is None:
return None, None
else:
return res["id"], res["gcvb_id"]
@with_connection
def get_run_infos(cursor, run_id):
cursor.execute("SELECT * from run WHERE id = ?", [run_id])
return cursor.fetchone()
@with_connection
def load_report(cursor, run_id):
a="""SELECT metric, value, name
FROM valid
INNER JOIN test
ON test_id=test.id
WHERE test.run_id=(?)"""
cursor.execute(a,[run_id])
res={}
for row in cursor.fetchall():
res.setdefault(row["name"],{})[row["metric"]]=row["value"]
return res
@with_connection
def load_report_n(cursor, run_id):
a="""SELECT metric, value, name, task_step
FROM valid
INNER JOIN test
ON test_id=test.id
WHERE test.run_id=(?)"""
cursor.execute(a,[run_id])
res = defaultdict(lambda : defaultdict(dict))
for t in cursor.fetchall():
res[t["name"]][t["task_step"]][t["metric"]]=t["value"]
return res
@with_connection
def save_blobs(cursor, data):
request="""INSERT INTO files(filename,file, test_id)
VALUES (?,?,?)"""
for params in data:
cursor.execute(request, params)
@with_connection
def save_files(cursor, run_id, test_id, file_list):
request="""INSERT INTO files(filename,file, test_id)
VALUES (?,?,?)"""
for pattern in file_list:
for file in glob.iglob(pattern):
content=util.file_to_compressed_binary(file)
cursor.execute(request,[file,content,test_id])
@with_connection
def save_yaml_cache(cursor, mtime, filename, res_dict):
req1 = "DELETE FROM yaml_cache WHERE filename = ?"
req2 = "INSERT INTO yaml_cache(mtime, filename, pickle) VALUES (?,?,?)"
loaded_dict = util.pickle_obj_to_binary(res_dict)
cursor.execute(req1, (filename,))
cursor.execute(req2, (mtime, filename, loaded_dict))
return res_dict
@with_connection
def load_yaml_cache(cursor, filename):
request = "SELECT mtime, pickle FROM yaml_cache WHERE filename = ?"
cursor.execute(request, (filename, ))
res = cursor.fetchone()
if res is None:
return 0, None
else:
return res["mtime"], util.pickle_binary_to_obj(res["pickle"])
@with_connection
def get_tests(cursor, run_id):
request="""SELECT id, name, start_date, end_date
FROM test
WHERE run_id = ?"""
cursor.execute(request, [run_id])
return cursor.fetchall()
@with_connection
def get_file_list(cursor, run_id, test_name):
request="""SELECT filename
FROM files
INNER JOIN test ON test_id=test.id
INNER JOIN run ON test.run_id=run.id
WHERE run.gcvb_id = ? AND test.name = ?"""
cursor.execute(request,[run_id,test_name])
res=cursor.fetchall()
return [f["filename"] for f in res]
@with_connection
def retrieve_file(cursor, run_id, test_name, filename):
request="""SELECT file
FROM files
INNER JOIN test ON test_id=test.id
INNER JOIN run ON test.run_id=run.id
WHERE run.gcvb_id = ? AND test.name = ? AND filename = ?"""
cursor.execute(request, [run_id,test_name, filename])
return gzip.decompress(cursor.fetchone()["file"])
@with_connection
def retrieve_input(cursor, run):
request="""SELECT yaml_file, modifier
FROM gcvb
INNER JOIN run ON gcvb.id=run.gcvb_id
WHERE run.id=?"""
cursor.execute(request, [run])
res=cursor.fetchone()
return (res["yaml_file"],res["modifier"])
@with_connection
def retrieve_test(cursor, run, test_id):
request="""SELECT id, start_date, end_date
FROM test
WHERE name=? AND run_id=?"""
cursor.execute(request,[test_id,run])
res=dict(cursor.fetchone())
request="""SELECT metric, value
FROM valid
WHERE test_id=?"""
cursor.execute(request,[res["id"]])
metrics=cursor.fetchall()
res["metrics"]={m["metric"]:m["value"] for m in metrics}
return res
@with_connection
def retrieve_history(cursor, test_id, metric_id):
request="""SELECT metric, value, test.run_id as run, test.name as test_id
FROM valid
INNER JOIN test ON test.id=valid.test_id
WHERE test.name=? AND valid.metric=?"""
cursor.execute(request,[test_id,metric_id])
res=cursor.fetchall()
return res
@with_connection
def get_steps(cursor, run_id):
request="""SELECT test.name, step, task.start_date, task.end_date, status
FROM task
INNER JOIN test ON test.id=task.test_id
WHERE test.run_id = ?"""
cursor.execute(request, [run_id])
res = defaultdict(lambda : defaultdict(dict))
for t in cursor.fetchall():
res[t["name"]][t["step"]]=dict(t)
return res
```
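A hedged sketch of the connection pattern defined above: `set_db` points the module at a database file, `create_db` runs the creation script, and every `@with_connection` function then opens, commits, and closes its own connection. The database path below is arbitrary:
```python
# Illustrative use of the gcvb.db helpers above; the database path is arbitrary.
import gcvb.db as db

db.set_db("/tmp/gcvb_example.db")  # all @with_connection functions now target this file
db.create_db()                     # executes creation_script once

base_id = db.new_gcvb_instance("gcvb generate", "tests.yaml", None)
run_id = db.add_run(base_id, "default-config")
db.start_run(run_id)
print(db.get_last_run())           # -> (run_id, base_id)
```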
#### File: gcvb/gcvb/loader.py
```python
import gcvb.yaml_input as yaml_input
import gcvb.db as db
import gcvb.job as job
from collections import defaultdict
import os
class BaseLoader(object):
def __init__(self):
self.data_root = "./data"
self.config = {"executables" : {}}
self.loaded = {}
self.references = {}
self.allowed_files = {}
def load_base(self, run_id):
ya,mod = db.retrieve_input(run_id)
base = db.get_base_from_run(run_id)
if not os.path.exists(ya):
# FIXME replace all ./results/ by os.path.join("results", ...)
runya = f"./results/{base}/tests.yaml"
print(f"Warning: {ya} file missing. Trying {runya}")
ya = runya
# results/*/tests.yaml already have the modifier applied
mod = None
if (ya,mod) not in self.loaded:
self.loaded[(ya,mod)] = yaml_input.load_yaml(ya,mod)
refs = yaml_input.get_references(self.loaded[(ya,mod)]["Tests"].values(),self.data_root)
self.references.update(refs)
if base not in self.allowed_files:
self.allowed_files[base] = self.__populate_allowed_files(ya, mod)
return self.loaded[(ya,mod)]
def __populate_one_allowed(self, taskorval, allowedentry, at_job_creation):
lr = taskorval.get("serve_from_results",[])
ldb = taskorval.get("serve_from_db",[])
for f in lr + ldb:
filename = job.format_launch_command(f["file"], self.config, at_job_creation)
allowedentry[filename] = f
def __populate_allowed_files(self, ya, mod):
s = defaultdict(dict)
for test_id,test in self.loaded[(ya,mod)]["Tests"].items():
for c,task in enumerate(test["Tasks"]):
at_job_creation = {}
job.fill_at_job_creation_task(at_job_creation, task, f"{test_id}_{c}", self.config)
self.__populate_one_allowed(task, s[test_id], at_job_creation)
for valid in task.get("Validations",[]):
job.fill_at_job_creation_validation(at_job_creation, valid, self.data_root,
test["data"], self.config, self.references)
self.__populate_one_allowed(valid, s[test_id], at_job_creation)
return s
loader = BaseLoader()
```
|
{
"source": "jeromerobert/pvpycgnsreader",
"score": 2
}
|
#### File: jeromerobert/pvpycgnsreader/cgns.py
```python
from collections import namedtuple
import ctypes
from ctypes import (
POINTER,
create_string_buffer,
c_char_p,
c_int,
c_void_p,
c_double,
c_size_t,
)
import sys
import numpy as np
# Possible CGNS exception types
CGNSError = type("CGNSError", (Exception,), {})
CGNSNodeNotFound = type("CGNSNodeNotFound", (CGNSError,), {})
CGNSIncorrectPath = type("CGNSIncorrectPath", (CGNSError,), {})
CGNSNoIndexDim = type("CGNSNoIndexDim", (CGNSError,), {})
def _load_lib(root):
if sys.platform == "darwin":
templates = [
"@executable_path/../Libraries/lib%s.dylib",
"lib%s.dylib",
"lib%s.so",
"lib%s.bundle",
"%s.dylib",
"%s.so",
"%s.bundle",
"%s",
]
elif sys.platform == "win32" or sys.platform == "cygwin":
templates = ["%s.dll", "lib%s.dll"]
else:
templates = ["lib%s.so", "%s.so", "%s"]
for t in templates:
try:
return ctypes.CDLL(t % root)
except OSError:
pass
raise OSError("Cannot load the %s library" % root)
CG_MODE_READ = 0
CG_MODE_WRITE = 1
CG_MODE_MODIFY = 2
CG_MODE_CLOSED = 3
CG_FILE_NONE = 0
CG_FILE_ADF = 1
CG_FILE_HDF5 = 2
CG_FILE_ADF2 = 3
CGNSNode = namedtuple(
"CGNSNode", ["name", "id", "children", "dtype", "dimensions", "label"]
)
_CGNS_TO_NUMPY = {
"MT": None,
"C1": "S",
"I4": "i4",
"I8": "i8",
"U4": "u4",
"U8": "u8",
"R4": "f4",
"R8": "f8",
"B1": "i1",
}
# Possible return codes
CGNS_STATUS = {
1: CGNSError,
2: CGNSNodeNotFound,
3: CGNSIncorrectPath,
4: CGNSNoIndexDim,
76: Exception("H5Gopen:open of a node group failed"),
}
def _errcheck(status, fn, arg):
if status != 0:
print(status, fn, arg)
try:
raise CGNS_STATUS[status]
except KeyError:
raise CGNSError
class _Prefixer:
def __init__(self, delegate, prefix):
self.delegate = delegate
self.prefix = prefix
def __getattr__(self, name):
return getattr(self.delegate, self.prefix + name)
class _CGNSWrappers:
def __init__(self):
# This is the name of the CGNS lib in Paraview
libname, self.prefix, self.cgsize_t = "vtkcgns-pv5.9", "vtkcgns_cgio_", c_size_t
# For debug
# libname, self.prefix, self.cgsize_t = "/home/robert/elfipole/CGNS/build/src/libcgns.so", "cgio_", c_int
self.lib = _load_lib(libname)
# ier = cgio_open_file(const char *filename, int file_mode, int file_type, int *cgio_num);
self._proto("open_file", [c_char_p, c_int, c_int, POINTER(c_int)])
# ier = cgio_number_children(int cgio_num, double id, int *num_child);
self._proto("number_children", [c_int, c_double, POINTER(c_int)])
# ier = cgio_get_root_id(int cgio_num, double *rootid);
self._proto("get_root_id", [c_int, POINTER(c_double)])
# ier = cgio_children_ids(int cgio_num, double id, int start, int max_ret, int *num_ret, double *child_ids);
self._proto(
"children_ids",
[c_int, c_double, c_int, c_int, POINTER(c_int), POINTER(c_double)],
)
# int cgio_get_name (int cgio_num, double id, char *name);
self._proto("get_name", [c_int, c_double, c_char_p])
# ier = cgio_get_data_type(int cgio_num, double id, char *data_type);
self._proto("get_data_type", [c_int, c_double, c_char_p])
# ier = cgio_get_dimensions(int cgio_num, double id, int *ndims, cgsize_t *dims);
self._proto(
"get_dimensions", [c_int, c_double, POINTER(c_int), POINTER(self.cgsize_t)]
)
# ier = cgio_get_label(int cgio_num, double id, char *label);
self._proto("get_label", [c_int, c_double, c_char_p])
# ier = cgio_read_all_data_type(int cgio_num, double id, const char *m_data_type, void *data);
self._proto("read_all_data_type", [c_int, c_double, c_char_p, c_void_p])
self.lib = _Prefixer(self.lib, self.prefix)
def _proto(self, fname, argtypes):
r = getattr(self.lib, self.prefix + fname)
r.argtypes = argtypes
r.errcheck = _errcheck
def open(self, name):
file = c_int()
name = bytes(name, "utf-8")
self.lib.open_file(name, CG_MODE_READ, CG_FILE_NONE, ctypes.byref(file))
rootid = c_double()
self.lib.get_root_id(file, ctypes.byref(rootid))
return file, self._create_node(file, rootid)
def _create_node(self, cgio_num, node_id):
nc = c_int()
self.lib.number_children(cgio_num, node_id, ctypes.byref(nc))
nc = nc.value
ids = (c_double * nc)()
num_ret = c_int()
self.lib.children_ids(cgio_num, node_id, 1, nc, ctypes.byref(num_ret), ids)
buf = create_string_buffer(32)
self.lib.get_name(cgio_num, node_id, buf)
name = buf.value.decode("utf-8")
self.lib.get_label(cgio_num, node_id, buf)
label = buf.value.decode("utf-8")
self.lib.get_data_type(cgio_num, node_id, buf)
dtype = buf.value.decode("utf-8")
ndim = c_int()
dims = (self.cgsize_t * 12)()
self.lib.get_dimensions(cgio_num, node_id, ctypes.byref(ndim), dims)
dims = list(dims[: ndim.value])
children = {}
for i in ids:
c = self._create_node(cgio_num, c_double(i))
children[c.name] = c
return CGNSNode(
name=name,
id=node_id,
children=children,
dtype=dtype,
dimensions=dims,
label=label,
)
def read_data(self, cgio_num, node):
dim = list(reversed(node.dimensions))
isstring = node.dtype == "C1" and dim[-1] == 32
if isstring:
dtype = "|S32"
dim = dim[:-1]
else:
dtype = _CGNS_TO_NUMPY[node.dtype]
buf = np.zeros(dim, dtype=dtype)
self.lib.read_all_data_type(
cgio_num, node.id, node.dtype.encode(), buf.ctypes.data
)
if isstring:
buf = buf.tolist()
_bytetostr(buf)
return buf
_cgns_wrapper = _CGNSWrappers()
def _bytetostr(alist):
"""Convert a list of numpy array of char to a list of string"""
for i, e in enumerate(alist):
if isinstance(e, list):
_bytetostr(e)
else:
# This should not happen in valid CGNS file
if len(alist[i]) == 0 or alist[i][0] == 0:
alist[i] = None
continue
try:
alist[i] = e.decode("utf-8").strip()
except UnicodeDecodeError:
alist[i] = None
if alist[i] == "Null":
alist[i] = None
def _get_path(cgnsnode, path):
r = cgnsnode.children.get(path[0])
if r is None or len(path) == 1:
return r
else:
return _get_path(r, path[1:])
class Reader:
def __init__(self, filename):
# FIXME add a close method
self.cgio_num, self.data = _cgns_wrapper.open(filename)
def node(self, path=[]):
"""Return the meta data of a node"""
return _get_path(self.data, path)
def read_array(self, node):
return _cgns_wrapper.read_data(self.cgio_num, node)
def read_path(self, path):
"""Read the data associated to a path"""
n = self.node(path)
return None if n is None else self.read_array(n)
if __name__ == "__main__":
r = Reader("test.cgns")
print(r.read_path(["Base", "TimeIterValues", "TimeValues"]))
print(r.read_path(["Base", "0000001_T#1_WALL_2_1", "Elem"]))
print(r.node(["Base", "TimeIterValues", "ZonePointers"]))
print(r.read_path(["Base", "TimeIterValues", "ZonePointers"]))
```
|
{
"source": "jeromerobert/scikit-decide",
"score": 2
}
|
#### File: scikit-decide/docs/autodoc.py
```python
import ast
import importlib
import inspect
import json
import os
import pkgutil
import re
import sys
from functools import lru_cache
from glob import glob
import skdecide
refs = set()
header_comment = '# %%\n'
# https://github.com/kiwi0fruit/ipynb-py-convert/blob/master/ipynb_py_convert/__main__.py
def py2nb(py_str):
cells = []
chunks = py_str.split(f'\n\n{header_comment}')[1:]
for chunk in chunks:
cell_type = 'code'
chunk = chunk.strip()
if chunk.startswith("'''"):
chunk = chunk.strip("'\n")
cell_type = 'markdown'
cell = {
'cell_type': cell_type,
'metadata': {},
'source': chunk.splitlines(True),
}
if cell_type == 'code':
cell.update({'outputs': [], 'execution_count': None})
cells.append(cell)
notebook = {
'cells': cells,
'metadata': {
'anaconda-cloud': {},
'kernelspec': {
'display_name': 'Python 3',
'language': 'python',
'name': 'python3'},
'language_info': {
'codemirror_mode': {'name': 'ipython', 'version': 3},
'file_extension': '.py',
'mimetype': 'text/x-python',
'name': 'python',
'nbconvert_exporter': 'python',
'pygments_lexer': 'ipython3',
'version': '3.6.1'}},
'nbformat': 4,
'nbformat_minor': 1
}
return notebook
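# Illustrative (hypothetical) use of py2nb: convert a percent-format script into a
# notebook dict and dump it as .ipynb. Kept as comments so the module behaviour is unchanged.
#
#   with open('example.py') as f:
#       nb = py2nb(f.read())
#   with open('example.ipynb', 'w') as f:
#       json.dump(nb, f, indent=2)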
# https://stackoverflow.com/questions/48879353/how-do-you-recursively-get-all-submodules-in-a-python-package
def find_abs_modules(package):
path_list = []
spec_list = []
for importer, modname, ispkg in pkgutil.walk_packages(package.__path__):
import_path = f'{package.__name__}.{modname}'
if ispkg:
spec = pkgutil._get_spec(importer, modname)
try:
importlib._bootstrap._load(spec)
spec_list.append(spec)
except Exception as e:
print(f'Could not load package {modname}, so it will be ignored ({e}).')
else:
path_list.append(import_path)
for spec in spec_list:
del sys.modules[spec.name]
return path_list
def py_parse(filepath): # avoid using ast package just for extracting file doc string?
with open(filepath) as fd:
file_contents = fd.read()
module = ast.parse(file_contents)
docstring = ast.get_docstring(module)
docstring = '' if docstring is None else docstring.strip()
name = os.path.splitext(os.path.basename(filepath))[0]
return docstring, name, file_contents
@lru_cache(maxsize=1000)
def get_ref(object):
name = getattr(object, '__qualname__', None)
if name is None:
name = getattr(object, '__name__', None)
if name is None:
name = object._name
reflist = [name]
if hasattr(object, '__module__'):
reflist.insert(0, object.__module__)
ref = '.'.join(reflist)
refs.add(ref)
return ref
def format_doc(doc):
# Replace references like "#obj.func()" by "`obj.func()`" for Markdown code highlighting - TODO: replace in doc?
doc = re.sub(r'#(?P<ref>[\w\.,()]*[\w()])', lambda m: f'`{m.group("ref")}`', doc)
# Replace content of "# Parameters" by list of parameters
def list_content(content):
content = re.sub(r'^(?P<param>\w+)', lambda m: f'- **{m.group("param")}**', content, flags=re.MULTILINE)
return content.strip()
doc = re.sub(r'^(?<=# Parameters\n)(?P<content>(?:\n?\s*\w.*)+)',
lambda m: list_content(m.group("content")), doc, flags=re.MULTILINE)
# Replace "# Title" (e.g. "# Parameters") by "#### Title"
doc = re.sub(r'^# (?=Parameters|Returns|Example)', '#### ', doc, flags=re.MULTILINE)
# Replace "!!! container" (e.g. "!!! tip") by "::: container [...] :::"
def strip_content(content):
content = re.sub(r'^ {4}', '', content, flags=re.MULTILINE)
return content.rstrip()
doc = re.sub(r'!!! (?=tip|warning|danger)(?P<head>.*)\n(?P<content>(?:\n? {4,}.*)+)',
lambda m: f'::: {m.group("head")}\n{strip_content(m.group("content"))}\n:::', doc)
return doc
def add_func_method_infos(func_method, autodoc):
if inspect.isfunction(func_method):
autodoc['type'] = 'function'
elif inspect.ismethod(func_method):
autodoc['type'] = 'method'
# Get signature
signature = inspect.signature(func_method)
parameters = signature.parameters
params = []
for k, v in parameters.items():
if not (k == 'self' and func_method.__name__ == '__init__'):
parameter = parameters[k]
param = {'name': k}
if parameter.default != signature.empty:
param['default'] = str(parameter.default)
if 'lambda' in param['default']:
param['default'] = '<lambda function>' # TODO: improve?
if parameter.annotation != signature.empty:
param['annotation'] = parameter.annotation
params.append(param)
autodoc['signature'] = {'params': params}
if signature.return_annotation != signature.empty:
autodoc['signature']['return'] = signature.return_annotation
def add_basic_member_infos(member, autodoc):
try:
autodoc['ref'] = get_ref(member)
source, line = inspect.getsourcelines(member)
autodoc['source'] = ''.join(source) # TODO: keep?
autodoc['line'] = line
doc = inspect.getdoc(member)
if doc is not None:
autodoc['doc'] = format_doc(doc)
except Exception: # can happen e.g. when member is TypeVar
pass
def json_escape(obj):
return json.dumps(obj).replace("'", r"\'").replace('"', "'")
def md_escape(md):
return re.sub(r'[_<>]', lambda m: f'\\{m.group()}', md)
def doc_escape(md):
return re.sub(r'[<]', lambda m: f'\\{m.group()}', md)
def write_signature(md, member):
if 'signature' in member:
escape_json_sig = json_escape(member['signature'])
md += f'<skdecide-signature name= "{member["name"]}" :sig="{escape_json_sig}"></skdecide-signature>\n\n'
return md
def is_implemented(func_code):
return not func_code.strip().endswith('raise NotImplementedError')
if __name__ == '__main__':
# ========== GATHER AUTODOC INFOS ==========
# Get all scikit-decide (sub)modules
modules = []
for m in find_abs_modules(skdecide):
try:
module = importlib.import_module(m)
modules.append(module)
except Exception as e:
print(f'Could not load module {m}, so it will be ignored ({e}).')
autodocs = []
for module in modules:
autodoc = {}
# Get module-level infos
autodoc['ref'] = get_ref(module)
doc = inspect.getdoc(module)
if doc is not None:
autodoc['doc'] = format_doc(doc)
members = inspect.getmembers(module, lambda x: inspect.getmodule(x) == module)
autodoc_members = []
for member_name, member in members:
member = inspect.unwrap(member)
# Get member-level infos
if getattr(member, '__doc__', None) is not None:
autodoc_member = {}
autodoc_member['name'] = member_name
add_basic_member_infos(member, autodoc_member)
if inspect.isfunction(member):
add_func_method_infos(member, autodoc_member)
elif inspect.isclass(member):
autodoc_member['type'] = 'class'
autodoc_member['bases'] = list(map(get_ref, member.__bases__)) if member.__bases__ != (object,) else None
autodoc_member['inheritance'] = list(map(get_ref, inspect.getmro(member)[:-1]))
submembers = inspect.getmembers(member)
autodoc_submembers = []
for submember_name, submember in submembers:
submember = inspect.unwrap(submember)
# Get class member-level infos
if submember_name == '__init__' or not submember_name.startswith('__'):
autodoc_submember = {}
autodoc_submember['name'] = submember_name if submember_name != '__init__' else member_name
add_basic_member_infos(submember, autodoc_submember)
# Find original owner class of this member (in class inheritance)
if submember_name == '__init__':
autodoc_submember['owner'] = member_name
else:
for cls in inspect.getmro(member):
if hasattr(cls, submember_name):
autodoc_submember['owner'] = cls.__name__
if inspect.isfunction(submember) or inspect.ismethod(submember) or submember_name == '__init__':
add_func_method_infos(submember, autodoc_submember)
else:
# Class variables (e.g. T_memory, T_agent...)
autodoc_submember['type'] = 'variable'
if 'doc' in autodoc_submember or autodoc_submember.get('type') == 'variable':
autodoc_submembers.append(autodoc_submember)
autodoc_member['members'] = sorted(autodoc_submembers, key=lambda x: x['line'] if 'line' in x else 0)
if 'doc' in autodoc_member:
autodoc_members.append(autodoc_member)
autodoc['members'] = sorted(autodoc_members, key=lambda x: x['line'] if 'line' in x else 0)
autodocs.append(autodoc)
# ========== GENERATE MARKDOWN FILES ==========
# Remove all previously auto-generated files
for oldpath in glob('reference/_*.md') + glob('guide/_*.md') + glob('.vuepress/public/notebooks/*.ipynb'):
os.remove(oldpath)
# Generate Reference Markdown files (reference/_skdecide.*.md)
for module in autodocs:
# Initiate Markdown
md = ''
# Write module title
md += f'# {module["ref"].split(".", 1)[-1]}\n\n'
# Write module doc (if any)
if 'doc' in module:
md += f'{module["doc"]}\n\n'
# Write Table Of Content
md += '[[toc]]\n\n'
# Write domain spec summary
md += '::: tip\n<skdecide-summary></skdecide-summary>\n:::\n\n'
# Write members
for member in module['members']:
# Write member title
md += f'## {md_escape(member["name"])}\n\n'
# Write member signature (if any)
md = write_signature(md, member)
# Write member doc (if any)
if 'doc' in member:
md += f'{doc_escape(member["doc"])}\n\n'
# Write submembers (if any)
if 'members' in member:
for submember in sorted(member['members'], key=lambda x: (x['name'].startswith('_'), x['name'])):
if submember['type'] != 'variable':
# Write submember title
md += f'### {md_escape(submember["name"]) if submember["name"] != member["name"] else "Constructor"}' \
f' <Badge text="{submember["owner"]}" type="{"tip" if submember["owner"] == member["name"] else "warn"}"/>\n\n'
# Write submember signature (if any)
md = write_signature(md, submember)
# Write submember doc (if any)
if 'doc' in submember:
md += f'{doc_escape(submember["doc"])}\n\n'
with open(f'reference/_{module["ref"]}.md', 'w') as f:
f.write(md)
# Write Reference index (reference/README.md)
REF_INDEX_MAXDEPTH = 5
ref_entries = sorted([tuple(m['ref'].split('.')) for m in autodocs], key=lambda x: (len(x), x)) # sorted entries
ref_entries = filter(lambda e: len(e) <= REF_INDEX_MAXDEPTH, ref_entries) # filter out by max depth
ref_entries = [{'text': e[-1], 'link': '.'.join(e), 'section': e[:-1]} for e in ref_entries] # organize entries
reference = ''
sections = set()
for e in ref_entries:
for i in range(1, len(e['section']) + 1):
section = e['section'][:i]
if section not in sections:
title = 'Reference'
if section[-1] != 'skdecide':
title = section[-1]
reference += '\n'
reference += f'{"".join(["#"]*i)} {title}\n\n'
sections.add(section)
reference += f'- <router-link to="_{e["link"]}">{e["text"]}</router-link>\n'
with open(f'reference/README.md', 'w') as f:
f.write(reference)
# Write Domain/Solver Specification pages (guide/_domainspec.md & guide/_solverspec.md)
state = {
'selection': {},
'templates': {},
'characteristics': {},
'methods': {},
'types': {},
'signatures': {},
'objects': {}
}
for element in ['domain', 'solver']:
spec = ''
characteristics = [module for module in autodocs if module['ref'].startswith(f'skdecide.builders.{element}.')]
default_characteristics = {c['ref'].split('.')[-1].capitalize(): '(none)' for c in characteristics}
tmp_templates = []
for template in [member for module in autodocs if module['ref'] == f'skdecide.{element}s' for member in module['members']]:
if template['name'] == element.capitalize():
mandatory_characteristics = [base.split('.')[-2].capitalize() for base in template['bases'] or []]
tmp_templates.append({'name': template['name'], 'characteristics': dict(default_characteristics, **{base.split('.')[-2].capitalize(): base.split('.')[-1] for base in template['bases'] or [] if base.split('.')[-1] != element.capitalize()})})
spec += f'<template v-slot:{template["name"]}>\n\n'
if 'doc' in template:
spec += f'{doc_escape(template["doc"])}\n\n'
spec += '</template>\n\n'
tmp_characteristics = []
for characteristic in characteristics:
characteristic_name = characteristic['ref'].split('.')[-1].capitalize()
tmp_characteristics.append({'name': characteristic_name, 'levels': []})
if characteristic_name not in mandatory_characteristics:
tmp_characteristics[-1]['levels'].append('(none)')
for level in characteristic['members']:
tmp_characteristics[-1]['levels'].append(level['name'])
spec += f'<template v-slot:{level["name"]}>\n\n'
if 'doc' in level:
spec += f'{doc_escape(level["doc"])}\n\n'
spec += '</template>\n\n'
state['selection'][element] = {
'template': tmp_templates[0]['name'],
'characteristics': tmp_templates[0]['characteristics'],
'showFinetunedOnly': True
}
if element == 'domain':
state['selection'][element]['simplifySignatures'] = True
state['templates'][element] = tmp_templates
state['characteristics'][element] = tmp_characteristics
spec = '---\n' \
'navbar: false\n' \
'sidebar: false\n' \
'---\n\n' \
f'<skdecide-spec{" isSolver" if element == "solver" else ""}>\n\n' + spec
spec += '</skdecide-spec>\n\n'
with open(f'guide/_{element}spec.md', 'w') as f:
f.write(spec)
# Write Json state (.vuepress/_state.json)
state['objects'] = {member['name']: f'/reference/_skdecide.core.html#{member["name"].lower()}' for module in autodocs if module['ref'] == 'skdecide.core' for member in module['members']}
for element in ['domain', 'solver']:
tmp_methods = {} # TODO: detect classmethods/staticmethods to add decorator in code generator (only necessary if there was any NotImplemented classmethod/staticmethod in base template or any characteristic level)
tmp_types = {}
tmp_signatures = {}
for module in autodocs:
if module['ref'].startswith(f'skdecide.builders.{element}.'):
not_implemented = set()
for level in module.get('members', []):
level_name = level['name']
types_dict = {}
for member in level.get('members', []):
member_name = member['name']
if member['type'] == 'function':
tmp_signatures[member_name] = member['signature']
if is_implemented(member['source']):
not_implemented.discard(member_name)
else:
not_implemented.add(member_name)
elif member['type'] == 'variable':
types_dict[member_name] = member['ref']
tmp_methods[level_name] = list(not_implemented)
tmp_types[level_name] = types_dict
elif module['ref'] == f'skdecide.{element}s':
for template in module['members']:
if template['name'] == element.capitalize():
tmp_methods[element] = []
for member in template.get('members', []):
if member['type'] == 'function' and member['owner'] == element.capitalize() and not is_implemented(member['source']):
member_name = member['name']
tmp_signatures[member_name] = member['signature']
tmp_methods[element].append(member_name)
state['methods'][element] = tmp_methods
state['types'][element] = tmp_types
state['signatures'][element] = tmp_signatures
with open('.vuepress/_state.json', 'w') as f:
json.dump(state, f)
# Convert selected examples to notebooks & write Examples page (guide/_examples.md)
examples = '# Examples\n\n'
selected_examples = []
for example in glob('../examples/*.py'):
docstr, name, code = py_parse(example)
if docstr.startswith('Example '):
selected_examples.append((docstr, name, code))
sorted_examples = sorted(selected_examples)
for docstr, name, code in sorted_examples:
examples += f'## {docstr[docstr.index(":")+1:]}\n\n'
examples += f'<el-link type="primary" icon="el-icon-bottom" :underline="false" style="margin: 10px" href="/notebooks/{name}.ipynb">Download Notebook</el-link>\n'
examples += f'<el-link type="warning" icon="el-icon-cloudy" :underline="false" style="margin: 10px" href="https://colab.research.google.com/github/airbus/scikit-decide/blob/gh-pages/notebooks/{name}.ipynb">Run in Google Colab</el-link>\n\n'
notebook = py2nb(code)
for cell in notebook.get('cells', []):
cell_type = cell['cell_type']
cell_source = ''.join(cell['source'])
if cell_type == 'markdown':
examples += f'{cell_source}\n\n'
elif cell_type == 'code':
examples += f'``` py\n{cell_source}\n```\n\n'
with open(f'.vuepress/public/notebooks/{name}.ipynb', 'w') as f:
json.dump(notebook, f, indent=2)
with open('guide/_examples.md', 'w') as f:
f.write(examples)
```
#### File: scikit-decide/examples/riw_gym_solver.py
```python
import gym
import numpy as np
from typing import Callable
from skdecide.hub.domain.gym import DeterministicGymDomain, GymWidthDomain, GymDiscreteActionDomain
from skdecide.hub.solver.riw import RIW
from skdecide.utils import rollout
ENV_NAME = 'CartPole-v0'
HORIZON = 200
class D(DeterministicGymDomain, GymWidthDomain, GymDiscreteActionDomain):
pass
class GymRIWDomain(D):
"""This class wraps a cost-based deterministic OpenAI Gym environment as a domain
usable by a width-based planner
!!! warning
Using this class requires OpenAI Gym to be installed.
"""
def __init__(self, gym_env: gym.Env,
set_state: Callable[[gym.Env, D.T_memory[D.T_state]], None] = None,
get_state: Callable[[gym.Env], D.T_memory[D.T_state]] = None,
continuous_feature_fidelity: int = 1,
discretization_factor: int = 10,
branching_factor: int = None,
max_depth: int = 50) -> None:
"""Initialize GymRIWDomain.
# Parameters
gym_env: The deterministic Gym environment (gym.env) to wrap.
set_state: Function to call to set the state of the gym environment.
If None, default behavior is to deepcopy the environment when changing state
get_state: Function to call to get the state of the gym environment.
If None, default behavior is to deepcopy the environment when changing state
continuous_feature_fidelity: Number of integers to represent a continuous feature
in the interval-based feature abstraction (higher is more precise)
discretization_factor: Number of discretized action variable values per continuous action variable
branching_factor: if not None, sample branching_factor actions from the resulting list of discretized actions
max_depth: maximum depth of states to explore from the initial state
"""
DeterministicGymDomain.__init__(self,
gym_env=gym_env,
set_state=set_state,
get_state=get_state)
GymDiscreteActionDomain.__init__(self,
discretization_factor=discretization_factor,
branching_factor=branching_factor)
GymWidthDomain.__init__(self, continuous_feature_fidelity=continuous_feature_fidelity)
gym_env._max_episode_steps = max_depth
domain_factory = lambda: GymRIWDomain(gym_env=gym.make(ENV_NAME),
continuous_feature_fidelity=5,
discretization_factor=3,
max_depth=HORIZON)
domain = domain_factory()
domain.reset()
if RIW.check_domain(domain):
solver_factory = lambda: RIW(domain_factory=domain_factory,
state_features=lambda d, s: d.bee2_features(s),
use_state_feature_hash=False,
use_simulation_domain=True,
time_budget=200,
rollout_budget=1000,
max_depth=200,
exploration=0.25,
parallel=False,
debug_logs=False)
with solver_factory() as solver:
GymRIWDomain.solve_with(solver, domain_factory)
initial_state = solver._domain.reset()
rollout(domain, solver, from_memory=initial_state, num_episodes=1, max_steps=HORIZON-1, max_framerate=30,
outcome_formatter=lambda o: f'{o.observation} - cost: {o.value.cost:.2f}')
```
#### File: builders/domain/initialization.py
```python
from __future__ import annotations
import functools
from skdecide.core import D, Distribution, SingleValueDistribution, autocastable
__all__ = ['Initializable', 'UncertainInitialized', 'DeterministicInitialized']
class Initializable:
"""A domain must inherit this class if it can be initialized."""
@autocastable
def reset(self) -> D.T_agent[D.T_observation]:
"""Reset the state of the environment and return an initial observation.
By default, #Initializable.reset() provides some boilerplate code and internally calls #Initializable._reset()
(which returns an initial state). The boilerplate code automatically stores the initial state into the #_memory
attribute and samples a corresponding observation.
# Returns
An initial observation.
"""
return self._reset()
def _reset(self) -> D.T_agent[D.T_observation]:
"""Reset the state of the environment and return an initial observation.
By default, #Initializable._reset() provides some boilerplate code and internally
calls #Initializable._state_reset() (which returns an initial state). The boilerplate code automatically stores
the initial state into the #_memory attribute and samples a corresponding observation.
# Returns
An initial observation.
"""
initial_state = self._state_reset()
self._memory = self._init_memory(initial_state)
initial_observation = self._get_observation_distribution(initial_state).sample()
return initial_observation
def _state_reset(self) -> D.T_state:
"""Reset the state of the environment and return an initial state.
This is a helper function called by default from #Initializable._reset(). It focuses on the state level, as
opposed to the observation one for the latter.
# Returns
An initial state.
"""
raise NotImplementedError
class UncertainInitialized(Initializable):
"""A domain must inherit this class if its states are initialized according to a probability distribution known as
white-box."""
def _state_reset(self) -> D.T_state:
initial_state = self._get_initial_state_distribution().sample()
return initial_state
@autocastable
def get_initial_state_distribution(self) -> Distribution[D.T_state]:
"""Get the (cached) probability distribution of initial states.
By default, #UncertainInitialized.get_initial_state_distribution() internally
calls #UncertainInitialized._get_initial_state_distribution_() the first time and automatically caches its value
to make future calls more efficient (since the initial state distribution is assumed to be constant).
# Returns
The probability distribution of initial states.
"""
return self._get_initial_state_distribution()
@functools.lru_cache()
def _get_initial_state_distribution(self) -> Distribution[D.T_state]:
"""Get the (cached) probability distribution of initial states.
By default, #UncertainInitialized._get_initial_state_distribution() internally
calls #UncertainInitialized._get_initial_state_distribution_() the first time and automatically caches its value
to make future calls more efficient (since the initial state distribution is assumed to be constant).
# Returns
The probability distribution of initial states.
"""
return self._get_initial_state_distribution_()
def _get_initial_state_distribution_(self) -> Distribution[D.T_state]:
"""Get the probability distribution of initial states.
This is a helper function called by default from #UncertainInitialized._get_initial_state_distribution(), the
difference being that the result is not cached here.
!!! tip
The underscore at the end of this function's name is a convention to remind that its result should be
constant.
# Returns
The probability distribution of initial states.
"""
raise NotImplementedError
class DeterministicInitialized(UncertainInitialized):
"""A domain must inherit this class if it has a deterministic initial state known as white-box."""
def _get_initial_state_distribution_(self) -> Distribution[D.T_state]:
return SingleValueDistribution(self._get_initial_state())
@autocastable
def get_initial_state(self) -> D.T_state:
"""Get the (cached) initial state.
By default, #DeterministicInitialized.get_initial_state() internally
calls #DeterministicInitialized._get_initial_state_() the first time and automatically caches its value to make
future calls more efficient (since the initial state is assumed to be constant).
# Returns
The initial state.
"""
return self._get_initial_state()
@functools.lru_cache()
def _get_initial_state(self) -> D.T_state:
"""Get the (cached) initial state.
By default, #DeterministicInitialized._get_initial_state() internally
calls #DeterministicInitialized._get_initial_state_() the first time and automatically caches its value to make
future calls more efficient (since the initial state is assumed to be constant).
# Returns
The initial state.
"""
return self._get_initial_state_()
def _get_initial_state_(self) -> D.T_state:
"""Get the initial state.
This is a helper function called by default from #DeterministicInitialized._get_initial_state(), the difference
being that the result is not cached here.
# Returns
The initial state.
"""
raise NotImplementedError
```
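The three classes above form a template-method chain: a concrete domain only needs to supply `_get_initial_state_`, and the cached accessor plus the single-value distribution come for free. Below is a minimal, hedged sketch of that idea; the `GridStart` class and its start state are made up for illustration, and `sample()` is assumed to return the wrapped value as its name suggests.
```python
# Hypothetical sketch: a domain that only implements _get_initial_state_.
from skdecide.builders.domain import DeterministicInitialized

class GridStart(DeterministicInitialized):
    def _get_initial_state_(self):
        return (0, 0)  # always start in the top-left corner

domain = GridStart()
print(domain._get_initial_state())               # (0, 0), cached after the first call
dist = domain._get_initial_state_distribution()  # SingleValueDistribution over (0, 0)
print(dist.sample())                             # assumed to return (0, 0)
```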
#### File: builders/solver/assessability.py
```python
from __future__ import annotations
from skdecide.core import D, autocastable
__all__ = ['Utilities', 'QValues']
class Utilities:
"""A solver must inherit this class if it can provide the utility function (i.e. value function)."""
@autocastable
def get_utility(self, observation: D.T_agent[D.T_observation]) -> D.T_value:
"""Get the estimated on-policy utility of the given observation.
In mathematical terms, for a fully observable domain, this function estimates:
$$V^\\pi(s)=\\underset{\\tau\\sim\\pi}{\\mathbb{E}}[R(\\tau)|s_0=s]$$
where $\\pi$ is the current policy, any $\\tau=(s_0,a_0, s_1, a_1, ...)$ represents a trajectory sampled from
the policy, $R(\\tau)$ is the return (cumulative reward) and $s_0$ the initial state for the trajectories.
# Parameters
observation: The observation to consider.
# Returns
The estimated on-policy utility of the given observation.
"""
return self._get_utility(observation)
def _get_utility(self, observation: D.T_agent[D.T_observation]) -> D.T_value:
"""Get the estimated on-policy utility of the given observation.
In mathematical terms, for a fully observable domain, this function estimates:
$$V^\\pi(s)=\\underset{\\tau\\sim\\pi}{\\mathbb{E}}[R(\\tau)|s_0=s]$$
where $\\pi$ is the current policy, any $\\tau=(s_0,a_0, s_1, a_1, ...)$ represents a trajectory sampled from
the policy, $R(\\tau)$ is the return (cumulative reward) and $s_0$ the initial state for the trajectories.
# Parameters
observation: The observation to consider.
# Returns
The estimated on-policy utility of the given observation.
"""
raise NotImplementedError
class QValues(Utilities):
"""A solver must inherit this class if it can provide the Q function (i.e. action-value function)."""
@autocastable
def get_q_value(self, observation: D.T_agent[D.T_observation],
action: D.T_agent[D.T_concurrency[D.T_event]]) -> D.T_value:
"""Get the estimated on-policy Q value of the given observation and action.
In mathematical terms, for a fully observable domain, this function estimates:
$$Q^\\pi(s,a)=\\underset{\\tau\\sim\\pi}{\\mathbb{E}}[R(\\tau)|s_0=s,a_0=a]$$
where $\\pi$ is the current policy, any $\\tau=(s_0,a_0, s_1, a_1, ...)$ represents a trajectory sampled from
the policy, $R(\\tau)$ is the return (cumulative reward) and $s_0$/$a_0$ the initial state/action for the
trajectories.
# Parameters
observation: The observation to consider.
action: The action to consider.
# Returns
The estimated on-policy Q value of the given observation and action.
"""
return self._get_q_value(observation, action)
def _get_q_value(self, observation: D.T_agent[D.T_observation],
action: D.T_agent[D.T_concurrency[D.T_event]]) -> D.T_value:
"""Get the estimated on-policy Q value of the given observation and action.
In mathematical terms, for a fully observable domain, this function estimates:
$$Q^\\pi(s,a)=\\underset{\\tau\\sim\\pi}{\\mathbb{E}}[R(\\tau)|s_0=s,a_0=a]$$
where $\\pi$ is the current policy, any $\\tau=(s_0,a_0, s_1, a_1, ...)$ represents a trajectory sampled from
the policy, $R(\\tau)$ is the return (cumulative reward) and $s_0$/$a_0$ the initial state/action for the
trajectories.
# Parameters
observation: The observation to consider.
action: The action to consider.
# Returns
The estimated on-policy Q value of the given observation and action.
"""
raise NotImplementedError
```
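A solver advertises these capabilities by mixing the classes in and overriding the underscored hooks. The sketch below is hypothetical: a toy tabular value function satisfying the `Utilities` contract, with `_get_utility` called directly so the `autocastable` wrapper is not exercised.
```python
# Hypothetical sketch: a tabular value function exposed through the Utilities mixin.
from skdecide.builders.solver import Utilities

class TabularValues(Utilities):
    def __init__(self, values: dict):
        self._values = values  # observation -> estimated utility

    def _get_utility(self, observation):
        return self._values.get(observation, 0.0)  # default to 0 for unseen observations

v = TabularValues({'s0': 1.5, 's1': -0.2})
print(v._get_utility('s0'))  # 1.5
print(v._get_utility('s2'))  # 0.0
```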
#### File: domain/maze/maze.py
```python
from __future__ import annotations
from copy import deepcopy
from enum import Enum
from typing import NamedTuple, Optional, Any
import matplotlib.pyplot as plt
from skdecide import DeterministicPlanningDomain, TransitionValue, Space
from skdecide.builders.domain import UnrestrictedActions, Renderable
from skdecide.hub.space.gym import ListSpace, EnumSpace, MultiDiscreteSpace
DEFAULT_MAZE = '''
+-+-+-+-+o+-+-+-+-+-+
| | | |
+ + + +-+-+-+ +-+ + +
| | | | | | | |
+ +-+-+ +-+ + + + +-+
| | | | | | |
+ + + + + + + +-+ +-+
| | | | | |
+-+-+-+-+-+-+-+ +-+ +
| | | |
+ +-+-+-+-+ + +-+-+ +
| | | |
+ + + +-+ +-+ +-+-+-+
| | | | | |
+ +-+-+ + +-+ + +-+ +
| | | | | | | |
+-+ +-+ + + + +-+ + +
| | | | | | |
+ +-+ +-+-+-+-+ + + +
| | | | |
+-+-+-+-+-+x+-+-+-+-+
'''
class State(NamedTuple):
x: int
y: int
class Action(Enum):
up = 0
down = 1
left = 2
right = 3
class D(DeterministicPlanningDomain, UnrestrictedActions, Renderable):
T_state = State # Type of states
T_observation = T_state # Type of observations
T_event = Action # Type of events
T_value = float # Type of transition values (rewards or costs)
T_info = None # Type of additional information given as part of an environment outcome
class Maze(D):
def __init__(self, maze_str: str = DEFAULT_MAZE):
maze = []
for y, line in enumerate(maze_str.strip().split('\n')):
line = line.rstrip()
row = []
for x, c in enumerate(line):
if c in {' ', 'o', 'x'}:
row.append(1) # spaces are 1s
if c == 'o':
self._start = State(x, y)
if c == 'x':
self._goal = State(x, y)
else:
row.append(0) # walls are 0s
maze.append(row)
# self._render_maze = deepcopy(self._maze)
self._maze = maze
self._num_cols = len(maze[0])
self._num_rows = len(maze)
self._ax = None
self._image = None
def _get_next_state(self, memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]]) -> D.T_state:
if action == Action.left:
next_state = State(memory.x - 1, memory.y)
if action == Action.right:
next_state = State(memory.x + 1, memory.y)
if action == Action.up:
next_state = State(memory.x, memory.y - 1)
if action == Action.down:
next_state = State(memory.x, memory.y + 1)
# If candidate next state is valid
if 0 <= next_state.x < self._num_cols and 0 <= next_state.y < self._num_rows and self._maze[next_state.y][
next_state.x] == 1:
return next_state
else:
return memory
def _get_transition_value(self, memory: D.T_memory[D.T_state], action: D.T_agent[D.T_concurrency[D.T_event]],
next_state: Optional[D.T_state] = None) -> D.T_agent[TransitionValue[D.T_value]]:
if next_state.x == memory.x and next_state.y == memory.y:
cost = 2 # big penalty when hitting a wall
else:
cost = abs(next_state.x - memory.x) + abs(next_state.y - memory.y) # every move costs 1
return TransitionValue(cost=cost)
def _is_terminal(self, state: D.T_state) -> bool:
return self._is_goal(state)
def _get_action_space_(self) -> D.T_agent[Space[D.T_event]]:
return EnumSpace(Action)
def _get_goals_(self) -> D.T_agent[Space[D.T_observation]]:
return ListSpace([self._goal])
def _get_initial_state_(self) -> D.T_state:
return self._start
def _get_observation_space_(self) -> D.T_agent[Space[D.T_observation]]:
return MultiDiscreteSpace([self._num_cols, self._num_rows])
def _render_from(self, memory: D.T_memory[D.T_state], **kwargs: Any) -> Any:
if self._ax is None:
# fig = plt.gcf()
fig, ax = plt.subplots(1)
fig.canvas.set_window_title('Maze')
# ax = plt.axes()
ax.set_aspect('equal') # set the x and y axes to the same scale
plt.xticks([]) # remove the tick marks by setting to an empty list
plt.yticks([]) # remove the tick marks by setting to an empty list
ax.invert_yaxis() # invert the y-axis so the first row of data is at the top
self._ax = ax
plt.ion()
maze = deepcopy(self._maze)
maze[self._goal.y][self._goal.x] = 0.7
maze[memory.y][memory.x] = 0.3
if self._image is None:
self._image = self._ax.imshow(maze)
else:
self._image.set_data(maze)
# self._ax.pcolormesh(maze)
# plt.draw()
plt.pause(0.001)
```
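A short usage sketch of the maze domain above, exercising only the methods defined in this file (initial state, deterministic transition, transition cost) and assuming the skdecide base classes can be instantiated directly, as in the library's own examples; rendering and the solver-facing API are left out.
```python
# Sketch: walk a few steps in the default maze using only the methods defined above.
maze = Maze()
state = maze._get_initial_state_()
print('start:', state)
for action in (Action.down, Action.down, Action.right):
    next_state = maze._get_next_state(state, action)
    value = maze._get_transition_value(state, action, next_state)
    print(action, '->', next_state, 'cost:', value.cost)  # cost 2 means the move was blocked
    state = next_state
```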
#### File: domain/simple_grid_world/simple_grid_world.py
```python
from __future__ import annotations
from enum import Enum
from typing import NamedTuple, Optional
from skdecide import DeterministicPlanningDomain, TransitionValue, Space
from skdecide.builders.domain import UnrestrictedActions
from skdecide.hub.space.gym import ListSpace, EnumSpace, MultiDiscreteSpace
class State(NamedTuple):
x: int
y: int
class Action(Enum):
up = 0
down = 1
left = 2
right = 3
class D(DeterministicPlanningDomain, UnrestrictedActions):
T_state = State # Type of states
T_observation = T_state # Type of observations
T_event = Action # Type of events
T_value = float # Type of transition values (rewards or costs)
T_info = None # Type of additional information given as part of an environment outcome
class SimpleGridWorld(D):
def __init__(self, num_cols=10, num_rows=10):
self.num_cols = num_cols
self.num_rows = num_rows
def _get_next_state(self, memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]]) -> D.T_state:
if action == Action.left:
next_state = State(max(memory.x - 1, 0), memory.y)
if action == Action.right:
next_state = State(min(memory.x + 1, self.num_cols - 1), memory.y)
if action == Action.up:
next_state = State(memory.x, max(memory.y - 1, 0))
if action == Action.down:
next_state = State(memory.x, min(memory.y + 1, self.num_rows - 1))
return next_state
def _get_transition_value(self, memory: D.T_memory[D.T_state], action: D.T_agent[D.T_concurrency[D.T_event]],
next_state: Optional[D.T_state] = None) -> D.T_agent[TransitionValue[D.T_value]]:
if next_state.x == memory.x and next_state.y == memory.y:
cost = 2 # big penalty when hitting a wall
else:
cost = abs(next_state.x - memory.x) + abs(next_state.y - memory.y) # every move costs 1
return TransitionValue(cost=cost)
def _is_terminal(self, state: D.T_state) -> bool:
return self._is_goal(state)
def _get_action_space_(self) -> D.T_agent[Space[D.T_event]]:
return EnumSpace(Action)
def _get_goals_(self) -> D.T_agent[Space[D.T_observation]]:
return ListSpace([State(x=self.num_cols - 1, y=self.num_rows - 1)])
def _get_initial_state_(self) -> D.T_state:
return State(x=0, y=0)
def _get_observation_space_(self) -> D.T_agent[Space[D.T_observation]]:
return MultiDiscreteSpace([self.num_cols, self.num_rows])
```
#### File: solver/ars/ars.py
```python
from __future__ import annotations
import numpy as np
import gym
from typing import Callable
from collections.abc import Iterable
from skdecide import Domain, Solver
from skdecide.hub.solver.cgp import cgp
from skdecide.builders.solver import Policies, Restorable
from skdecide.builders.domain import SingleAgent, Sequential, Environment, UnrestrictedActions, Initializable, History, \
PartiallyObservable, Rewards
class D(Domain, SingleAgent, Sequential, Environment, UnrestrictedActions, Initializable, History, PartiallyObservable,
Rewards):
pass
#for normalizing states
class Normalizer():
def __init__(self, nb_inputs):
self.n = np.zeros(nb_inputs)
self.mean = np.zeros(nb_inputs)
self.mean_diff = np.zeros(nb_inputs)
self.var = np.zeros(nb_inputs)
def observe(self, x):
self.n += 1.
last_mean = self.mean.copy()
self.mean += (x - self.mean) / self.n
self.mean_diff += (x - last_mean) * (x - self.mean)
self.var = (self.mean_diff / self.n).clip(min=1e-2)
def normalize(self, inputs):
if self.n[0] <= 1:
return inputs
obs_mean = self.mean
obs_std = np.sqrt(self.var)
return (inputs - obs_mean) / obs_std
def flatten(c):
"""
Generator flattening the structure
"""
for x in c:
if isinstance(x, str) or not isinstance(x, Iterable):
yield x
else:
yield from flatten(x)
class AugmentedRandomSearch(Solver, Policies, Restorable):
T_domain = D
def __init__(self,
n_epochs=1000,
epoch_size=1000,
directions = 10,
top_directions = 3,
learning_rate = 0.02,
policy_noise = 0.03,
reward_maximization = True
) -> None:
self.env = None
self.n_epochs = n_epochs
self.learning_rate = learning_rate
self.epoch_size = epoch_size
self.directions = directions
self.top_directions = top_directions
self.policy = None
self.policy_noise = policy_noise
self.reward_maximization = reward_maximization
assert self.top_directions <= self.directions
def evaluate_policy(self, state, delta=None, direction=None):
if direction is None:
return self.policy.dot(state)
elif direction == "positive":
return (self.policy + self.policy_noise * delta).dot(state)
else:
return (self.policy - self.policy_noise * delta).dot(state)
def explore(self, normalizer, direction=None, delta=None):
state = self.env.reset()
done = False
num_plays = 0.
sum_rewards = 0
while not done and num_plays < self.epoch_size:
state = cgp.norm_and_flatten(state, self.env.get_observation_space().unwrapped())
action = self.evaluate_policy(state, delta, direction)
action = cgp.denorm(action, self.env.get_action_space().unwrapped())
state, transition_value, done, _ = self.env.step(action).astuple()
reward = transition_value[0]
reward = max(min(reward, 1), -1)
if not np.isnan(reward):
sum_rewards += reward
num_plays += 1
return sum_rewards
def update_policy(self, rollouts, sigma_r):
step = np.zeros(self.policy.shape)
for r_pos, r_neg, d in rollouts:
step += (r_pos - r_neg) * d
if self.top_directions == 0 or sigma_r == 0:
return
self.policy += self.learning_rate / (self.top_directions * sigma_r) * step
def get_dimension_space(self, space):
if isinstance(space, gym.spaces.Tuple):
dim = 0
for element in space:
dim += self.get_dimension_space(element)
return dim
elif isinstance(space, gym.spaces.Discrete):
return 1
else:
return space.shape[0]
def generate_perturbations(self, space):
if isinstance(space, gym.spaces.Tuple):
perturbations = []
for element in space:
perturbations += self.generate_perturbations(element)
return perturbations
if isinstance(space, gym.spaces.Discrete):
return 2*np.random.random_integers(space.n) / space.n -1
else:
return 2*np.random.random_sample()-1
def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
self.env = domain_factory()
np.random.seed(0)
input_size = self.get_dimension_space(self.env.get_observation_space().unwrapped())
output_size = self.get_dimension_space(self.env.get_action_space().unwrapped())
self.policy = np.zeros((output_size, input_size))
normalizer = Normalizer(input_size)
for step in range(self.n_epochs):
# Initializing the perturbations deltas and the positive/negative rewards
deltas = [2* np.random.random_sample(self.policy.shape)-1 for _ in range(self.directions)]
positive_rewards = [0] * self.directions
negative_rewards = [0] * self.directions
# Getting the positive rewards in the positive directions
for k in range(self.directions):
positive_rewards[k] = self.explore(normalizer, direction="positive", delta=deltas[k])
# Getting the negative rewards in the negative/opposite directions
for k in range(self.directions):
negative_rewards[k] = self.explore(normalizer, direction="negative", delta=deltas[k])
# Gathering all the positive/negative rewards to compute the standard deviation of these rewards
all_rewards = np.array(positive_rewards + negative_rewards)
sigma_r = all_rewards.std()
# Sorting the rollouts by the max(r_pos, r_neg) and selecting the best directions
scores = {k: max(r_pos, r_neg) for k, (r_pos, r_neg) in enumerate(zip(positive_rewards, negative_rewards))}
order = sorted(scores.keys(), key=lambda x: scores[x], reverse=self.reward_maximization)[:self.top_directions]
rollouts = [(positive_rewards[k], negative_rewards[k], deltas[k]) for k in order]
# Updating our policy
self.update_policy(rollouts, sigma_r)
# Printing the final reward of the policy after the update
reward_evaluation = self.explore(normalizer)
print('Step:', step, 'Reward:', reward_evaluation, 'Policy', self.policy)
print('Final Reward:', reward_evaluation, 'Policy', self.policy)
def _sample_action(self, observation: D.T_agent[D.T_observation]) -> D.T_agent[D.T_concurrency[D.T_event]]:
#print('observation', observation, 'Policy', self.policy)
action = self.policy.dot(cgp.norm_and_flatten(observation, self.env.get_observation_space().unwrapped()))
action = cgp.denorm(action, self.env.get_action_space().unwrapped())
return action
```
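The `Normalizer` above keeps running per-feature mean and variance so observations can be standardised online while the policy is trained. A self-contained sketch on synthetic data (the Gaussian used here is arbitrary):
```python
# Sketch: online standardisation with the running-statistics Normalizer.
import numpy as np

normalizer = Normalizer(nb_inputs=3)
rng = np.random.default_rng(0)
for _ in range(1000):
    normalizer.observe(rng.normal(loc=5.0, scale=2.0, size=3))
print(normalizer.normalize(np.array([5.0, 7.0, 3.0])))  # roughly [0, 1, -1]
```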
#### File: cgp/pycgp/cgpes.py
```python
import os
import numpy as np
from .cgp import CGP
from .evaluator import Evaluator
from joblib import Parallel, delayed
class CGPES:
def __init__(self, num_offsprings, mutation_rate_nodes, mutation_rate_outputs, father, evaluator, folder='genomes', num_cpus = 1):
self.num_offsprings = num_offsprings
self.mutation_rate_nodes = mutation_rate_nodes
self.mutation_rate_outputs = mutation_rate_outputs
self.father = father
#self.num_mutations = int(len(self.father.genome) * self.mutation_rate)
self.evaluator = evaluator
self.num_cpus = num_cpus
self.folder = folder
if self.num_cpus > 1:
self.evaluator_pool = []
for i in range(self.num_offsprings):
self.evaluator_pool.append(self.evaluator.clone())
def run(self, num_iteration):
if not os.path.isdir(self.folder):
os.mkdir(self.folder)
self.logfile = open(self.folder + '/out.txt', 'w')
self.current_fitness = self.evaluator.evaluate(self.father, 0)
self.father.save(self.folder + '/cgp_genome_0_' + str(self.current_fitness) + '.txt')
self.offsprings = np.empty(self.num_offsprings, dtype=CGP)
self.offspring_fitnesses = np.zeros(self.num_offsprings, dtype=float)
for self.it in range(1, num_iteration + 1):
#generate offsprings
if self.num_cpus == 1:
for i in range(0, self.num_offsprings):
self.offsprings[i] = self.father.clone()
#self.offsprings[i].mutate(self.num_mutations)
self.offsprings[i].mutate_per_gene(self.mutation_rate_nodes, self.mutation_rate_outputs)
self.offspring_fitnesses[i] = self.evaluator.evaluate(self.offsprings[i], self.it)
else:
for i in range(self.num_offsprings):
self.offsprings[i] = self.father.clone()
#self.offsprings[i].mutate(self.num_mutations)
self.offsprings[i].mutate_per_gene(self.mutation_rate_nodes, self.mutation_rate_outputs)
def offspring_eval_task(offspring_id):
return self.evaluator_pool[offspring_id].evaluate(self.offsprings[offspring_id], self.it)
self.offspring_fitnesses = Parallel(n_jobs = self.num_cpus)(delayed(offspring_eval_task)(i) for i in range(self.num_offsprings))
#get the best fitness
best_offspring = np.argmax(self.offspring_fitnesses)
#compare to father
self.father_was_updated = False
if self.offspring_fitnesses[best_offspring] >= self.current_fitness:
self.current_fitness = self.offspring_fitnesses[best_offspring]
self.father = self.offsprings[best_offspring]
self.father_was_updated = True
# display stats
print(self.it, '\t', self.current_fitness, '\t', self.father_was_updated, '\t', self.offspring_fitnesses)
self.logfile.write(str(self.it) + '\t' + str(self.current_fitness) + '\t' + str(self.father_was_updated) + '\t' + str(self.offspring_fitnesses) + '\n')
self.logfile.flush()
print('====================================================')
if self.father_was_updated:
#print(self.father.genome)
self.father.save(self.folder + '/cgp_genome_' + str(self.it) + '_' + str(self.current_fitness) + '.txt')
```
|
{
"source": "jeromerony/adversarial-library",
"score": 2
}
|
#### File: utils/lagrangian_penalties/test_penalty_functions.py
```python
import pytest
import torch
from torch.autograd import grad, gradcheck
from adv_lib.utils.lagrangian_penalties import all_penalties
@pytest.mark.parametrize('penalty', list(all_penalties.values()))
def test_grad(penalty) -> None:
y = torch.randn(512, dtype=torch.double, requires_grad=True)
ρ = torch.randn(512, dtype=torch.double).abs().clamp_min(1e-3)
μ = torch.randn(512, dtype=torch.double).abs().clamp_min(1e-6)
ρ.requires_grad_(True)
μ.requires_grad_(True)
# check if gradients are correct compared to numerical approximations using finite differences
assert gradcheck(penalty, inputs=(y, ρ, μ))
@pytest.mark.parametrize('penalty,value', [(all_penalties['P2'], 1), (all_penalties['P3'], 1)])
@pytest.mark.parametrize('dtype', [torch.float32, torch.float64])
def test_nan_grad(penalty, value, dtype) -> None:
y = torch.full((1,), value, dtype=dtype, requires_grad=True)
ρ = torch.full((1,), value, dtype=dtype)
μ = torch.full((1,), value, dtype=dtype)
out = penalty(y, ρ, μ)
g = grad(out, y, only_inputs=True)[0]
assert torch.isnan(g).any() == False # check nan in gradients of penalty
```
#### File: utils/lagrangian_penalties/test_univariate_functions.py
```python
import pytest
import torch
from torch.autograd import grad, gradcheck
from adv_lib.utils.lagrangian_penalties import univariate_functions
@pytest.mark.parametrize('univariate', univariate_functions.__all__)
def test_grad(univariate) -> None:
t = torch.randn(512, dtype=torch.double, requires_grad=True)
# check if gradients are correct compared to numerical approximations using finite differences
assert gradcheck(univariate_functions.__dict__[univariate](), inputs=t)
@pytest.mark.parametrize('univariate,value', [('LogExp', 1), ('LogQuad_1', 1), ('HyperExp', 1), ('HyperQuad', 1),
('LogBarrierQuad', 0), ('HyperBarrierQuad', 0), ('HyperLogQuad', 0),
('HyperLogQuad', 1)])
@pytest.mark.parametrize('dtype', [torch.float32, torch.float64])
def test_nan_grad(univariate, value, dtype) -> None:
t = torch.full((1,), value, dtype=dtype, requires_grad=True)
univariate_func = univariate_functions.__dict__[univariate]()
out = univariate_func(t)
g = grad(out, t, only_inputs=True)[0]
assert torch.isnan(g).any() == False # check nan in gradients of penalty
```
|
{
"source": "jeromerony/augmented_lagrangian_adversarial_attacks",
"score": 2
}
|
#### File: augmented_lagrangian_adversarial_attacks/attacks/foolbox.py
```python
from foolbox.attacks import EADAttack
from foolbox.criteria import Misclassification, TargetedMisclassification
from foolbox.models import PyTorchModel
from torch import nn, Tensor
def ead_attack(model: nn.Module,
inputs: Tensor,
labels: Tensor,
targeted: bool = False,
**kwargs) -> Tensor:
fmodel = PyTorchModel(model=model, bounds=(0, 1))
attack = EADAttack(**kwargs)
if targeted:
        criterion = TargetedMisclassification(target_classes=labels)
else:
criterion = Misclassification(labels=labels)
adv_inputs = attack(model=fmodel, inputs=inputs, criterion=criterion, epsilons=None)[0]
return adv_inputs
```
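A hedged usage sketch of the wrapper above; the toy linear classifier, the random CIFAR-shaped batch, and the reduced `steps` value are assumptions for illustration only, and in practice `model` would be a trained network expecting inputs in [0, 1].
```python
# Hypothetical usage sketch for ead_attack (untrained toy model, random inputs).
import torch
from torch import nn

model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10)).eval()
inputs = torch.rand(4, 3, 32, 32)            # images already scaled to [0, 1]
labels = torch.randint(0, 10, (4,))
adv = ead_attack(model, inputs, labels, targeted=False, steps=100)
print((adv - inputs).abs().flatten(1).sum(1))  # per-sample L1 perturbation size
```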
#### File: jeromerony/augmented_lagrangian_adversarial_attacks/utils.py
```python
from typing import Tuple
from torch import Tensor
def robust_accuracy_curve(distances: Tensor,
successes: Tensor,
worst_distance: float = float('inf')) -> Tuple[Tensor, Tensor]:
worst_case_distances = distances.clone()
worst_case_distances[~successes] = worst_distance
unique_distances = worst_case_distances.unique()
robust_accuracies = (worst_case_distances.unsqueeze(0) > unique_distances.unsqueeze(1)).float().mean(1)
return unique_distances, robust_accuracies
```
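A small self-contained sketch of how the helper above could be used, with random distances and success flags standing in for real attack results:
```python
# Sketch: robust accuracy as a function of the perturbation budget, on fake attack results.
import torch

distances = torch.rand(1000) * 2       # e.g. L2 norms of adversarial perturbations
successes = torch.rand(1000) > 0.3     # whether each attack succeeded
budgets, robust_acc = robust_accuracy_curve(distances, successes)
for eps, acc in list(zip(budgets.tolist(), robust_acc.tolist()))[:5]:
    print(f'budget {eps:.3f}: robust accuracy {acc:.3f}')
```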
|
{
"source": "jeromerony/dml_cross_entropy",
"score": 3
}
|
#### File: jeromerony/dml_cross_entropy/prepare_data.py
```python
import os
import tarfile
import zipfile
from os import path
from sacred import Experiment
from scipy.io import loadmat
from torchvision.datasets.utils import download_url
ex1 = Experiment('Prepare CUB')
@ex1.config
def config():
cub_dir = path.join('data', 'CUB_200_2011')
cub_url = 'http://www.vision.caltech.edu.s3-us-west-2.amazonaws.com/visipedia-data/CUB-200-2011/CUB_200_2011.tgz'
images_file = 'images.txt'
train_file = 'train.txt'
test_file = 'test.txt'
@ex1.capture
def download_extract_cub(cub_dir, cub_url):
download_url(cub_url, root=path.dirname(cub_dir))
filename = path.join(path.dirname(cub_dir), path.basename(cub_url))
with tarfile.open(filename, 'r:gz') as tar:
tar.extractall(path=path.dirname(cub_dir))
@ex1.capture
def generate_cub_train_test(cub_dir, images_file, train_file, test_file):
images_file = path.join(cub_dir, images_file)
train_file = path.join(cub_dir, train_file)
test_file = path.join(cub_dir, test_file)
train = []
test = []
with open(images_file) as f_images:
lines_images = f_images.read().splitlines()
for line in lines_images:
image_path = line.split()[1]
label = int(image_path.split('.')[0]) - 1
file_line = ','.join((path.join('images', image_path), str(label)))
if label < 100:
train.append(file_line)
else:
test.append(file_line)
with open(train_file, 'w') as f:
f.write('\n'.join(train))
with open(test_file, 'w') as f:
f.write('\n'.join(test))
@ex1.main
def prepare_cub():
download_extract_cub()
generate_cub_train_test()
ex2 = Experiment('Prepare CARS-196')
@ex2.config
def config():
cars_dir = path.join('data', 'CARS_196')
cars_url = 'http://imagenet.stanford.edu/internal/car196/car_ims.tgz'
cars_annotations_url = 'http://imagenet.stanford.edu/internal/car196/cars_annos.mat'
train_file = 'train.txt'
test_file = 'test.txt'
@ex2.capture
def download_extract_cars(cars_dir, cars_url, cars_annotations_url):
download_url(cars_annotations_url, root=cars_dir)
download_url(cars_url, root=cars_dir)
filename = path.join(cars_dir, path.basename(cars_url))
with tarfile.open(filename, 'r:gz') as tar:
tar.extractall(path=cars_dir)
return path.join(cars_dir, path.basename(cars_annotations_url))
@ex2.capture
def generate_cars_train_test(cars_dir, annotation_file, train_file, test_file):
train_file = path.join(cars_dir, train_file)
test_file = path.join(cars_dir, test_file)
train = []
test = []
annotations = loadmat(annotation_file)
label_dict = {anno[0][0]: anno[5][0][0] - 1 for anno in annotations['annotations'][0]}
for image_path, label in label_dict.items():
file_line = ','.join((image_path, str(label)))
if label < 98:
train.append(file_line)
else:
test.append(file_line)
with open(train_file, 'w') as f:
f.write('\n'.join(train))
with open(test_file, 'w') as f:
f.write('\n'.join(test))
@ex2.main
def prepare_cars():
annotation_file = download_extract_cars()
generate_cars_train_test(annotation_file=annotation_file)
ex3 = Experiment('Prepare SOP')
@ex3.config
def config():
sop_dir = path.join('data', 'Stanford_Online_Products')
sop_url = 'ftp://cs.stanford.edu/cs/cvgl/Stanford_Online_Products.zip'
train_file = 'train.txt'
test_file = 'test.txt'
@ex3.capture
def download_extract_sop(sop_dir, sop_url):
download_url(sop_url, root=path.dirname(sop_dir))
filename = path.join(path.dirname(sop_dir), path.basename(sop_url))
with zipfile.ZipFile(filename) as zipf:
zipf.extractall(path=path.dirname(sop_dir))
@ex3.capture
def generate_sop_train_test(sop_dir, train_file, test_file):
original_train_file = path.join(sop_dir, 'Ebay_train.txt')
original_test_file = path.join(sop_dir, 'Ebay_test.txt')
train_file = path.join(sop_dir, train_file)
test_file = path.join(sop_dir, test_file)
with open(original_train_file) as f_images:
train_lines = f_images.read().splitlines()[1:]
with open(original_test_file) as f_images:
test_lines = f_images.read().splitlines()[1:]
train = [','.join((l.split()[-1], str(int(l.split()[1]) - 1))) for l in train_lines]
test = [','.join((l.split()[-1], str(int(l.split()[1]) - 1))) for l in test_lines]
with open(train_file, 'w') as f:
f.write('\n'.join(train))
with open(test_file, 'w') as f:
f.write('\n'.join(test))
@ex3.main
def prepare_sop():
download_extract_sop()
generate_sop_train_test()
ex4 = Experiment('Prepare InShop')
@ex4.config
def config():
inshop_dir = path.join('data', 'InShop')
train_file = 'train.txt'
test_query_file = 'test_query.txt'
test_gallery_file = 'test_gallery.txt'
@ex4.main
def generate_inshop_train_test(inshop_dir, train_file, test_query_file, test_gallery_file):
"""
The data needs to be downloaded and extracted manually for InShop at
https://drive.google.com/drive/folders/0B7EVK8r0v71pVDZFQXRsMDZCX1E.
Specifically, the img.zip and list_eval_partition.txt files.
"""
original_sample_file = path.join(inshop_dir, 'list_eval_partition.txt')
train_file = path.join(inshop_dir, train_file)
test_query_file = path.join(inshop_dir, test_query_file)
test_gallery_file = path.join(inshop_dir, test_gallery_file)
with open(original_sample_file) as f:
sample_lines = f.read().splitlines()[2:]
sample_lines = [l.split() for l in sample_lines]
class_ids = [class_id for (_, class_id, _) in sample_lines]
class_map, max_class_index = {}, 0
for class_id in class_ids:
if class_id not in class_map.keys():
class_map[class_id] = max_class_index
max_class_index += 1
train_samples = [(l[0], class_map[l[1]]) for l in sample_lines if l[2] == 'train']
test_query_samples = [(l[0], class_map[l[1]]) for l in sample_lines if l[2] == 'query']
test_gallery_samples = [(l[0], class_map[l[1]]) for l in sample_lines if l[2] == 'gallery']
train = [','.join((l[0], str(l[1]))) for l in train_samples]
test_query = [','.join((l[0], str(l[1]))) for l in test_query_samples]
test_gallery = [','.join((l[0], str(l[1]))) for l in test_gallery_samples]
with open(train_file, 'w') as f:
f.write('\n'.join(train))
with open(test_query_file, 'w') as f:
f.write('\n'.join(test_query))
with open(test_gallery_file, 'w') as f:
f.write('\n'.join(test_gallery))
if __name__ == '__main__':
os.makedirs('data', exist_ok=True)
ex1.run()
ex2.run()
ex3.run()
ex4.run()
```
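Each experiment above writes plain `relative/path,label` lines into its split files. A minimal sketch of reading one of them back (the CUB path follows the configuration above; any of the generated files has the same format):
```python
# Sketch: load the (path, label) pairs written by the preparation scripts.
from os import path

def read_split(split_file):
    samples = []
    with open(split_file) as f:
        for line in f.read().splitlines():
            image_path, label = line.rsplit(',', 1)
            samples.append((image_path, int(label)))
    return samples

samples = read_split(path.join('data', 'CUB_200_2011', 'train.txt'))
print(len(samples), samples[:2])
```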
|
{
"source": "Jeromeschmidt/CS-1.3-Core-Data-Structures",
"score": 4
}
|
#### File: CS-1.3-Core-Data-Structures/Code/set.py
```python
from binarytree import BinarySearchTree
class Set:
def __init__(self, elements=None):
self.tree = BinarySearchTree()
self.size = 0
if elements is not None:
for elm in elements:
self.add(elm)
    def __len__(self):
        # __init__ tracks the element count in the self.size attribute, which would
        # shadow an instance method named size(); expose the count via __len__ instead
        return self.size
    def __iter__(self):
        return iter(self.tree.items_in_order())
    def contains(self, element):
        return self.tree.contains(element)
def add(self, element):
if self.contains(element):
raise ValueError("Cannot add element to Set again")
else:
self.tree.insert(element)
self.size += 1
def remove(self, element):
self.tree.delete(element)
self.size -= 1
def union(self, other_set):
"""TODO: Running time: O(n*k), have to visit every node
TODO: Memory usage: O(n+k) nodes are stored on stack"""
result = self.tree.items_in_order()
for elm in other_set.tree.items_in_order():
if elm not in result:
result.append(elm)
return Set(result)
def intersection(self, other_set):
"""TODO: Running time: O(n), have to visit every node
TODO: Memory usage: O(n+k) nodes are stored on stack"""
result = Set()
for elm in self.tree.items_in_order():
if other_set.contains(elm):
result.add(elm)
return result
def difference(self, other_set):
"""TODO: Running time: O(n), have to visit every node
TODO: Memory usage: O(n+k) nodes are stored on stack"""
result = Set()
for elm in self.tree.items_in_order():
if not other_set.contains(elm):
result.add(elm)
for elm in other_set.tree.items_in_order():
if elm in result.tree.items_in_order():
result.remove(elm)
return result
def is_subset(self, other_set):
"""TODO: Running time: O(n) worst, O(1 best), have to visit every node
TODO: Memory usage: O(n+k) nodes are stored on stack"""
if self.size > other_set.size:
return False
for elm in self.tree.items_in_order():
if not other_set.tree.contains(elm):
return False
return True
```
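A short usage sketch of the tree-backed `Set` above (it assumes the accompanying `binarytree` module from the same exercise is importable):
```python
# Sketch: basic operations on the tree-backed Set.
a = Set(['a', 'b', 'c'])
b = Set(['b', 'c', 'd'])
print(a.contains('b'))                           # True
print(a.union(b).tree.items_in_order())          # ['a', 'b', 'c', 'd']
print(a.intersection(b).tree.items_in_order())   # ['b', 'c']
print(a.difference(b).tree.items_in_order())     # ['a']
print(b.is_subset(a))                            # False ('d' is not in a)
```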
|
{
"source": "Jeromeschmidt/Guru",
"score": 2
}
|
#### File: guru/users/models.py
```python
from django.contrib.auth.models import AbstractUser
from django.db.models import (BooleanField, CASCADE, CharField, FloatField,
IntegerField, ManyToManyField, Model,
OneToOneField, PositiveSmallIntegerField)
from django.contrib.postgres.fields import ArrayField
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
class User(AbstractUser):
# First Name and Last Name do not cover name patterns
# around the globe.
name = CharField(_("Name of User"), blank=True, max_length=255)
# is_customer = BooleanField(default=True) #
# user = OneToOneField(User, on_delete=CASCADE, primary_key=True)
skills = ArrayField(CharField(max_length=10, blank=True),
size=8, null=True,
)
# ArrayField(_("A list of skills that user can help with"), null=True,
# base_field=CharField(max_length=255))
classes_taken = ArrayField(null=True,
base_field=CharField(max_length=255),
size=20)
is_teachingassistant = BooleanField(default=False)
rating = IntegerField(null=True, blank=True)
avg_reponse = FloatField(null=True, blank=True)
is_online = BooleanField(default=False)
messages_received = IntegerField(null=True, blank=True)
bio = CharField(blank=True, max_length=500)
def get_absolute_url(self):
return reverse("users:detail", kwargs={"username": self.username})
```
|
{
"source": "Jeromeschmidt/Reinforcement-Learning-Agent",
"score": 3
}
|
#### File: alpaca-api/streaming-market-data/stream_ticks_into_sql.py
```python
import websocket
import os
import json
import sqlite3
import datetime as dt
endpoint = "wss://data.alpaca.markets/stream"
headers = json.loads(open("account.json", 'r').read())
streams = ["T.PLTR", "T.NIO", "T.AMZN", "T.TWTR", "Q.CELH", "Q.SSPK", "T.OPTT"]
# create a database
db1 = sqlite3.connect('/Users/andrewilliams/Documents/Dev/Alpaca-Api/streaming-market-data/trades_ticks.db')
db2 = sqlite3.connect('/Users/andrewilliams/Documents/Dev/Alpaca-Api/streaming-market-data/quotes_ticks.db')
def return_tickers(streams, tick_type="trades"):
tickers = []
if tick_type == 'quotes':
for symbol in streams:
tt, ticker = symbol.split(".")
if tt == 'Q' and ticker not in tickers:
tickers.append(ticker)
    elif tick_type == 'trades':
for symbol in streams:
t, ticker = symbol.split(".")
if t == 'T' and ticker not in tickers:
tickers.append(ticker)
return tickers
def insert_tickers(tick):
if tick["stream"].split(".")[0] == "T":
c = db1.cursor()
for ms in range(100):
            # add a millisecond offset to keep the primary-key timestamp unique
try:
table = tick["stream"].split(".")[-1]
# insert unique time stamps into sql by converting nano seconds to seconds
vals = [dt.datetime.fromtimestamp(int(tick['data']['t'])/10**9)+dt.timedelta(milliseconds=ms), tick['data']['p'], tick['data']['s']]
query = "INSERT INTO t{} (timestamp, price, volume) VALUES (?,?,?)".format(table)
c.execute(query, vals)
break
except Exception as e:
print(e)
try:
db1.commit()
except:
db1.rollback()
if tick["stream"].split(".")[0] == "Q":
c = db2.cursor()
for ms in range(100):
            # add a millisecond offset to keep the primary-key timestamp unique
try:
table = tick["stream"].split(".")[-1]
# insert unique time stamps into sql by converting nano seconds to seconds
vals = [dt.datetime.fromtimestamp(int(tick['data']['t'])/10**9)+dt.timedelta(milliseconds=ms), tick['data']['p'], tick['data']['P'], tick['data']['s'], tick['data']['S']]
query = "INSERT INTO q{} (timestamp, bid_price, ask_price, bid_volume, ask_volume) VALUES (?,?,?)".format(table)
c.execute(query, vals)
break
except Exception as e:
print(e)
try:
db2.commit()
except:
db2.rollback()
def create_tables(db, tickers, tick_type):
'''Creates db tables from real-time market stream'''
# to make changes to db
c = db.cursor()
if tick_type == 'trades':
for ticker in tickers:
c.execute("CREATE TABLE IF NOT EXISTS t{} (timestamp datetime primary key, price real(15, 5), volume integer)".format(ticker))
elif tick_type == 'quotes':
for ticker in tickers:
c.execute("CREATE TABLE IF NOT EXISTS t{} (timestamp datetime primary key, ask_price real(15, 5), bid_price real(15, 5), ask_volume integer, bid_volume integer)".format(ticker))
try:
# add to db
db.commit()
# if there's a problem
except:
# show any problems w/ change
db.rollback()
create_tables(db1, return_tickers(streams, "trades"), "trades")
create_tables(db2, return_tickers(streams, "quotes"), "quotes")
def on_open(ws):
auth = {
"action": "authenticate",
"data": {"key_id":headers['APCA-API-KEY-ID'], "secret_key": headers['APCA-API-SECRET']}
}
ws.send(json.dumps(auth))
message = {
"action": "listen",
"data":
{
"streams": streams
}
    }
    ws.send(json.dumps(message))
def on_message(ws, message):
    '''Parse an incoming stream message and insert the tick into the local SQLite databases.'''
print(message)
tick = json.loads(message)
insert_tickers(tick)
ws = websocket.WebSocketApp("wss://data.alpaca.markets/stream", on_open=on_open, on_message=on_message)
ws.run_forever()
```
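Once the stream has been running for a while, the per-ticker tables can be inspected with plain `sqlite3`. A hedged sketch (the table name follows the `t` + ticker convention used above; adjust the path to wherever `trades_ticks.db` was created):
```python
# Sketch: read back the most recent trade ticks captured by the stream.
import sqlite3

db = sqlite3.connect('trades_ticks.db')
rows = db.execute(
    "SELECT timestamp, price, volume FROM tAMZN ORDER BY timestamp DESC LIMIT 5"
).fetchall()
for ts, price, volume in rows:
    print(ts, price, volume)
db.close()
```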
#### File: Reinforcement-Learning-Agent/trading_agent/run_DRL.py
```python
import pandas as pd
import numpy as np
import time
from stable_baselines.common.vec_env import DummyVecEnv
# preprocessor
from preprocessing.preprocessors import *
from preprocessing.alpaca_api import *
from preprocessing.GetStocks import *
# config
from config.config import *
# model
from model.models import *
import os
def run_model(tickers, start="2020-01-01T09:30:00-04:00", end="2020-12-31T09:30:00-04:00") -> None:
"""Train the model."""
# # read and preprocess data
# preprocessed_path = "done_data.csv"
# if os.path.exists(preprocessed_path):
# data = pd.read_csv(preprocessed_path, index_col=0)
# else:
# data = preprocess_data()
# data = calcualte_adjcp(data)
# data = add_turbulence(data)
# data.to_csv(preprocessed_path)
# tickers = get_highest_movers()
# print(tickers)
data = preprocess_data(tickers, start, end)
data = data.drop_duplicates()
# data = calcualte_adjcp(data)
print(data)
# data = add_turbulence(data)
# data.to_csv(preprocessed_path)
# print(data.head())
# print(data.size)
# 2015/10/01 is the date that validation starts
# 2016/01/01 is the date that real trading starts
# unique_trade_date needs to start from 2015/10/01 for validation purpose
# unique_trade_date = data[(data.datadate > 20151001)&(data.datadate <= 20200707)].datadate.unique()
# end = data["datadate"].max()
# start = end - 10000
unique_trade_date = data[(data.datadate > 20200631)&(data.datadate <= 20201231)].datadate.unique()
# print(unique_trade_date)
# rebalance_window is the number of months to retrain the model
# validation_window is the number of months to validation the model and select for trading
# rebalance_window = 63
# validation_window = 63
rebalance_window = 30
validation_window = 30
# print(data)
## Ensemble Strategy
model = run_ensemble_strategy(df=data,
unique_trade_date= unique_trade_date,
rebalance_window = rebalance_window,
validation_window=validation_window)
#_logger.info(f"saving model version: {_version}")
return model
if __name__ == "__main__":
tickers = ['AMCR', 'CCL', 'ETSY', 'OXY', 'NCLH', 'FLS', 'SIVB', 'V', 'FANG', 'DG', 'MCHP', 'ENPH', 'MRO', 'BBY', 'CB', 'APA', 'DISCK', 'XRX', 'NKE', 'DISCA']
run_model(tickers,start="2020-01-01T09:30:00-04:00", end="2020-12-31T09:30:00-04:00")
```
|
{
"source": "Jeromeschmidt/SPD-2.31-Testing-and-Architecture",
"score": 3
}
|
#### File: pytest/pytest-tut/exercise_2.py
```python
import math
import pytest
T_HALF = 5730
DECAY_CONSTANT = -0.693
def get_age_carbon_14_dating(carbon_14_ratio):
"""Returns the estimated age of the sample in year.
carbon_14_ratio: the percent (0 < percent < 1) of carbon-14
in the sample conpared to the amount in living
tissue (unitless).
"""
if carbon_14_ratio <= 0:
raise ValueError
return math.log(carbon_14_ratio) / DECAY_CONSTANT * T_HALF
# TODO: Write a unit test which feeds 0.35 to the function.
# The result should be '8680.34'. Does the function handle
# every possible input correctly? What if the input is zero
# or negative?
# Add the necessary logic to make sure the function handles
# every possible input properly. Then write a unit test against
# this special case.
def test_carbon_dating():
assert round(get_age_carbon_14_dating(0.35), 2) == 8680.35
with pytest.raises(ValueError):
get_age_carbon_14_dating(0)
with pytest.raises(ValueError):
get_age_carbon_14_dating(-0.35)
```
#### File: lab/refactoring/extract_class2.py
```python
class Actor:
def __init__(self, first_name, last_name, birth_year, movies, email):
self.first_name = first_name
self.last_name = last_name
self.birth_year = birth_year
self.movies = movies
self.email = email
def send_hiring_email(self):
print("email sent to: ", self.email)
actor1 = Actor('Elizabeth', 'Debicki', 1990, ['Tenet', 'Vita & Virginia', 'Guardians of the Galaxy', 'The Great Gatsby'], '<EMAIL>')
actor2 = Actor('Jim', 'Carrey', 1962, ['Ace Ventura', 'The Mask', 'Dumb and Dumber', 'The Truman Show', 'Yes Man'], '<EMAIL>')
actors = [actor1, actor2]
for actor in actors:
if actor.birth_year > 1985:
print(actor.first_name, actor.last_name)
print('Movies Played: ', end='')
for m in actor.movies:
print(m, end=', ')
print()
actor.send_hiring_email()
```
#### File: lab/refactoring/extract_method3.py
```python
import math
def calcualte_distance(x_1, x_2, y_1, y_2):
"""Calculate the distance between the two circle."""
distance = math.sqrt((x_1-x_2)**2 + (y_1 - y_2)**2)
print('distance', distance)
def calcualte_length(x_1, x_2, y_1, y_2):
"""calcualte the length of vector AB vector which is a vector between A and B points."""
length = math.sqrt((x_1-x_2)*(x_1-x_2) + (y_1-y_2)*(y_1-y_2))
print('length', length)
calcualte_distance(4, 53, 4.25, -5.35)
calcualte_length(-36, .34, 97, .91)
```
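Since both functions above compute the same Euclidean distance, one possible extract-method refactoring is sketched below (this is an illustration, not the lab's official solution; the original function names, including their spelling, are kept):
```python
# Sketch of the extracted helper: both functions delegate to one distance computation.
import math

def euclidean_distance(x_1, y_1, x_2, y_2):
    """Distance between points (x_1, y_1) and (x_2, y_2)."""
    return math.sqrt((x_1 - x_2) ** 2 + (y_1 - y_2) ** 2)

def calcualte_distance(x_1, x_2, y_1, y_2):
    print('distance', euclidean_distance(x_1, y_1, x_2, y_2))

def calcualte_length(x_1, x_2, y_1, y_2):
    print('length', euclidean_distance(x_1, y_1, x_2, y_2))

calcualte_distance(4, 53, 4.25, -5.35)
calcualte_length(-36, .34, 97, .91)
```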
|
{
"source": "jeromeshan/mimas",
"score": 3
}
|
#### File: mimas/tests/cube_generator_test.py
```python
import unittest
from pbgca import CubeGenerator
import numpy as np
class CubeGeneratorTestCase(unittest.TestCase):
def test_2d(self):
"""Test 2d cube generation"""
cube = CubeGenerator.n_dim_cube(2)
self.assertEqual(cube.tolist(), np.transpose(np.array([[ -0.5, -0.5, 0.5, 0.5], [ -0.5, 0.5, 0.5, -0.5]])).tolist())
def test_3d(self):
"""Test 3d cube generation"""
cube = CubeGenerator.n_dim_cube(3)
self.assertEqual(cube.tolist(), [[-0.5, -0.5, -0.5],[-0.5, 0.5, -0.5],[ 0.5, 0.5, -0.5],[ 0.5, -0.5, -0.5],[-0.5, -0.5, 0.5],[-0.5, 0.5, 0.5],[ 0.5, 0.5, 0.5],[ 0.5, -0.5, 0.5]])
if __name__ == '__main__':
unittest.main()
```
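From the two cases above, `n_dim_cube(n)` appears to return the `2**n` vertices of a unit hypercube centred at the origin, as an array of shape `(2**n, n)`. A small sketch extending the check to higher dimensions (the shape expectation is an inference from these tests, not documented behaviour):
```python
# Sketch: inspect the vertex arrays for a few dimensions.
from pbgca import CubeGenerator

for n in (2, 3, 4):
    cube = CubeGenerator.n_dim_cube(n)
    print(n, cube.shape)  # expected (2**n, n) if the pattern above generalises
```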
|
{
"source": "jerome-t/nxos-scp-upload",
"score": 2
}
|
#### File: jerome-t/nxos-scp-upload/scp-multi-upload.py
```python
from getpass import getpass
from argparse import ArgumentParser
import csv
import os.path
import sys
from time import time
from concurrent.futures import ProcessPoolExecutor, wait
from netmiko import ConnectHandler, file_transfer
from netmiko.ssh_exception import NetMikoAuthenticationException, NetMikoTimeoutException
from paramiko.ssh_exception import AuthenticationException
# --- Define the threads
MAX_THREADS = 8
# --- Check file exists function
def is_valid_file(parser, arg):
if not os.path.exists(arg):
parser.error("The file %s does not exist!" % arg)
else:
return(arg)
# --- Confirmation function
def confirm(prompt=None, resp=False):
if prompt is None:
prompt = 'Confirm'
if resp:
prompt = '%s [%s]|%s: ' % (prompt, 'y', 'n')
else:
prompt = '%s [%s]|%s: ' % (prompt, 'n', 'y')
while True:
ans = input(prompt)
if not ans:
return resp
if ans not in ['y', 'Y', 'n', 'N']:
print ('please enter y or n.')
continue
if ans == 'y' or ans == 'Y':
return True
if ans == 'n' or ans == 'N':
return False
# --- Upload Netmiko function
def upload_nemiko(net_device):
print("Upload on:", HOST)
# Create the Netmiko SSH connection
try:
ssh_conn = ConnectHandler(**net_device)
transfer_dict = {}
transfer_dict = file_transfer(ssh_conn,
source_file=SOURCE_FILE,
dest_file=SOURCE_FILE,
)
print(80*"=")
print('Results for', HOST+':')
print('File exists already: ',transfer_dict['file_exists'])
print('File transferred: ',transfer_dict['file_transferred'])
print('MD5 verified :',transfer_dict['file_verified'])
except NetMikoTimeoutException:
print(80*"=")
        print('Results for', net_device['host'] + ':')
print('Skipped: SSH Timed out')
#continue
except (AuthenticationException, NetMikoAuthenticationException):
print(80*"=")
        print('Results for', net_device['host'] + ':')
print('Skipped: Authentication failed')
#continue
# --- Init argparse
parser = ArgumentParser()
parser.add_argument("filename", help="The file to upload", metavar='FILE', type=lambda x: is_valid_file(parser, x))
args = parser.parse_args()
# --- Define the OS file to upload
SOURCE_FILE = (args.filename)
# --- Check the hosts.csv file and get the list of hosts
VENDORS_TYPES = ["cisco_ios", "arista_eos", "juniper_junos", "cisco_nxos"]
VENDOR_TYPE = ''
HOSTS_LIST = []
with open("./hosts.csv", 'r') as csvfile:
csv_reader = csv.reader(csvfile, delimiter=',')
for row in csv_reader:
        if str(row[1]).strip() not in VENDORS_TYPES:
print('Invalid CSV, please check the vendor types. Must be: cisco_ios, arista_eos, juniper_junos or cisco_nxos')
sys.exit()
HOSTS_LIST.append(row[0])
# --- Ask confirmation
print(80*"=")
print('Please, confirm the upload of',SOURCE_FILE+' on: ')
print(*HOSTS_LIST, sep ='\n')
prompt = str("Proceed?")
if confirm(prompt=prompt, resp=False) == True:
# --- Get credentials
print(80*"-")
USERNAME = input('Please insert your username: ')
print("And your password")
    PASSWORD = getpass()
print(80*"-")
# --- Get the time for timing
start_time = time()
# --- Set the number of threads
pool = ProcessPoolExecutor(MAX_THREADS)
# --- SCP itself, in multi-threads
SW_LIST = []
FUTURE_LIST = []
with open("./hosts.csv", 'r') as csvfile:
SW_LIST = csv.reader(csvfile, delimiter=',')
for CSV_ROW in SW_LIST:
HOST = CSV_ROW[0]
DEVICE_TYPE = CSV_ROW[1]
net_device = {
'device_type': DEVICE_TYPE,
'host': HOST,
'username': USERNAME,
'password': PASSWORD,
}
            FUTURE = pool.submit(upload_netmiko, net_device)
FUTURE_LIST.append(FUTURE)
wait(FUTURE_LIST)
# --- All done confirmation
print(80*"=")
print("Uploads completed in {} seconds.".format(time() - start_time))
print(80*"=")
else:
print("Operation aborted, goodbye.")
print(80*"=")
```
|
{
"source": "jerometrie/algoTP1",
"score": 4
}
|
#### File: jerometrie/algoTP1/TP1.py
```python
from lecture import *
def miseEnForme1():
""" Fonction qui restitue un fichier texte mis en forme sur l'écran """
# Variable a: stocke les '\' et b stocke le caractère suivant
a = lire()
b = lire()
    s = 1 # Section number
    i = 1 # Subsection number
while (not(a == '\\' and b == '.')):
        # Handle line breaks and sections/subsections
if(a == '\\'):
if(b == '\\'):
a = b
b = lire()
elif(b == 'n'):
nouvelle_ligne()
a = lire()
b = lire()
elif(b == 's'):
section(s, 3)
s = s +1
i = 1
a = lire()
b = lire()
elif(b == 'i'):
sous_section(s, i, 6)
i = i +1
a = lire()
b = lire()
else:
ecrire(b)
a = lire()
b = lire()
        # Comma case
elif(a == ","):
if(b == " "):
ecrire(a)
ecrire(b)
a = lire()
b = lire()
else:
ecrire(a)
ecrire(" ")
ecrire(b)
a = lire()
b = lire()
        # Line-break case
elif(a == "\n"):
a = b
b = lire()
        # General case
else:
ecrire(a)
a = b
b = lire()
print()
def miseEnForme():
""" Fonction qui restitue un fichier texte mis en forme sur l'écran """
# Variable a: stocke les '\' et b stocke le caractère suivant
a = lire()
b = lire()
    s = 1 # Section number
    i = 1 # Subsection number
while (not(a == '\\' and b == '.')):
        # Handle line breaks and sections/subsections
if(a == '\\'):
if(b == '\\'):
                ecrire(a) # To print the \ characters that are not part of an instruction
a = b
b = lire()
elif(b == 'n'):
nouvelle_ligne()
a = lire()
b = lire()
elif(b == 's'):
section(s, 3)
s = s + 1
i = 1
a = lire()
b = lire()
elif(b == 'i'):
sous_section(s - 1, i, 6)
i = i +1
a = lire()
b = lire()
else:
ecrire(b)
a = lire()
b = lire()
        # Case of several consecutive spaces
elif(a == ' ' and b == ' '):
a = b
b = lire()
        # Comma case
elif(a == ","):
if(b == " "):
ecrire(a)
ecrire(b)
a = lire()
b = lire()
else:
ecrire(a)
ecrire(" ")
ecrire(b)
a = lire()
b = lire()
        # Line-break case
elif(a == "\n"):
a = b
b = lire()
        # General case
else:
ecrire(a)
a = b
b = lire()
print()
#miseEnForme1()
miseEnForme()
"""
Question 3:
- Texte avec \. (le "." du milieu ne doit pas être pris en compte)
Lorem ipsum dolor sit amet, consectetur adipisicing elit. Ipsa, nam\.
- Texte avec toutes les instructions (vérifie aussi l'incrémentation des sections et des sous-sections):
\sLorem \iipsum \idolor \ssit \iamet \iconsectetur adipisicing elit. Mollitia
deserunt\nlibero sed. Labore obcaecati aspernatur officiis, voluptas numquam ad, eos.\.
- Texte vérifiant des cas spéciaux:
- \ doit afficher un \
- \\s\\\i doit afficher \ et passer à une section, puis \ et passer à une sous-section.
\Lorem ipsum dolor sit amet, consectetur \\s\\iadipisicing elit.
Tenetur magni quia nesciunt sint pariatur iste harum officiis debitis, rem in.\.
- \. dès le début du fichier. Rien ne doit s'afficher.
- Passer à la ligne au milieu de "\s" par exemple doit bien afficher une section
\
s\.
- Texte avec n\\n\. : doit afficher n\ et passer à la ligne (n\ ne doit pas être pris pour une instruction, contrairement
à \n). Ce cas teste aussi \\, qui ne doit pas être considéré ici comme un affichage de \.
- Cas non géré (mais vraiment très aux limites):
\ \.
n'affiche pas \
"""
```
|
{
"source": "jerometwell/algorithmia-python",
"score": 3
}
|
#### File: algorithmia-python/Test/datafile_test.py
```python
import sys
# look in ../ BEFORE trying to import Algorithmia. If you append to the path instead,
# you will load the version installed on the computer.
sys.path = ['../'] + sys.path
import unittest, os, uuid
import Algorithmia
from Algorithmia.datafile import DataFile, LocalDataFile
class DataFileTest(unittest.TestCase):
def setUp(self):
self.client = Algorithmia.client()
def test_get_nonexistant(self):
df = self.client.file('data://.my/nonexistant/nonreal')
try:
df.getFile()
retrieved_file = True
except Exception as e:
retrieved_file = False
self.assertFalse(retrieved_file)
def test_get_str(self):
df = self.client.file('data://.my/nonexistant/nonreal')
try:
print(df.getString())
retrieved_file = True
except Exception as e:
retrieved_file = False
self.assertFalse(retrieved_file)
def test_set_attributes(self):
df = DataFile(self.client, 'data://.my/empty')
try:
df.set_attributes({
'last_modified': '2019-01-09T22:44:31.632Z',
'size': 0
})
except Exception as e:
self.fail("set_attributes failed with exception: " + str(e))
class LocalFileTest(unittest.TestCase):
DUMMY_TEXT = 'this file gets populated during testing'
EXISTING_TEXT = 'this file exists before testing'
def setUp(self):
self.client = Algorithmia.client()
# Make a file that DOES exist and has contents,
self.EXISTING_FILE = 'file://'+str(uuid.uuid1())+'.txt'
f = open(self.EXISTING_FILE.replace('file://', ''), 'w')
f.write(self.EXISTING_TEXT)
f.close()
        # We need a dummy file that doesn't currently exist
self.DUMMY_FILE = 'file://'+str(uuid.uuid1())+'.txt'
        if os.path.isfile(self.DUMMY_FILE.replace('file://', '')): os.remove(self.DUMMY_FILE.replace('file://', ''))
def tearDown(self):
os.remove(self.EXISTING_FILE.replace('file://', ''))
        if os.path.isfile(self.DUMMY_FILE.replace('file://', '')): os.remove(self.DUMMY_FILE.replace('file://', ''))
def test_local_remote(self):
self.assertTrue(isinstance(self.client.file(self.DUMMY_FILE), LocalDataFile))
self.assertTrue(isinstance(self.client.file('data://foo'), DataFile))
def test_exists_or_not(self):
self.assertTrue(self.client.file(self.EXISTING_FILE).exists())
self.assertFalse(self.client.file(self.DUMMY_FILE).exists())
def test_get_nonexistant(self):
df = self.client.file(self.DUMMY_FILE)
try:
df.getFile()
retrieved_file = True
except Exception as e:
retrieved_file = False
self.assertFalse(retrieved_file)
def test_put_and_read_and_delete(self):
f = self.client.file(self.DUMMY_FILE)
f.put(self.DUMMY_TEXT)
# Check getString
txt = self.client.file(self.DUMMY_FILE).getString()
self.assertEqual(txt, self.DUMMY_TEXT)
# Check delete
deletion_status = self.client.file(self.DUMMY_FILE).delete()
self.assertTrue(deletion_status)
def test_read_types(self):
# Check getBytes
txt = self.client.file(self.EXISTING_FILE).getBytes().decode('utf-8')
self.assertEqual(txt, self.EXISTING_TEXT)
# Check getFile
txt = self.client.file(self.EXISTING_FILE).getFile().read()
self.assertEqual(txt, self.EXISTING_TEXT)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jerometwell/praisebot",
"score": 3
}
|
#### File: jerometwell/praisebot/app.py
```python
import tweepy
import os
import sys
import re
import logging
import random
LOG_LEVEL = os.environ.get("LOG_LEVEL") or logging.INFO
API_KEY = os.environ["API_KEY"]
API_SECRET = os.environ["API_SECRET"]
ACCESS_KEY_TOKEN = os.environ["ACCESS_KEY_TOKEN"]
ACCESS_KEY_SECRET = os.environ["ACCESS_KEY_SECRET"]
logging.basicConfig(stream=sys.stderr, level=LOG_LEVEL)
logger = logging.getLogger(__name__)
auth = tweepy.OAuthHandler(API_KEY, API_SECRET)
auth.set_access_token(ACCESS_KEY_TOKEN, ACCESS_KEY_SECRET)
api = tweepy.API(auth)
logger.info("Connecting to Twitter API...")
bot_user = api.verify_credentials()
praises = [
"you're the best 👏👏👏",
"you are incredible! 😇",
"you are majestic ✨✨✨",
"please continue to be fantastic ✨",
"your presence brings us joy ⭐️",
"don't you need a license for that level of awesomeness? ✨",
"you light up the room 💡",
"you are way cool",
"you were cool before cool was a thing",
"you are appreciated! 🌟",
"you're better than a triple-scoop ice cream. 🍦 (with sprinkles)",
]
def praise(target):
praise = random.sample(praises, 1)[0]
praise_tweet = f"@{target} {praise}"
logger.info("praising: %s", praise_tweet)
api.update_status(praise_tweet)
class PraiseStream(tweepy.Stream):
def on_status(self, status):
logger.info("@%s >> %s", status.user.screen_name, status.text)
api.create_favorite(status.id)
if re.search(r"\s+praise\s+me\s*$", status.text):
praise(status.user.screen_name)
for mention in re.finditer(r"\s@(\w+)\b", status.text):
target = mention.group(1)
if target == bot_user.screen_name:
logger.debug("skipping self mention")
continue
praise(target)
logger.info(f"Connected as @{bot_user.screen_name}")
mention_streams = PraiseStream(API_KEY, API_SECRET, ACCESS_KEY_TOKEN, ACCESS_KEY_SECRET)
mention_streams.filter(track=[f"@{bot_user.screen_name}"])
```
|
{
"source": "jeromevonk/alpha_vantage_stock",
"score": 3
}
|
#### File: alpha_vantage_stock/lambda_aws/lambda.py
```python
import json
from botocore.vendored import requests
from datetime import datetime, timedelta
# Required keys
REQUIRED = ['AtivoNome', 'DataPreco']
# Alpha Vantage API
API_TOKEN = "<KEY>"
FUNCTION = "TIME_SERIES_DAILY_ADJUSTED"
def lambda_handler(event, context):
print("Received event: " + json.dumps(event, indent=2))
for parameter in REQUIRED:
if not parameter in event:
return 'Missing required parameter: {}'.format(parameter)
stock = event['AtivoNome']
date = event['DataPreco']
# Convert to datetime object
try:
dt = datetime.strptime(date, ("%Y-%m-%d"))
except:
return { 'Error message:' : 'Invalid date format, should be YYYY-MM-DD' }
result, bRet = getStockValue(stock, dt)
return { 'AtivoNome': stock, 'DataPreco': date, "Preco": result }
def getStockValue(stock, dt):
'''Get stock value from Alpha Vantage API '''
to_return = ""
bRet = True
_size = 'compact'
# The date requested can't be in the future
if ( datetime.now() < dt ):
return "Data requested is in the future", False
# Do we need the 'compact' or the 'full' outputsize?
    # The 'compact' one has the latest 100 data points, which probably means more than 100 days.
delta = datetime.now() - dt
    # To be on the safe side, ask for the full data when it's more than 100 days away from today
if delta > timedelta(days=100):
_size = 'full'
# Make the request
try:
r = requests.get("https://www.alphavantage.co/query?function={}&symbol={}&outputsize={}&apikey={}&datatype=json".format(FUNCTION, stock, _size, API_TOKEN))
# Get the data and metadata
json_data = json.loads(r.text)
meta_data = json_data["Meta Data"]
data = json_data["Time Series (Daily)"]
# Convert date back to string
date = dt.strftime("%Y-%m-%d")
# Sorting can be time-consuming, but should not be significant
# compared to getting the data over the network
        last_data_point = sorted(data)[-1]
# Is there data for this date?
        if ( dt > datetime.strptime(last_data_point, ("%Y-%m-%d")) ):
to_return = "Data for this date does not exist. Last data point is {}".format(last_data_point)
else:
while date not in data:
dt -= timedelta(days=1)
date = dt.strftime("%Y-%m-%d")
# Get data as a float, than format it as a string with 8 decimal places
fValue = float(data[date]['4. close'])
to_return = "{:.8f}".format(fValue)
except:
to_return = "No data for this stock"
bRet = False
return to_return, bRet
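# Rough local smoke test (my own sketch, not part of the Lambda deployment); it
# assumes a real Alpha Vantage key has been filled in above and uses a made-up
# ticker/date pair:
#
#   event = {'AtivoNome': 'MSFT', 'DataPreco': '2019-06-03'}
#   print(lambda_handler(event, None))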
```
|
{
"source": "jeromevonk/candidates-api",
"score": 2
}
|
#### File: candidates-api/test/invalid_candidate.py
```python
import requests
import sys
import pytest
import copy
from random_candidates import getFakeEducation, getFakeExperience
#-------------------------------------------------------------------------------
# Helper function
#-------------------------------------------------------------------------------
def postInvalidCandidate(candidate):
try:
r = requests.post(url, json = candidate)
print(r.text)
assert r.status_code == 400
except requests.exceptions.RequestException as e:
print(e)
#-------------------------------------------------------------------------------
# Hosted locally or in heroku
#-------------------------------------------------------------------------------
LOCAL = 'http://localhost:5000/candidates/api/v2.0/'
HEROKU = 'https://candidates-api.herokuapp.com/candidates/api/v2.0/'
AWS = 'http://candidates-api.sa-east-1.elasticbeanstalk.com/candidates/api/v2.0/'
# Default to localhost
URL_BASE = LOCAL
# Parse command line argument
if len(sys.argv) > 1:
if 'heroku' == sys.argv[1]:
URL_BASE = HEROKU
if 'aws' == sys.argv[1]:
URL_BASE = AWS
#-------------------------------------------------------------------------------
# Insert invalid candidates
#-------------------------------------------------------------------------------
url = URL_BASE + 'candidates'
template = { "name" : "<NAME>", "picture" : "", "birthdate" : "01/02/1988", "gender" : 1,
"email" : "<EMAIL>", "phone" : "11912345678", "address" : "Avenida Paulista, 1",
"longitude": -12.75, "latitude": 45.11122, "tags" : [], "experience" : [], "education" : []}
#-------------------------------------------------------------------------------
# Invalid / missing name
#-------------------------------------------------------------------------------
print("### Inserting candidate(s) with invalid/missing name...")
invalid_name = copy.deepcopy(template)
# a) Empty
invalid_name['name'] = ''
postInvalidCandidate(invalid_name)
# b) Too big
invalid_name['name'] = 'Nome muito muito muito muito muito muito muito muito muito muito muito muito grande'
postInvalidCandidate(invalid_name)
# c) Missing name
invalid_name.pop('name', None)
postInvalidCandidate(invalid_name)
#-------------------------------------------------------------------------------
# Missing email
#-------------------------------------------------------------------------------
print("### Inserting candidate(s) with missing email...")
invalid_email = copy.deepcopy(template)
# a) Empty
invalid_email['email'] = ''
postInvalidCandidate(invalid_email)
# b) Too big
invalid_email['email'] = '<EMAIL>'
postInvalidCandidate(invalid_email)
# c) Without '.'
invalid_email['email'] = 'email<EMAIL>'
postInvalidCandidate(invalid_email)
# d) Without '@'
invalid_email['email'] = 'emaila.com'
postInvalidCandidate(invalid_email)
# e) Missing email
invalid_email.pop('email', None)
postInvalidCandidate(invalid_email)
#-------------------------------------------------------------------------------
# Invalid / missing gender (see https://en.wikipedia.org/wiki/ISO/IEC_5218)
#-------------------------------------------------------------------------------
print("### Inserting candidate(s) with invalid/missing gender...")
invalid_gender = copy.deepcopy(template)
# a) Invalid number
invalid_gender['gender'] = 3
postInvalidCandidate(invalid_gender)
# b) Strings are not accepted
invalid_gender['gender'] = 'Male'
postInvalidCandidate(invalid_gender)
# c) Missing gender
invalid_gender.pop('gender', None)
postInvalidCandidate(invalid_gender)
#-------------------------------------------------------------------------------
# Invalid / missing phone (format: 11912345678)
#-------------------------------------------------------------------------------
print("### Inserting candidate(s) with invalid/missing phone...")
invalid_phone = copy.deepcopy(template)
# a) Too small
invalid_phone['phone'] = '912345678'
postInvalidCandidate(invalid_phone)
# b) Too big
invalid_phone['phone'] = '5511912345678'
postInvalidCandidate(invalid_phone)
# c) Strings are not accepted
invalid_phone['phone'] = 'nove sete cinco um meia quatro meia dois'
postInvalidCandidate(invalid_phone)
# d) Missing phone
invalid_phone.pop('phone', None)
postInvalidCandidate(invalid_phone)
#-------------------------------------------------------------------------------
# Invalid / missing address (at least 5 characters)
#-------------------------------------------------------------------------------
print("### Inserting candidate(s) with invalid/missing address...")
invalid_address = copy.deepcopy(template)
# a) Too small
invalid_address['address'] = 'Rua'
postInvalidCandidate(invalid_address)
# b) Too big
invalid_address['address'] = 'Endereço muito muito muito muito muito muito muito muito muito muito muito muito muito muito muito muito muito muito grande'
postInvalidCandidate(invalid_address)
# c) Missing address
invalid_address.pop('address', None)
postInvalidCandidate(invalid_address)
#-------------------------------------------------------------------------------
# Invalid latitude (optional, but if present should be valid )
# (see https://en.wikipedia.org/wiki/Decimal_degrees)
#-------------------------------------------------------------------------------
print("### Inserting candidate(s) with invalid latitude...")
invalid_latitude = copy.deepcopy(template)
# a) Too small
invalid_latitude['latitude'] = -91.2
postInvalidCandidate(invalid_latitude)
# b) Too big
invalid_latitude['latitude'] = 93.2
postInvalidCandidate(invalid_latitude)
# c) Strings are not accepted
invalid_latitude['latitude'] = '45 degrees'
postInvalidCandidate(invalid_latitude)
# d) Empty
invalid_latitude['latitude'] = ''
postInvalidCandidate(invalid_latitude)
#-------------------------------------------------------------------------------
# Invalid longitude (optional, but if present should be valid )
# (see https://en.wikipedia.org/wiki/Decimal_degrees)
#-------------------------------------------------------------------------------
print("### Inserting candidate(s) with invalid longitude...")
invalid_longitude = copy.deepcopy(template)
# a) Too small
invalid_longitude['longitude'] = -181.22
postInvalidCandidate(invalid_longitude)
# b) Too big
invalid_longitude['longitude'] = 193.21
postInvalidCandidate(invalid_longitude)
# c) Strings are not accepted
invalid_longitude['longitude'] = '45 degrees'
postInvalidCandidate(invalid_longitude)
# d) Empty
invalid_longitude['longitude'] = ''
postInvalidCandidate(invalid_longitude)
#-------------------------------------------------------------------------------
# Invalid birthdate (optional, but if present should be valid )
# format is DD/MM/YYYY
#-------------------------------------------------------------------------------
print("### Inserting candidate(s) with invalid birthdate...")
invalid_birthdate = copy.deepcopy(template)
# a) Invalid day
invalid_birthdate['birthdate'] = "00/02/1988"
postInvalidCandidate(invalid_birthdate)
# b) Invalid month
invalid_birthdate['birthdate'] = "01/13/1988"
postInvalidCandidate(invalid_birthdate)
# c) Invalid year
invalid_birthdate['birthdate'] = "03/02/2048"
postInvalidCandidate(invalid_birthdate)
# d) Invalid day of month
invalid_birthdate['birthdate'] = "30/02/1988"
postInvalidCandidate(invalid_birthdate)
#-------------------------------------------------------------------------------
# Invalid picture (optional, but if present should be valid )
#-------------------------------------------------------------------------------
print("### Inserting candidate(s) with invalid picture...")
invalid_picture = copy.deepcopy(template)
# a) Invalid string
invalid_picture['picture'] = "Empty"
postInvalidCandidate(invalid_picture)
# b) PNG image
invalid_picture['picture'] = "ypBORw0KGg0KICAgDQpJSERSICAgByAgIAkIAiAgIHE6wrQgICABc1JHQiDPjhzpoKAgBGdBTUEgINGPC++/ve+/vSAgCXBIWXMgIA7DoCAOw4HIr8mkICAgEklEQVQYV2Pwn6+OyYbXqN26IEk3Z92oQDPMoCAgIElFTkTPgmA="
postInvalidCandidate(invalid_picture)
#-------------------------------------------------------------------------------
# Invalid experience (optional, but if present should be valid )
#-------------------------------------------------------------------------------
print("### Inserting candidate(s) with invalid experience...")
invalid_experience = copy.deepcopy(template)
# a) Not a list
invalid_experience['experience'] = ""
postInvalidCandidate(invalid_experience)
# b) List of non-strings
invalid_experience['experience'] = [1, 2, 3]
postInvalidCandidate(invalid_experience)
# c) Missing company name
invalid_experience['experience'].clear()
exp = getFakeExperience()
exp.pop('company', None)
invalid_experience['experience'].append(exp)
postInvalidCandidate(invalid_experience)
# d) Missing job title name
invalid_experience['experience'].clear()
exp = getFakeExperience()
exp.pop('job_title', None)
invalid_experience['experience'].append(exp)
postInvalidCandidate(invalid_experience)
# e) Invalid date_start
invalid_experience['experience'].clear()
invalid_experience['experience'].append( getFakeExperience())
invalid_experience['experience'][0]['date_start'] = "01/13/2027"
postInvalidCandidate(invalid_experience)
# f) Invalid date_end
invalid_experience['experience'].clear()
invalid_experience['experience'].append( getFakeExperience())
invalid_experience['experience'][0]['date_end'] = "01/12/2027"
postInvalidCandidate(invalid_experience)
# g) Invalid description
invalid_experience['experience'].clear()
invalid_experience['experience'].append( getFakeExperience())
invalid_experience['experience'][0]['description'] = []
postInvalidCandidate(invalid_experience)
#-------------------------------------------------------------------------------
# Invalid education (optional, but if present should be valid )
#-------------------------------------------------------------------------------
print("### Inserting candidate(s) with invalid education...")
invalid_education = copy.deepcopy(template)
# a) Not a list
invalid_education['education'] = ""
postInvalidCandidate(invalid_education)
# b) List of non-strings
invalid_education['education'] = [1, 2, 3]
postInvalidCandidate(invalid_education)
# c) Missing institution name
invalid_education['education'].clear()
exp = getFakeEducation()
exp.pop('institution', None)
invalid_education['education'].append(exp)
postInvalidCandidate(invalid_education)
# d) Missing degree name
invalid_education['education'].clear()
exp = getFakeEducation()
exp.pop('degree', None)
invalid_education['education'].append(exp)
postInvalidCandidate(invalid_education)
# e) Invalid date_start
invalid_education['education'].clear()
invalid_education['education'].append( getFakeEducation())
invalid_education['education'][0]['date_start'] = "32/12/2027"
postInvalidCandidate(invalid_education)
# f) Invalid date_end
invalid_education['education'].clear()
invalid_education['education'].append( getFakeEducation())
invalid_education['education'][0]['date_end'] = "01/12/1754"
postInvalidCandidate(invalid_education)
# g) Invalid description
invalid_education['education'].clear()
invalid_education['education'].append( getFakeEducation())
invalid_education['education'][0]['description'] = {}
postInvalidCandidate(invalid_education)
#-------------------------------------------------------------------------------
# Invalid tags (optional, but if present should be valid )
#-------------------------------------------------------------------------------
print("### Inserting candidate(s) with invalid tags...")
invalid_tags = copy.deepcopy(template)
# a) Invalid string
invalid_tags['tags'] = ""
postInvalidCandidate(invalid_tags)
# b) List of non-strings
invalid_tags['tags'] = [1, 2, 3]
postInvalidCandidate(invalid_tags)
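# Assumed invocation, inferred from the argument parsing above (not documented in
# this file):
#   python invalid_candidate.py          # hits the local API
#   python invalid_candidate.py heroku   # hits the Heroku deployment
#   python invalid_candidate.py aws      # hits the Elastic Beanstalk deployment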
```
|
{
"source": "jeromevonk/data_science",
"score": 3
}
|
#### File: data_science/Detect Enron fraud with Machine learning/dataset_investigation.py
```python
def exploreFeature(feature):
for key in enron_data.keys():
print enron_data[key][feature]
def plotFeature(name, data):
import matplotlib.pyplot as plt
plt.hist(data)
plt.xlabel(name)
#plt.show()
plt.savefig("{}.png".format(name))
plt.clf()
def computeFraction( poi_messages, all_messages ):
""" compute the fraction of messages to/from a person that are from/to a POI """
fraction = 0
if poi_messages == 'NaN' or all_messages == 'NaN':
fraction = 'NaN'
else:
fraction = float(poi_messages)/all_messages
return fraction
import sys
import pickle
sys.path.append("../tools/")
### Load the dictionary containing the dataset
with open("final_project_dataset.pkl", "r") as data_file:
enron_data = pickle.load(data_file)
# How many persons?
print 'Number of persons: ', len(enron_data)
for person in enron_data:
#print person
pass
# For each person, how many features?
features_names = []
print "List of features:"
for person in enron_data:
for feature, feature_value in enron_data[person].iteritems():
features_names.append(feature)
break
print "Number of features: ", len(features_names)
print features_names
# How many persons of interest?
count_poi = 0
for person in enron_data:
if enron_data[person]["poi"] == 1:
count_poi += 1
print "Persons of interest: ", count_poi
print "Non-POIs: ", len(enron_data) - count_poi
# Missing?
for feature in features_names:
print "NaN percentage for ", feature, " :", round(float(sum([1 for key in enron_data.keys() if enron_data[key][feature] == 'NaN']))/len(enron_data), 3)
# Is there someone with NaN for all features?
for person in enron_data:
valid = False
for feature, feature_value in enron_data[person].iteritems():
if feature_value != 'NaN' and feature != 'poi':
valid = True
break
if valid == False:
print "Found someone with NaN for all values: ", person
# Plots
from feature_format import featureFormat, targetFeatureSplit
features_list = ['poi','salary', 'to_messages', 'deferral_payments', 'total_payments', 'exercised_stock_options', 'bonus', 'restricted_stock', 'shared_receipt_with_poi', 'restricted_stock_deferred', 'total_stock_value', 'expenses', 'loan_advances', 'from_messages', 'other', 'from_this_person_to_poi', 'poi', 'director_fees', 'deferred_income', 'long_term_incentive', 'from_poi_to_this_person']
data = featureFormat(enron_data, features_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
#for i in range(1, len(features_list)):
#plotFeature(features_list[i], features[i-1] )
# Negative salary?
import numpy as np
np_features = np.array(features)
import pprint
#pprint.pprint(np_features[:,0])
# Pandas
import pandas as pd
df = pd.DataFrame.from_dict(enron_data, orient = 'index')
subzero = df[df < 0]
print len(subzero)
#for person in enron_data:
# for feature, feature_value in enron_data[person].iteritems():
# if feature_value != 'NaN' and feature_value < 0:
# print person, feature, feature_value
# As seen in class, we will compute the fraction of exchanged messages with POIs over the total messages
for name in enron_data:
person_dict = enron_data[name]
fraction_from_poi = computeFraction(person_dict['from_poi_to_this_person'], person_dict['to_messages'])
#print fraction_from_poi
person_dict["fraction_from_poi"] = fraction_from_poi
fraction_to_poi = computeFraction(person_dict['from_this_person_to_poi'], person_dict['from_messages'])
#print fraction_to_poi
person_dict["fraction_to_poi"] = fraction_to_poi
for feature in ["fraction_from_poi", "fraction_to_poi"]:
print "NaN percentage for ", feature, " :", round(float(sum([1 for key in enron_data.keys() if enron_data[key][feature] == 'NaN']))/len(enron_data), 3)
```
#### File: Wrangling OSM data for Madrid/2. Auditing/2_tag_potential_problems.py
```python
import xml.etree.cElementTree as ET
import pprint
import re
import os
import sys
lower = re.compile(r'^([a-z]|_)*$')
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
DATA_PATH = "..\\1. Data\\Full\\Madrid_custom_11122017.osm"
SAMPLE_PATH = "..\\1. Data\\Sample\\Madrid_custom_11122017_sample_1.osm"
OUTPUT_FILE = "output\\2_tag_potential_problems.txt"
def key_type(element, keys):
"""For a given tag, look for potential problems"""
if element.tag == "tag":
if lower.search(element.attrib['v']):
#print(element.attrib['v'], "lower")
keys['lower'] += 1
elif lower_colon.search(element.attrib['v']):
#print(element.attrib['v'], 'lower_colon')
keys['lower_colon'] += 1
elif problemchars.search(element.attrib['v']):
#print(element.attrib['v'], 'problemchars')
keys['problemchars'] += 1
else:
#print(element.attrib['v'], 'other')
keys['other'] += 1
return keys
def process_map(filename):
"""Iterate through every tag in the data file"""
keys = {"lower": 0, "lower_colon": 0, "problemchars": 0, "other": 0}
osm_file = open(filename, "r", encoding="utf8")
for _, element in ET.iterparse(osm_file):
keys = key_type(element, keys)
return keys
def test(dataset):
"""Perform the test on the selected dataset"""
print("Running 2_tag_potential_problems.py")
keys = {}
# Run against the sample or the full data?
if dataset == "sample":
keys = process_map(SAMPLE_PATH)
else:
keys = process_map(DATA_PATH)
os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)
with open(OUTPUT_FILE, "w") as fo:
pprint.pprint(keys, fo)
if __name__ == "__main__":
dataset = "full"
if len(sys.argv) > 1:
dataset = sys.argv[1]
test(dataset)
```
#### File: Wrangling OSM data for Madrid/4. Database/create_database_mongodb.py
```python
import json
def create_database():
"""Create database from the json file"""
# Connect to database
from pymongo import MongoClient
try:
client = MongoClient('localhost:27017')
except:
print("Could not connect to MongoDB")
return
# Database will be called Madrid
db = client.Madrid
# Collection will be called nodes_ways
collection = db.nodes_ways
with open('..\\3. Preparing_database\\output\\nodes_and_ways.json') as file:
for line in file:
# Convert from json to python format
collection.insert(json.loads(line))
print("Collection now has {} documents".format(collection.count() ) )
if __name__ == '__main__':
#print("Running create_database_mongodb.py")
create_database()
```
#### File: Wrangling OSM data for Madrid/4. Database/perform_queries.py
```python
import sqlite3
import csv
import os
def perform_query(cursor, columns, file_path, query):
"""Perfom a query and save the output in a csv file"""
cursor.execute(query)
results = cursor.fetchall()
# Save in a csv file
with open(file_path, 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(columns)
for row in results:
writer.writerow(row)
def perform_queries():
"""Perfom a list of queries on the database"""
# Connect to database
conn = sqlite3.connect("madrid.db")
# Create a cursor
cursor = conn.cursor()
# Make sure the output directory exists
os.makedirs(os.path.dirname("queries\\"), exist_ok=True)
# --------------------------------------------------------
# Perform the desired queries
# --------------------------------------------------------
# Retrieve the users that appear the most
results = perform_query(cursor,
["user", "id", "count"],
"queries\\users.csv",
"select user, uid, count(*) as count from nodes group by user order by count desc limit 10")
# What type of cuisine appears the most
results = perform_query(cursor,
["cuisine", "count"],
"queries\\cuisine.csv",
"select value, count(*) as count from node_tags where key='cuisine' group by value order by count desc")
# What postcode appears the most
results = perform_query(cursor,
["postcode", "count"],
"queries\\postcode.csv",
"select value, count(*) as count from way_tags where key='postcode' group by value order by count desc")
# Now, close the connection
conn.close()
if __name__ == '__main__':
print("Running perform_queries.py")
perform_queries()
```
|
{
"source": "jerome-wang/CarSpider",
"score": 3
}
|
#### File: buycar/spiders/CarSpider.py
```python
import scrapy
from buycar.items import CarItem
class CarSpider(scrapy.Spider):
name = "CarSpider"
allowed_domains = ["autohome.com.cn"]
start_urls = ["http://k.autohome.com.cn/spec/18890/"]
url_prefix = u'http://k.autohome.com.cn'
def parse(self, response):
for EachComment in response.xpath("//div[@class='mouthcon']"):
CarModel = EachComment.xpath(".//div[@class='choose-con mt-10']/dl[1]/dd/a[1]/text()").extract()[0].strip()
CarType = EachComment.xpath(".//div[@class='choose-con mt-10']/dl[1]/dd/a[2]/span/text()").extract()[0].strip()
PurchasedTime = EachComment.xpath(".//div[@class='choose-con mt-10']/dl[4]/dd/text()").extract()[0].strip()
PurchasedLocation = EachComment.xpath(".//div[@class='choose-con mt-10']/dl[2]/dd/text()").extract()[0].strip()
#PurchasedDealer = EachComment.xpath()
PurchasedPrice = EachComment.xpath(".//div[@class='choose-con mt-10']/dl[5]/dd/text()").extract()[0].strip()
#CurrentMiles = EachComment.xpath()
#CurrentFuel = EachComment.xpath()
#print CarModel, CarType, PurchasedTime, PurchasedPrice
carItem = CarItem()
carItem['CarModel'] = CarModel
carItem['CarType'] = CarType
carItem['PurchasedTime'] = PurchasedTime
carItem['PurchasedLocation'] = PurchasedLocation
carItem['PurchasedPrice'] = PurchasedPrice
yield carItem
next_page = response.xpath("//a[@class='page-item-next']/@href").extract()
if len(next_page) != 0 and next_page[0].strip() != u'###':
url = self.url_prefix + next_page[0].strip()
print '##### url = ', url
yield scrapy.Request(url, callback=self.parse)
```
|
{
"source": "jeromewu/tensorflow",
"score": 2
}
|
#### File: autograph/core/config.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
from tensorflow.python.autograph import utils
def _internal_name(name):
"""This function correctly resolves internal and external names."""
reference_name = utils.__name__
reference_root = 'tensorflow.'
# If the TF module is foo.tensorflow, then all other modules
# are then assumed to be prefixed by 'foo'.
if reference_name.startswith(reference_root):
return name
reference_begin = reference_name.find('.' + reference_root)
assert reference_begin > 0
root_prefix = reference_name[:reference_begin]
return root_prefix + '.' + name
class Rule(object):
"""Base class for conversion rules."""
def __init__(self, module_prefix):
self._prefix = module_prefix
def matches(self, module_name):
return (module_name.startswith(self._prefix + '.') or
module_name == self._prefix)
class Action(enum.Enum):
NONE = 0
CONVERT = 1
DO_NOT_CONVERT = 2
class DoNotConvert(Rule):
"""Indicates that this module should be not converted."""
def __str__(self):
return 'DoNotConvert rule for {}'.format(self._prefix)
def get_action(self, module):
if self.matches(module.__name__):
return Action.DO_NOT_CONVERT
return Action.NONE
class Convert(Rule):
"""Indicates that this module should be converted."""
def __str__(self):
return 'Convert rule for {}'.format(self._prefix)
def get_action(self, module):
if self.matches(module.__name__):
return Action.CONVERT
return Action.NONE
# This list is evaluated in order and stops at the first rule that tests True
# for a definitely_convert or definitely_bypass call.
CONVERSION_RULES = (
DoNotConvert('tensorflow'),
DoNotConvert(_internal_name('tensorflow')),
# TODO(b/133417201): Remove.
DoNotConvert('tensorflow_probability'),
DoNotConvert(_internal_name('tensorflow_probability')),
# TODO(b/130313089): Remove.
DoNotConvert('numpy'),
DoNotConvert('threading'),
)
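# A minimal sketch of how these rules might be consulted -- illustrative only and
# not part of the TensorFlow API; the helper name below is made up:
#
#   def resolve_action(module):
#     for rule in CONVERSION_RULES:
#       action = rule.get_action(module)
#       if action != Action.NONE:
#         return action
#     return Action.NONE
#
# With the rules above, resolve_action(numpy) returns Action.DO_NOT_CONVERT, and a
# module matched by no rule falls through to Action.NONE.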
```
|
{
"source": "jeromjoy/Correct-Project",
"score": 3
}
|
#### File: Correct-Project/Back_end/WorkerFiles.py
```python
import dateutil.parser as parser
import urllib.request
import requests
import json
from bs4 import BeautifulSoup
import time
import traceback
import psycopg2
import datetime
from urllib.parse import urlparse
from celery import Celery
from celery.schedules import crontab
app = Celery()
@app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    # Schedules the two news-fetching tasks below to run every 3600 seconds.
sender.add_periodic_task(3600.0, workerTask.s('NEWS_API'), name='news_api')
sender.add_periodic_task(3600.0, workerTask.s('FAKE_NEWS'), name='fake_news')
@app.task
def workerTask(args):
if(args=='NEWS_API'):
newsApiCall('NEWS_API')
elif(args=='FAKE_NEWS'):
fakeNewsCall('FAKE_NEWS')
connectionString = "dbname='correctdb' user='postgres' host='localhost' password='<PASSWORD>'"
class NewsAPI:
Url = ''
Author = ''
Title = ''
Description = ''
ImageUrl = ''
PublishedAt = ''
def __init__(self, url, author, title, description, imageUrl, publishedAt):
self.Url = url
self.Author = author
self.Title = title
self.Description = description
self.ImageUrl = imageUrl
self.PublishedAt = publishedAt
class News:
Article = ''
Title = ''
Author = ''
OriginalContent = ''
CreatedDate = ''
FetchedDate = ''
ArticleUrl = ''
LastUsed = ''
def __init__(self, article , title, author, originalcontent, createddate, fetcheddate, articleurl, lastused):
self.Article = article
self.Title = title
self.OriginalContent = originalcontent
self.CreatedDate = createddate
self.FetchedDate = fetcheddate
self.ArticleUrl = articleurl
self.LastUsed = lastused
self.Author = author
def uploadDBNewsApi(newsApi):
try:
conn = psycopg2.connect(connectionString)
cur = conn.cursor()
# publishedAt = time.mktime(datetime.datetime.strptime(PublishedAt, "%Y-%m-%d").timetuple())
cur.execute("INSERT INTO NewsFetchedApi (Url, Author, Title, Description, ImageUrl, PublishedAt) VALUES (%s, %s, %s, %s, %s, %s)", (newsApi.Url, newsApi.Author, newsApi.Title, newsApi.Description, newsApi.ImageUrl, newsApi.PublishedAt))
conn.commit();
cur.close()
conn.close()
except psycopg2.Error as e:
print("I am unable to connect to the database")
print(e)
print(e.pgcode)
print(e.pgerror)
print(traceback.format_exc())
return
def uploadDBNews(news):
try:
# createdDate = time.mktime(datetime.datetime.strptime(CreatedDate, "%d/%m/%Y").timetuple())
# fetchedDate = time.mktime(datetime.datetime.strptime(FetchedDate, "%d/%m/%Y").timetuple())
# lastUsed = time.mktime(datetime.datetime.strptime(LastUsed, "%d/%m/%Y").timetuple())
conn = psycopg2.connect(connectionString)
cur = conn.cursor()
cur.execute("INSERT INTO News (Article, Title, Author, OriginalContent, CreatedDate, FetchedDate,ArticleUrl,LastUsed) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)", (news.Article, news.Title, news.Author, news.OriginalContent, news.CreatedDate, news.FetchedDate, news.ArticleUrl, news.LastUsed))
conn.commit();
cur.close()
conn.close()
except psycopg2.Error as e:
print("I am unable to connect to the database")
print(e)
print(e.pgcode)
print(e.pgerror)
print(traceback.format_exc())
return
def checkUrlInDB(href):
try:
conn = psycopg2.connect(connectionString)
cur = conn.cursor()
query = "select ArticleUrl from news where ArticleUrl = '" + href+"'"
cur.execute(query)
rows = cur.fetchall()
cur.close()
conn.close()
if(len(rows)>0):
return 0
else:
return 1
except psycopg2.Error as e:
print("I am unable to connect to the database")
print(e)
print(e.pgcode)
print(e.pgerror)
print(traceback.format_exc())
return
def get_and_write_data(link):
for i in range(len(link)):
try:
response = urllib.request.urlopen(link[i])
html = response.read().decode()
data = json.loads(html)
except ValueError:
print ('Decoding JSON has failed')
continue
news = data['articles']
for i in range(len(news)):
author = news[i]['author']
title = news[i]['title']
description = news[i]['description']
url = news[i]['url']
image = news[i]['urlToImage']
publishedAt = news[i]['publishedAt']
newsApi = NewsAPI(url, author, title, description, image, publishedAt)
uploadDBNewsApi(newsApi)
get_news_content_api(url)
def newsApiCall(args):
link = ["https://newsapi.org/v1/articles?source=the-guardian-uk&sortBy=top&apiKey=<KEY>",
"https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=<KEY>",
"https://newsapi.org/v1/articles?source=abc-news-au&sortBy=top&apiKey=<KEY>",
"https://newsapi.org/v1/articles?source=cnn&sortBy=top&apiKey=<KEY>",
"https://newsapi.org/v1/articles?source=independent&sortBy=top&apiKey=<KEY>",
"https://newsapi.org/v1/articles?source=reuters&sortBy=top&apiKey=<KEY>",
"https://newsapi.org/v1/articles?source=time&sortBy=top&apiKey=<KEY>"]
get_and_write_data(link)
def get_news_firstpage(link):
for i in range(len(link)):
link[i] = 'http://' + link[i]
headers = {'user-agent' : 'Mozilla/5.0'}
try:
source = requests.get(link[i], headers = headers)
soup = BeautifulSoup(source.content, "lxml")
except requests.exceptions.ConnectionError:
print ('Connection refused!!!')
if (soup.findAll('article')):
for article in soup.findAll('article'):
if (article.find('a')):
href = article.find('a').get('href')
if href.startswith('http'):
href = href
else:
href = link[i] + href
get_news_content(href)
else:
print ('not able to get <a>!!')
else:
print ('not able to get <article>!!')
def get_news_content(href):
print("New Data\n\n\n\n\n\n",href)
if(checkUrlInDB(href)==0):
print("Link already in DB")
return
headers = {'user-agent' : 'Mozilla/5.0'}
title = None
article = None
author = None
orginalContent = None
createdDate = None
date= None
try:
source = requests.get(href, headers = headers)
except requests.exceptions.ConnectionError:
source.status_code = "Connection refused"
if (BeautifulSoup(source.content, "lxml").body):
soup = BeautifulSoup(source.content, "lxml").body
if (soup.findAll('p')):
article = []
for paragraph in soup.findAll('p'):
article.append(paragraph.text)
else:
article = 'Unknown'
date = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
fetchedDate = date
lastUsed = date
orginalContent = source.content
if (urlparse(href).netloc == 'americannews.com'):
if (soup.findAll('h1')):
for title in soup.findAll('h1'):
title = title.text
else:
title = 'Unknown'
if (soup.find('time',{'class': 'rpwe-time published'})):
createdDate = soup.find('time',{'class': 'rpwe-time published'}).text
createdDate = (parser.parse(createdDate)).isoformat()
else:
createdDate = None
if (soup.findAll('b')):
for author in soup.findAll('b'):
author = author.text
else:
author = 'Unknown'
elif (urlparse(href).netloc == 'www.activistpost.com'):
if (soup.findAll('span', {'class': 'entry-meta-date updated'})):
for createdDate in soup.findAll('span', {'class': 'entry-meta-date updated'}):
createdDate = (parser.parse(createdDate.text)).isoformat()
else:
createdDate = None
if (soup.findAll('h1', {'class': 'entry-title'})):
for title in soup.findAll('h1', {'class': 'entry-title'}):
title = title.text
else:
title = 'Unknown'
for authors in soup.findAll('p'):
if (authors.findAll('a')):
for author in authors.findAll('a'):
author = author.text
else:
author = 'Unknown'
elif (urlparse(href).netloc == 'www.thedailysheeple.com'):
if (soup.findAll('time', {'class': 'entry-date'})):
for createdDate in soup.findAll('time', {'class': 'entry-date'}):
createdDate = (parser.parse(createdDate.text)).isoformat()
else:
createdDate = None
if (soup.findAll('h1', {'class': 'entry-title'})):
for title in soup.findAll('h1', {'class': 'entry-title'}):
title = title.text
else:
title = 'Unknown'
if (soup.findAll('span', {'class': 'author vcard'})):
for author in soup.findAll('span', {'class': 'author vcard'}):
author = author.text
else:
author = 'Unknown'
elif (urlparse(href).netloc == 'waterfordwhispersnews.com'):
if (soup.findAll('p', {'class': 'byline byline-left '})):
for createdDate in soup.findAll('p', {'class': 'byline byline-left '}):
createdDate = None
else:
createdDate = None
if (soup.findAll('h1', {'class': 'entry-title'})):
for title in soup.findAll('h1', {'class': 'entry-title'}):
title = title.text
else:
title = 'Unknown'
if (soup.findAll('span', {'class': 'author vcard'})):
for author in soup.findAll('span', {'class': 'author vcard'}):
author = author.text
else:
author = 'Unknown'
elif (urlparse(href).netloc == 'www.clickhole.com'):
if (soup.findAll('div', {'class': 'pub_date'})):
for createdDate in soup.findAll('div', {'class': 'pub_date'}):
createdDate = (parser.parse(createdDate.text)).isoformat()
else:
createdDate = None
if (soup.findAll('h1', {'class': 'headline'})):
for title in soup.findAll('h1', {'class': 'headline'}):
title = title.text
else:
title = 'Unknown'
if (soup.findAll('span', {'class': 'author vcard'})):
for author in soup.findAll('span', {'class': 'author vcard'}):
author = author.text
else:
author = 'Unknown'
elif (urlparse(href).netloc == 'theonion.com'):
if (soup.findAll('span', {'class': 'content-published-mobile'})):
for createdDate in soup.findAll('span', {'class': 'content-published-mobile'}):
createdDate = (parser.parse(createdDate.text)).isoformat()
else:
createdDate = None
if (soup.findAll('header', {'class': 'content-header'})):
for title in soup.findAll('header', {'class': 'content-header'}):
title = title.text
else:
title = 'Unknown'
if (soup.findAll('span', {'class': 'author vcard'})):
for author in soup.findAll('span', {'class': 'author vcard'}):
author = author.text
else:
author = 'Unknown'
else:
title = 'Unknown'
author = 'Unknown'
createdDate = None
else:
print ('can not get connent')
return
news = News(article, title, author, orginalContent, createdDate, date, href, date)
uploadDBNews(news)
def fakeNewsCall(args):
link = [
'americannews.com',
'thedailysheeple.com',
'theonion.com',
'clickhole.com',
        'activistpost.com',
        'waterfordwhispersnews.com'
]
get_news_firstpage(link)
def get_news_content_api(href):
print("New Data\n\n\n\n\n\n",href)
if(checkUrlInDB(href)==0):
print("Link already in DB")
return
headers = {'user-agent' : 'Mozilla/5.0'}
try:
source = requests.get(href, headers = headers)
except requests.exceptions.ConnectionError:
source.status_code = "Connection refused"
if source.status_code == 200:
soup = BeautifulSoup(source.content, "lxml").body
print (href)
if (soup.findAll('p')):
for article in soup.findAll('p'):
article = article.text
else:
article = 'Unknown'
date = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
fetchedDate = date
lastUsed = date
orginalContent = source.content
if (soup.findAll('h1')):
for title in soup.findAll('h1'):
title = title.text
else:
title = 'Unknown'
if (urlparse(href).netloc) == 'www.theguardian.com':
try:
for createdDate in soup.findAll('time',{'itemprop': 'datePublished'}):
                    createdDate = (parser.parse(createdDate.text)).isoformat()
except:
createdDate = None
if(soup.findAll('a', {'class':'tone-colour'})):
for author in soup.findAll('a', {'class':'tone-colour'}):
author = author.text
else:
author = 'Unknown'
elif (urlparse(href).netloc) == 'www.bbc.co.uk':
if (soup.find('div', {'class': 'date date--v2'})):
for createdDate in soup.findAll('div', {'class': 'date date--v2'}):
createdDate = (parser.parse(createdDate.text)).isoformat()
else:
createdDate = None
if (soup.findAll('a', {'class':'tone-colour'})):
for author in soup.findAll('a', {'class':'tone-colour'}):
author = author.text
else:
author = 'Unknown'
elif (urlparse(href).netloc) == 'www.abc.net.au':
if (soup.findAll('span', {'class': 'print'})):
for createdDate in soup.findAll('span', {'class': 'print'}):
if (createdDate):
createdDate = (parser.parse(createdDate.text)).isoformat()
else:
createdDate = None
else:
createdDate = None
if (soup.find('a', {'target':'_self'})):
author = soup.find('a', {'target':'_self'}).text
else:
author = 'Unknown'
elif (urlparse(href).netloc) == 'www.cnn.com':
try:
createdDate = soup.find('p', {'class': 'update-time'})
createdDate = (parser.parse(createdDate.text)).isoformat()
except:
createdDate = None
if (soup.find('span', {'class':'metadata_byline_author'})):
for author in soup.find('span', {'class':'metadata_byline_author'}):
author = author.string
else:
author = 'Unknown'
elif (urlparse(href).netloc) == 'www.independent.co.uk':
try:
createdDate = soup.find('time')
createdDate = (parser.parse(createdDate.text)).isoformat()
except:
createdDate = None
if (soup.find('span', {'itemprop':'name'})):
for author in soup.find('span', {'itemprop':'name'}):
author = author.string
else:
author = 'Unknown'
elif (urlparse(href).netloc) == 'www.reuters.com':
try:
createdDate = soup.find('span', {'class': 'timestamp'})
createdDate = parser.parse(createdDate.text)
except:
createdDate = None
if (soup.find('span', {'class':'author'})):
for author in soup.find('span', {'class':'author'}):
author = author.string
else:
author = 'Unknown'
elif (urlparse(href).netloc) == 'time.com':
try:
createdDate = soup.find('div', {'class': 'row text font-accent size-1x-small color-darker-gray'})
createdDate = (parser.parse(createdDate.text)).isoformat()
except:
createdDate = None
if (soup.find('a', {'class':'text font-accent color-brand size-1x-small _1HynphR0'})):
for author in soup.find('a', {'class':'text font-accent color-brand size-1x-small _1HynphR0'}):
author = author.string
else:
author = 'Unknown'
else:
createdDate = None
author = 'Unknown'
else:
print ('can not get connent')
return
news = News(article, title, author, orginalContent, createdDate, date, href, date)
uploadDBNews(news)
# fakeNewsCall('FAKE_NEWS')
#newsApiCall('NEWS_API')
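# Assumed way to run the scheduled jobs (the module name below is my guess based
# on this file, not documented in the repo): start a Celery worker with an
# embedded beat scheduler, e.g.
#   celery -A WorkerFiles worker -B --loglevel=info
# The two periodic tasks registered above then fetch news every hour.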
```
|
{
"source": "jeromlu/amino_acids_repo",
"score": 2
}
|
#### File: amino_acids_repo/amino_acids/__main__.py
```python
__version__ = "1.0.0"
import sys
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QIcon
from amino_acids.amino_acids_main_window import AminoAcidsUI
from amino_acids import qrc_resources
def main():
app = QApplication(sys.argv)
app.setWindowIcon(QIcon(":/main_window_icon.png"))
main_frame = AminoAcidsUI()
main_frame.show()
app.exec_()
if __name__ == '__main__':
main()
```
#### File: amino_acids_repo/amino_acids/settings_dialog.py
```python
import sys
# ****************************Third party****************************************
from PyQt5.QtWidgets import QApplication, QDialog, QLabel, QLineEdit, QBoxLayout
from PyQt5.QtWidgets import QWidget, QComboBox, QVBoxLayout, QDialogButtonBox
from PyQt5.QtWidgets import QCheckBox
from PyQt5.QtGui import QRegExpValidator
from PyQt5.QtCore import QRegExp
# Global constants
LEFT, ABOVE = range(2)
class SettingsDialog(QDialog):
def __init__(self, settings, parent=None):
super(SettingsDialog, self).__init__(parent)
# data
self.settings = settings
# UI initialization
self.create_dialog()
self.setWindowTitle("Settings")
# connections
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
def create_dialog(self):
# defines what to show, all information or just specific
self.lle_time = LabelledLineEdit("Time you have for answer\nin seconds", ABOVE)
self.lle_num_of_AAs = LabelledLineEdit("Number of aminoacids to test", ABOVE)
rx = r"[0-9]{3}"
reg_exp = QRegExp(rx)
self.lle_time.line_edit.setValidator(QRegExpValidator(reg_exp))
self.lle_time.line_edit.setText(str(self.settings["seconds"]))
rx = r"[0-9]|1[0-9]|2[0-2]"
reg_exp = QRegExp(rx)
self.lle_num_of_AAs.line_edit.setValidator(QRegExpValidator(reg_exp))
self.lle_num_of_AAs.line_edit.setText(str(self.settings["AA_num_to_test"]))
self.lcb_show = LabelledComboBox("Show", ABOVE)
show_options = ["AA_name", "Letter_label", "Short_label", "Skeletal_formula"]
self.lcb_show.combo_box.addItems(show_options)
self.lcb_show.combo_box.setCurrentText("Letter_label")
self.repetition_cb = QCheckBox("No repetition of amino acids")
self.repetition_cb.setChecked(self.settings["AA_repetition"])
self.buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
vbox = QVBoxLayout(self)
vbox.addWidget(self.lle_time)
vbox.addWidget(self.lle_num_of_AAs)
vbox.addWidget(self.lcb_show)
vbox.addWidget(self.repetition_cb)
vbox.addWidget(self.buttonBox)
def accept(self):
self.settings["show_only"] = self.lcb_show.combo_box.currentText()
self.settings["seconds"] = int(self.lle_time.line_edit.text())
self.settings["AA_repetition"] = self.repetition_cb.isChecked()
self.settings["AA_num_to_test"] = int(self.lle_num_of_AAs.line_edit.text())
QDialog.accept(self)
class LabelledLineEdit(QWidget):
def __init__(self, labelText="", position=LEFT, parent=None):
super(LabelledLineEdit, self).__init__(parent)
self.label = QLabel(labelText)
self.line_edit = QLineEdit()
self.label.setBuddy(self.line_edit)
layout = QBoxLayout(
QBoxLayout.LeftToRight if position == LEFT else QBoxLayout.TopToBottom
)
layout.addWidget(self.label)
layout.addWidget(self.line_edit)
self.setLayout(layout)
class LabelledComboBox(QWidget):
def __init__(self, labelText="", position=LEFT, parent=None):
super(LabelledComboBox, self).__init__(parent)
self.label = QLabel(labelText)
self.combo_box = QComboBox()
self.label.setBuddy(self.combo_box)
layout = QBoxLayout(
QBoxLayout.LeftToRight if position == LEFT else QBoxLayout.TopToBottom
)
layout.addWidget(self.label)
layout.addWidget(self.combo_box)
self.setLayout(layout)
if __name__ == "__main__":
settings = {
"show_only": "All",
"seconds": 30,
"AA_repetition": True,
"AA_num_to_test": 10,
}
def run_app():
app = QApplication(sys.argv)
dialog = SettingsDialog(settings)
dialog.show()
app.exec_()
run_app()
```
|
{
"source": "Jero-M/nuke",
"score": 2
}
|
#### File: Jero-M/nuke/nuke_change_feather_falloff.py
```python
def changeFeather(value):
for s in nuke.selectedNodes():
selNode = nuke.selectedNode()
if s.Class() == "Roto" or s.Class() == "RotoPaint":
for item in s['curves'].rootLayer:
attr = item.getAttributes()
attr.set('ff',value)
feather_value = 0
changeFeather(feather_value)
```
|
{
"source": "jeronimobarea/Programming-Language-Interpreter",
"score": 3
}
|
#### File: Programming-Language-Interpreter/tests/lexer_test.py
```python
from typing import List
from unittest import TestCase
from src.lexer.token import Token, TokenType
from src.lexer.lexer import Lexer
class LexerTest(TestCase):
def test_illegal(self) -> None:
source: str = '¡¿@'
lexer: Lexer = Lexer(source)
tokens: List[Token] = []
for i in range(len(source)):
tokens.append(lexer.next_token())
expected_tokens: List[Token] = [
Token(TokenType.ILLEGAL, '¡'),
Token(TokenType.ILLEGAL, '¿'),
Token(TokenType.ILLEGAL, '@'),
]
self.assertEqual(tokens, expected_tokens)
def test_one_character_operators(self) -> None:
source: str = '=+-/*<>!'
lexer: Lexer = Lexer(source)
tokens: List[Token] = []
for i in range(len(source)):
tokens.append(lexer.next_token())
expected_tokens: List[Token] = [
Token(TokenType.ASSIGN, '='),
Token(TokenType.PLUS, '+'),
Token(TokenType.MINUS, "-"),
Token(TokenType.DIVISION, "/"),
Token(TokenType.MULTIPLICATION, "*"),
Token(TokenType.LT, "<"),
Token(TokenType.GT, ">"),
Token(TokenType.NEGATION, "!"),
]
self.assertEqual(tokens, expected_tokens)
def test_eof(self) -> None:
source: str = '+'
lexer: Lexer = Lexer(source)
tokens: List[Token] = []
for i in range(len(source) + 1):
tokens.append(lexer.next_token())
expected_tokens: List[Token] = [
Token(TokenType.PLUS, '+'),
Token(TokenType.EOF, ''),
]
self.assertEqual(tokens, expected_tokens)
def test_delimiters(self) -> None:
source: str = '(){},;'
lexer: Lexer = Lexer(source)
tokens: List[Token] = []
for i in range(len(source)):
tokens.append(lexer.next_token())
expected_tokens: List[Token] = [
Token(TokenType.LPAREN, '('),
Token(TokenType.RPAREN, ')'),
Token(TokenType.LBRACE, '{'),
Token(TokenType.RBRACE, '}'),
Token(TokenType.COMMA, ','),
Token(TokenType.SEMICOLON, ';'),
]
self.assertEqual(tokens, expected_tokens)
def test_assignment(self) -> None:
source: str = 'var num = 5;'
lexer: Lexer = Lexer(source)
tokens: List[Token] = []
for i in range(5):
tokens.append(lexer.next_token())
expected_tokens: List[Token] = [
Token(TokenType.VAR, 'var'),
Token(TokenType.IDENT, 'num'),
Token(TokenType.ASSIGN, '='),
Token(TokenType.INT, '5'),
Token(TokenType.SEMICOLON, ';'),
]
self.assertEqual(tokens, expected_tokens)
def test_function_declaration(self) -> None:
source: str = '''
var res = func(x, y) {
x + y;
};
'''
lexer: Lexer = Lexer(source)
tokens: List[Token] = []
for i in range(16):
tokens.append(lexer.next_token())
expected_tokens: List[Token] = [
Token(TokenType.VAR, 'var'),
Token(TokenType.IDENT, 'res'),
Token(TokenType.ASSIGN, '='),
Token(TokenType.FUNCTION, 'func'),
Token(TokenType.LPAREN, '('),
Token(TokenType.IDENT, 'x'),
Token(TokenType.COMMA, ','),
Token(TokenType.IDENT, 'y'),
Token(TokenType.RPAREN, ')'),
Token(TokenType.LBRACE, '{'),
Token(TokenType.IDENT, 'x'),
Token(TokenType.PLUS, '+'),
Token(TokenType.IDENT, 'y'),
Token(TokenType.SEMICOLON, ';'),
Token(TokenType.RBRACE, '}'),
Token(TokenType.SEMICOLON, ';'),
]
self.assertEqual(tokens, expected_tokens)
def test_function_call(self) -> None:
source: str = 'var res = sum(x, y);'
lexer: Lexer = Lexer(source)
tokens: List[Token] = []
for i in range(10):
tokens.append(lexer.next_token())
expected_tokens: List[Token] = [
Token(TokenType.VAR, 'var'),
Token(TokenType.IDENT, 'res'),
Token(TokenType.ASSIGN, '='),
Token(TokenType.IDENT, 'sum'),
Token(TokenType.LPAREN, '('),
Token(TokenType.IDENT, 'x'),
Token(TokenType.COMMA, ','),
Token(TokenType.IDENT, 'y'),
Token(TokenType.RPAREN, ')'),
Token(TokenType.SEMICOLON, ';'),
]
self.assertEqual(tokens, expected_tokens)
def test_control_statement(self) -> None:
source: str = '''
if (5 < 10) {
return true;
} else {
return false;
}
'''
lexer: Lexer = Lexer(source)
tokens: List[Token] = []
for i in range(17):
tokens.append(lexer.next_token())
expected_tokens: List[Token] = [
Token(TokenType.IF, 'if'),
Token(TokenType.LPAREN, '('),
Token(TokenType.INT, '5'),
Token(TokenType.LT, '<'),
Token(TokenType.INT, '10'),
Token(TokenType.RPAREN, ')'),
Token(TokenType.LBRACE, '{'),
Token(TokenType.RETURN, 'return'),
Token(TokenType.TRUE, 'true'),
Token(TokenType.SEMICOLON, ';'),
Token(TokenType.RBRACE, '}'),
Token(TokenType.ELSE, 'else'),
Token(TokenType.LBRACE, '{'),
Token(TokenType.RETURN, 'return'),
Token(TokenType.FALSE, 'false'),
Token(TokenType.SEMICOLON, ';'),
Token(TokenType.RBRACE, '}'),
]
self.assertEqual(tokens, expected_tokens)
def test_two_character_operations(self) -> None:
source: str = '''
10 == 10;
10 != 9;
'''
lexer: Lexer = Lexer(source)
tokens: List[Token] = []
for i in range(8):
tokens.append(lexer.next_token())
expected_tokens: List[Token] = [
Token(TokenType.INT, '10'),
Token(TokenType.EQ, '=='),
Token(TokenType.INT, '10'),
Token(TokenType.SEMICOLON, ';'),
Token(TokenType.INT, '10'),
Token(TokenType.NOT_EQ, '!='),
Token(TokenType.INT, '9'),
Token(TokenType.SEMICOLON, ';'),
]
self.assertEqual(tokens, expected_tokens)
def test_complex_var_name(self) -> None:
source: str = 'var num_1 = 10;'
lexer: Lexer = Lexer(source)
tokens: List[Token] = []
for i in range(5):
tokens.append(lexer.next_token())
expected_tokens: List[Token] = [
Token(TokenType.VAR, 'var'),
Token(TokenType.IDENT, 'num_1'),
Token(TokenType.ASSIGN, '='),
Token(TokenType.INT, '10'),
Token(TokenType.SEMICOLON, ';'),
]
self.assertEqual(tokens, expected_tokens)
```
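The tests above hard-code how many tokens to pull for each snippet. A small driver sketch, assuming the same `src.lexer` package layout the tests import from and relying on `Token` equality exactly as `assertEqual` does, lexes a source string until the EOF token instead.
```python
from src.lexer.lexer import Lexer
from src.lexer.token import Token, TokenType


def tokenize(source: str) -> list:
    # Pull tokens until the lexer reports EOF instead of counting them up front.
    lexer = Lexer(source)
    eof = Token(TokenType.EOF, '')
    tokens = []
    while (token := lexer.next_token()) != eof:
        tokens.append(token)
    return tokens


if __name__ == "__main__":
    for token in tokenize("var num = 5;"):
        print(token)
```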
|
{
"source": "jeronimobarea/telegram_forwarder",
"score": 3
}
|
#### File: src/utils/message.py
```python
from src.utils.constants import Constants
class Message:
@staticmethod
def format_message(channel: str, message: str) -> str:
return f"from: @{channel} \n {message}"
@staticmethod
def chat_title_matches(
chat: str,
matches: list = Constants.CHAT_FORWARD_LIST.value
) -> bool:
return any(chat in match for match in matches)
```
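A short illustration of both helpers; the channel name and forward list are made up, and `matches` is passed explicitly so the example does not depend on the project's `Constants` enum.
```python
from src.utils.message import Message

# Hypothetical channel name and forward list, purely for demonstration.
forward_list = ["crypto signals", "daily news"]

# Prefix the text with its origin channel.
print(Message.format_message("some_channel", "BTC looks volatile today"))

# True because "signals" is a substring of "crypto signals".
if Message.chat_title_matches("signals", matches=forward_list):
    print("this chat title would be forwarded")
```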
|
{
"source": "JeronimoMendes/Tomatimer",
"score": 2
}
|
#### File: JeronimoMendes/Tomatimer/pref_win.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
import json
from PyQt5.QtGui import QIcon
class Ui_pref_win(object):
def __init__(self, window):
self.window = window
        # Load persisted timer settings; the with-block closes the file itself.
        with open("settings.json", "r") as settings:
            self.data = json.load(settings)
self.main_time = self.data["main_time"]
self.big_time = self.data["long_break"]
self.small_time = self.data["short_break"]
def setupUi(self, pref_win):
pref_win.setObjectName("pref_win")
pref_win.resize(390, 251)
pref_win.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
pref_win.setWindowIcon(QIcon("material/images/tomato.png"))
self.centralwidget = QtWidgets.QWidget(pref_win)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(10, 30, 81, 17))
self.label.setObjectName("label")
self.spinBox = QtWidgets.QSpinBox(self.centralwidget)
self.spinBox.setGeometry(QtCore.QRect(150, 30, 48, 26))
self.spinBox.setProperty("value", self.main_time)
self.spinBox.setObjectName("spinBox")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(10, 80, 121, 17))
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(10, 130, 121, 17))
self.label_3.setObjectName("label_3")
self.spinBox_2 = QtWidgets.QSpinBox(self.centralwidget)
self.spinBox_2.setGeometry(QtCore.QRect(150, 130, 48, 26))
self.spinBox_2.setProperty("value", self.big_time)
self.spinBox_2.setObjectName("spinBox_2")
self.spinBox_3 = QtWidgets.QSpinBox(self.centralwidget)
self.spinBox_3.setGeometry(QtCore.QRect(150, 80, 48, 26))
self.spinBox_3.setProperty("value", self.small_time)
self.spinBox_3.setObjectName("spinBox_3")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(50, 190, 89, 25))
self.pushButton.setObjectName("pushButton")
self.pushButton.pressed.connect(self.window.close)
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(240, 190, 89, 25))
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_2.pressed.connect(self.changeTime)
pref_win.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(pref_win)
self.menubar.setGeometry(QtCore.QRect(0, 0, 390, 22))
self.menubar.setObjectName("menubar")
pref_win.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(pref_win)
self.statusbar.setObjectName("statusbar")
pref_win.setStatusBar(self.statusbar)
self.retranslateUi(pref_win)
QtCore.QMetaObject.connectSlotsByName(pref_win)
def retranslateUi(self, pref_win):
_translate = QtCore.QCoreApplication.translate
pref_win.setWindowTitle(_translate("pref_win", "Tomatimer - Preferences"))
self.label.setText(_translate("pref_win", "Focus time"))
self.label_2.setText(_translate("pref_win", "Short break time"))
self.label_3.setText(_translate("pref_win", "Long break time"))
self.pushButton.setText(_translate("pref_win", "Cancel"))
self.pushButton_2.setText(_translate("pref_win", "Apply"))
def changeTime(self):
self.small_time = self.spinBox_3.value()
self.big_time = self.spinBox_2.value()
self.main_time = self.spinBox.value()
print("main time value changed to", self.main_time)
print("small time value changed to", self.small_time)
print("big time value changed to", self.big_time)
self.data["main_time"] = self.main_time
self.data["short_break"] = self.small_time
self.data["long_break"] = self.big_time
self.data["subject"] = self.data["subject"]
jsonFile = open("settings.json", "w+")
jsonFile.write(json.dumps(self.data))
jsonFile.close()
```
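The dialog expects a `settings.json` file next to the script with the keys read above; a sketch that writes a minimal one (the durations and the subject are placeholder values) looks like this:
```python
import json

# Minimal settings file the preferences window reads on startup.
# Durations are in minutes; "subject" is only a placeholder.
defaults = {
    "main_time": 25,
    "short_break": 5,
    "long_break": 15,
    "subject": "Maths",
}
with open("settings.json", "w") as fh:
    json.dump(defaults, fh, indent=4)
```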
#### File: JeronimoMendes/Tomatimer/timer.py
```python
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import QTimer, QTime
from pypresence import Presence
import time, datetime
class PomoTimer:
def __init__(self, times, label, tray, rpc, subject):
self.subject = subject
self.tray = tray
self.label = label
self.main_time = times[0]
self.time = QTime(0,self.main_time,0)
self.timer = QTimer()
self.timer.setInterval(1000)
self.timer.timeout.connect(self.timerEvent)
self.rep = 0
self.RPC = rpc
self.short_break = self.Interval_timer(times[1], self.label, self.tray, self, self.RPC, self.subject)
self.long_break = self.Interval_timer(times[2], self.label, self.tray, self, self.RPC, self.subject)
self.round = 1
class Interval_timer:
def __init__(self, main_time, label, tray, outer_class, rpc, subject):
self.outer_class = outer_class
self.tray = tray
self.label = label
self.main_time = main_time
self.time = QTime(0,main_time,0)
self.RPC = rpc
self.subject = subject
def timerEvent(self):
self.time = self.time.addSecs(-1)
self.label.setText(self.time.toString("mm:ss"))
if self.time.secsTo(QTime(0,0,0)) == 0:
print("Break timer stopped")
self.tray.showMessage("Tomatime","Break time's up", self.tray.icon(), 4000)
self.clearTimer()
self.outer_class.timer.timeout.disconnect(self.timerEvent)
self.outer_class.timer.timeout.connect(self.outer_class.timerEvent)
print("Returning to focus timer")
self.outer_class.round += 1
self.outer_class.updateDiscord("Studying")
return print(self.time.toString("mm:ss"), " Break timer")
def startTimer(self):
print("Starting secondary timer")
self.outer_class.timer.timeout.connect(self.timerEvent)
self.outer_class.updateDiscord("Taking a break")
def clearTimer(self):
print("Clearing break timer")
self.time = QTime(0,self.main_time,0)
self.label.setText(self.time.toString("mm:ss"))
def timerEvent(self):
self.time = self.time.addSecs(-1)
self.label.setText(self.time.toString("mm:ss"))
if self.time.secsTo(QTime(0,0,0)) == 0:
self.rep += 1
self.timer.timeout.disconnect(self.timerEvent)
self.clearTimer()
print("Focus time's up")
self.tray.showMessage("Tomatime","Focus time's up", self.tray.icon(), 4000)
            if self.rep == 3:
self.rep = 0
self.long_break.startTimer()
return print("Starting long break timer")
else:
self.short_break.startTimer()
return print("Starting short break timer")
return print(self.time.toString("mm:ss"), (" Focus Timer Ticking"))
def startTimer(self):
self.timer.start()
print(self.timer.interval())
self.updateDiscord("Studying")
def clearTimer(self):
self.time = QTime(0, self.main_time, 0)
def pauseTimer(self):
self.timer.stop()
        try:
            self.RPC.update(state=f"Studying - Round {self.round}", details="Paused", large_image="fsidfsd")
        except Exception:
            # Rich-presence updates are skipped when no Discord client is connected.
            print("No Discord app running")
def resetTimer(self):
self.pauseTimer()
self.short_break.clearTimer()
self.long_break.clearTimer()
self.clearTimer()
self.label.setText(str(self.main_time)+":00")
        try:
            self.timer.timeout.disconnect(self.short_break.timerEvent)
        except TypeError:
            # disconnect() raises TypeError if this slot was never connected.
            pass
        try:
            self.timer.timeout.disconnect(self.long_break.timerEvent)
        except TypeError:
            pass
        try:
            self.timer.timeout.disconnect(self.timerEvent)
        except TypeError:
            pass
self.timer.timeout.connect(self.timerEvent)
def epochTime(self, mins, second):
orig = datetime.datetime.fromtimestamp(time.time())
new = orig + datetime.timedelta(minutes=mins, seconds=second)
return time.mktime(new.timetuple())
def updateDiscord(self, info):
        try:
            self.RPC.update(state=f"Studying {self.subject} - Round {self.round}", details=info, large_image="fsidfsd", end=self.epochTime(self.time.minute(), self.time.second()))
        except Exception:
            print("No Discord app running")
```
#### File: JeronimoMendes/Tomatimer/tray.py
```python
from PyQt5.QtWidgets import QSystemTrayIcon, QAction, QMenu
from PyQt5.QtGui import QIcon
from timer import PomoTimer
from pypresence import Presence
class System_tray():
def __init__(self, tray, app, times, subject, pref_win):
self.times = times
self.main_time = times[0]
self.app = app
self.tray = tray
self.subject = subject
self.pref_win = pref_win
self.label = QAction(str(self.main_time)+":00")
        try:
            self.RPC = Presence("729011176477818890")  # Initialize the Presence client
            self.RPC.connect()  # Start the handshake loop
        except Exception:
            # Keep the attribute defined so createTimer() still works without Discord.
            self.RPC = None
            print("You don't have a discord app open")
def setupUi(self):
# Create Menu
self.menu = QMenu()
self.tray.setContextMenu(self.menu)
self.tray.setIcon(QIcon("material/images/tomato.png"))
self.tray.setVisible(True)
# Create and add Menu Actions
self.preferences_btt = QAction("Preferences")
self.preferences_btt.triggered.connect(self.preferences)
self.quit = QAction("Quit")
self.quit.triggered.connect(self.app.quit)
self.start_btt = QAction("Start")
self.start_btt.triggered.connect(self.start)
self.pause_btt = QAction("Pause")
self.pause_btt.triggered.connect(self.pause)
self.pause_btt.setVisible(False)
self.reset_btt = QAction("Reset")
self.reset_btt.triggered.connect(self.reset)
self.reset_btt.setVisible(False)
self.menu.addAction(self.label)
self.menu.addSeparator()
self.menu.addActions([self.start_btt, self.pause_btt, self.reset_btt, self.preferences_btt, self.quit])
self.menu.addMenu
def start(self):
print("Start")
self.timer_main.startTimer()
self.start_btt.setVisible(False)
self.pause_btt.setVisible(True)
self.reset_btt.setVisible(True)
def pause(self):
print("Pause")
self.timer_main.pauseTimer()
self.start_btt.setVisible(True)
self.pause_btt.setVisible(False)
def preferences(self):
print("Preferences")
self.pref_win.show()
def reset(self):
self.timer_main.resetTimer()
self.pause_btt.setVisible(False)
self.start_btt.setVisible(True)
self.reset_btt.setVisible(False)
def createTimer(self):
# Creates a timer
self.timer_main = PomoTimer(self.times, self.label, self.tray, self.RPC, self.subject)
```
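A hedged sketch of how these pieces appear to fit together in an entry-point script (the repository's real launcher is not shown here); the settings file layout matches `pref_win.py`, and `times` follows the `(focus, short break, long break)` order that `PomoTimer` expects.
```python
import sys
import json
from PyQt5.QtWidgets import QApplication, QMainWindow, QSystemTrayIcon
from pref_win import Ui_pref_win
from tray import System_tray

app = QApplication(sys.argv)
app.setQuitOnLastWindowClosed(False)  # keep running when the preferences window closes

# Preferences window built from the generated UI class above.
pref_window = QMainWindow()
ui = Ui_pref_win(pref_window)
ui.setupUi(pref_window)

# Times are (focus, short break, long break) in minutes, matching
# PomoTimer's use of times[0], times[1] and times[2].
with open("settings.json") as fh:
    data = json.load(fh)
times = [data["main_time"], data["short_break"], data["long_break"]]

tray_icon = QSystemTrayIcon()
controller = System_tray(tray_icon, app, times, data.get("subject", ""), pref_window)
controller.setupUi()
controller.createTimer()

sys.exit(app.exec_())
```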
|
{
"source": "jeronimopenha/grn2dot",
"score": 3
}
|
#### File: grn2dot/grn2dot/grn2dot.py
```python
import networkx as nx
class Grn2dot:
_instance = None
# def __new__(cls):
# # if cls._instance is None:
# cls._instance = super().__new__(cls)
# return cls._instance
def __init__(self, file_path):
self.nodes, self.edges, self.equations = self.process_grn_file(self.read_file(file_path))
self.digraph = nx.DiGraph()
for node in self.nodes:
if node in self.equations.keys():
self.digraph.add_node(node, equation=self.equations[node])
else:
self.digraph.add_node(node)
for key, edges_l in self.edges.items():
for edge in edges_l:
self.digraph.add_edge(edge, key)
def get_nodes_vector(self):
return self.nodes
def get_edges_dict(self):
return self.edges
def get_equations_dict(self):
return self.equations
def get_nx_nodes(self):
return self.digraph.nodes
def get_nx_edges(self):
return self.digraph.edges
def get_nx_digraph(self):
return self.digraph
def get_dot_str(self):
return nx.nx_agraph.to_agraph(self.digraph)
def get_num_nodes(self):
return len(self.nodes)
def get_num_equations(self):
return len(self.equations)
def get_grn_mem_specifications(self):
grn_mem_specifications = []
for i in range(self.get_num_nodes()):
grn_mem_specifications.append([self.get_nodes_vector()[i], i])
counter = 0
for key in self.get_equations_dict():
equation = self.get_equations_dict()[key]
eq_sp = []
for i in range(self.get_num_nodes()):
if self.get_nodes_vector()[i] in equation:
eq_sp.append(i)
eq_sp.sort(reverse=True)
grn_mem_specifications[counter].append(eq_sp)
counter = counter + 1
return grn_mem_specifications
@staticmethod
    def read_file(file):
        # Read the GRN description file and return it as a list of lines.
        with open(file) as f:
            return f.read().split('\n')
@staticmethod
def process_grn_file(lines):
nodes = []
edges = {}
equations = {}
# treat the equations
for line in lines:
if line == '':
continue
line = line.strip()
# looking for equations:
node = ""
if '=' in line:
eq_parts = line.split("=")
node = eq_parts[0]
node = node.replace(' ', '')
node = ' ' + node + ' '
equations[node] = eq_parts[1]
else:
node = line
if node not in nodes:
nodes.append(node)
# looking for edges:
for key, equation in equations.items():
# equation = equation.replace(' ', '')
edges_l = equation
edges_l = edges_l.replace('(', ' ')
edges_l = edges_l.replace(')', ' ')
edges_l = edges_l.replace(' not ', ' ')
edges_l = edges_l.replace(' and ', ' ')
edges_l = edges_l.replace(' or ', ' ')
edges_l = edges_l.split(' ')
e = []
for edge in edges_l:
if edge != '':
edge = ' ' + edge + ' '
e.append(edge)
if edge not in nodes:
nodes.append(edge)
edges[key] = e
# processing equations
#equation = equation.replace(' ','')
equation = equation.replace(' and ', ' && ')
equation = equation.replace(' or ', ' || ')
equation = equation.replace(' not ', ' ! ')
equation = equation.replace(' || ', ' || ')
equation = equation.replace(' && ', ' && ')
equation = equation.replace(' ! ', ' ! ')
equation = equation.replace('(', ' ( ')
equation = equation.replace(')', ' ) ')
equation = " " + equation + " "
equation = equation.replace(" ", " ")
equation = equation.replace(" ", " ")
equations[key] = equation
return nodes, edges, equations
```
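A short usage sketch; `example.grn` is a hypothetical input file whose lines are either bare node names or update rules of the form `A = B and not C`, which is what `process_grn_file` parses, and `get_dot_str` additionally needs pygraphviz installed.
```python
from grn2dot.grn2dot import Grn2dot

# Hypothetical GRN description file with one node or equation per line.
grn = Grn2dot("example.grn")
print(grn.get_num_nodes(), "nodes,", grn.get_num_equations(), "equations")

# Export the regulatory network as Graphviz dot text (requires pygraphviz).
print(grn.get_dot_str())

# Per-node memory specification: [node_name, node_index, indexes of its regulators].
for spec in grn.get_grn_mem_specifications():
    print(spec)
```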
|
{
"source": "jerono-coder/2021-2022-Science-Fair-Project",
"score": 3
}
|
#### File: jerono-coder/2021-2022-Science-Fair-Project/main.py
```python
from time import sleep
import RPi.GPIO as GPIO
import os
import random
import serial
#end of importing necessary imports#
#set GPIO mode#
GPIO.setmode(GPIO.BCM)
#end of set GPIO mode#
#start of all major variables#
#import Micro:Bit from serial port#
z1baudrate = 115200
z1port = '/dev/ttyACM0'
z1serial = serial.Serial(port=z1port, baudrate=z1baudrate)
z1serial.timeout = 2
#end of import Micro:Bit from serial port#
# Touch Sensors, 2 to each pin #
touch1 = 14
touch2 = 15
touch3 = 18
# End of Touch Sensor Variables #
#motor1variables#
IN1 = 17
IN2 = 27
IN3 = 22
IN4 = 23
#end of motor 1 variables#
#motor 2 variables#
motor2IN1 = 24
motor2IN2 = 16
motor2IN3 = 9
motor2IN4 = 25
#end of motor 2 variables#
#motor 3 variables #
motor3IN1 = 8
motor3IN2 =7
motor3IN3 = 11
motor3IN4 = 5
#end of motor 3 variables#
#motor 4 variables#
motor4IN1 = 12
motor4IN2 = 16  # NOTE: BCM pin 16 is also assigned to motor2IN2 above
motor4IN3 = 20
motor4IN4 = 21
#end of motor 4 variables#
#motor 5 variables#
motor5IN1 = 6
motor5IN2 = 13
motor5IN3 = 19
motor5IN4 = 26
#end of motor 5 variables#
#end of all major variables#
time = 0.001  # delay between half-steps, in seconds
#set up touch sensors #
GPIO.setup(touch1, GPIO.IN) # Touch Switch
GPIO.setup(touch2, GPIO.IN) # Touch Swtich 2
GPIO.setup(touch3, GPIO.IN) # Touch Switch 3
#end of set up touch sensors#
#motor 1 setup#
GPIO.setup(IN1,GPIO.OUT)
GPIO.setup(IN2,GPIO.OUT)
GPIO.setup(IN3,GPIO.OUT)
GPIO.setup(IN4,GPIO.OUT)
#end of motor1 setup#
#motor 2 setup#
GPIO.setup(motor2IN1,GPIO.OUT)
GPIO.setup(motor2IN2,GPIO.OUT)
GPIO.setup(motor2IN3,GPIO.OUT)
GPIO.setup(motor2IN4,GPIO.OUT)
#end of motor2 setup#
#motor 3 setup#
GPIO.setup(motor3IN1,GPIO.OUT)
GPIO.setup(motor3IN2,GPIO.OUT)
GPIO.setup(motor3IN3,GPIO.OUT)
GPIO.setup(motor3IN4,GPIO.OUT)
#end of motor3 setup#
#motor 4 setup#
GPIO.setup(motor4IN1,GPIO.OUT)
GPIO.setup(motor4IN2,GPIO.OUT)
GPIO.setup(motor4IN3,GPIO.OUT)
GPIO.setup(motor4IN4,GPIO.OUT)
#end of motor4 setup#
#motor 5 setup#
GPIO.setup(motor5IN1,GPIO.OUT)
GPIO.setup(motor5IN2,GPIO.OUT)
GPIO.setup(motor5IN3,GPIO.OUT)
GPIO.setup(motor5IN4,GPIO.OUT)
#end of motor5 setup#
#drive all motor coil pins low before the first step#
GPIO.output(IN1, False)
GPIO.output(IN2, False)
GPIO.output(IN3, False)
GPIO.output(IN4, False)
GPIO.output(motor2IN1, False)
GPIO.output(motor2IN2, False)
GPIO.output(motor2IN3, False)
GPIO.output(motor2IN4, False)
GPIO.output(motor3IN1, False)
GPIO.output(motor3IN2, False)
GPIO.output(motor3IN3, False)
GPIO.output(motor3IN4, False)
GPIO.output(motor4IN1, False)
GPIO.output(motor4IN2, False)
GPIO.output(motor4IN3, False)
GPIO.output(motor4IN4, False)
GPIO.output(motor5IN1, False)
GPIO.output(motor5IN2, False)
GPIO.output(motor5IN3, False)
GPIO.output(motor5IN4, False)
#end of driving motor pins low#
# main program
def main():
    while True:
        # Poll the micro:bit over the serial link for a command byte.
        size = z1serial.inWaiting()
        data = z1serial.read(size)
        if data == b'2':
            # Keep stepping motor 1 left while touch sensor 1 reads low.
            while not GPIO.input(touch1):
                print('touch on')
                left(10)
#end of main program#
#MOTOR1 steps#
def Step1():
GPIO.output(IN4, True)
sleep (time)
GPIO.output(IN4, False)
def Step2():
GPIO.output(IN4, True)
GPIO.output(IN3, True)
sleep (time)
GPIO.output(IN4, False)
GPIO.output(IN3, False)
def Step3():
GPIO.output(IN3, True)
sleep (time)
GPIO.output(IN3, False)
def Step4():
GPIO.output(IN2, True)
GPIO.output(IN3, True)
sleep (time)
GPIO.output(IN2, False)
GPIO.output(IN3, False)
def Step5():
GPIO.output(IN2, True)
sleep (time)
GPIO.output(IN2, False)
def Step6():
GPIO.output(IN1, True)
GPIO.output(IN2, True)
sleep (time)
GPIO.output(IN1, False)
GPIO.output(IN2, False)
def Step7():
GPIO.output(IN1, True)
sleep (time)
GPIO.output(IN1, False)
def Step8():
GPIO.output(IN4, True)
GPIO.output(IN1, True)
sleep (time)
GPIO.output(IN4, False)
GPIO.output(IN1, False)
def left(step):
for i in range (step):
Step1()
Step2()
Step3()
Step4()
Step5()
Step6()
Step7()
Step8()
print ("Step left: ",i)
def right(step):
for i in range (step):
Step8()
Step7()
Step6()
Step5()
Step4()
Step3()
Step2()
Step1()
print ("Step right: ",i)
# end of motor 1 steps#
#MOTOR2 steps#
def motor2Step1():
GPIO.output(motor2IN4, True)
sleep (time)
GPIO.output(motor2IN4, False)
def motor2Step2():
GPIO.output(motor2IN4, True)
GPIO.output(motor2IN3, True)
sleep (time)
GPIO.output(motor2IN4, False)
GPIO.output(motor2IN3, False)
def motor2Step3():
GPIO.output(motor2IN3, True)
sleep (time)
GPIO.output(motor2IN3, False)
def motor2Step4():
GPIO.output(motor2IN2, True)
GPIO.output(motor2IN3, True)
sleep (time)
GPIO.output(motor2IN2, False)
GPIO.output(motor2IN3, False)
def motor2Step5():
GPIO.output(motor2IN2, True)
sleep (time)
GPIO.output(motor2IN2, False)
def motor2Step6():
GPIO.output(motor2IN1, True)
GPIO.output(motor2IN2, True)
sleep (time)
GPIO.output(motor2IN1, False)
GPIO.output(motor2IN2, False)
def motor2Step7():
GPIO.output(motor2IN1, True)
sleep (time)
GPIO.output(motor2IN1, False)
def motor2Step8():
GPIO.output(motor2IN4, True)
GPIO.output(motor2IN1, True)
sleep (time)
GPIO.output(motor2IN4, False)
GPIO.output(motor2IN1, False)
def motor2left(step):
for i in range (step):
motor2Step1()
motor2Step2()
motor2Step3()
motor2Step4()
motor2Step5()
motor2Step6()
motor2Step7()
motor2Step8()
print ("Step left: ",i)
def motor2right(step):
for i in range (step):
motor2Step8()
motor2Step7()
motor2Step6()
motor2Step5()
motor2Step4()
motor2Step3()
motor2Step2()
motor2Step1()
print ("Step right: ",i)
# end of motor 2 steps#
#MOTOR3 steps#
def motor3Step1():
GPIO.output(motor3IN4, True)
sleep (time)
GPIO.output(motor3IN4, False)
def motor3Step2():
GPIO.output(motor3IN4, True)
GPIO.output(motor3IN3, True)
sleep (time)
GPIO.output(motor3IN4, False)
GPIO.output(motor3IN3, False)
def motor3Step3():
GPIO.output(motor3IN3, True)
sleep (time)
GPIO.output(motor3IN3, False)
def motor3Step4():
GPIO.output(motor3IN2, True)
GPIO.output(motor3IN3, True)
sleep (time)
GPIO.output(motor3IN2, False)
GPIO.output(motor3IN3, False)
def motor3Step5():
GPIO.output(motor3IN2, True)
sleep (time)
GPIO.output(motor3IN2, False)
def motor3Step6():
GPIO.output(motor3IN1, True)
GPIO.output(motor3IN2, True)
sleep (time)
GPIO.output(motor3IN1, False)
GPIO.output(motor3IN2, False)
def motor3Step7():
GPIO.output(motor3IN1, True)
sleep (time)
GPIO.output(motor3IN1, False)
def motor3Step8():
GPIO.output(motor3IN4, True)
GPIO.output(motor3IN1, True)
sleep (time)
GPIO.output(motor3IN4, False)
GPIO.output(motor3IN1, False)
def motor3left(step):
for i in range (step):
motor3Step1()
motor3Step2()
motor3Step3()
motor3Step4()
motor3Step5()
motor3Step6()
motor3Step7()
motor3Step8()
print ("Step left: ",i)
def motor3right(step):
for i in range (step):
motor3Step8()
motor3Step7()
motor3Step6()
motor3Step5()
motor3Step4()
motor3Step3()
motor3Step2()
motor3Step1()
print ("Step right: ",i)
# end of motor 3 steps#
#MOTOR4 steps#
def motor4Step1():
GPIO.output(motor4IN4, True)
sleep (time)
GPIO.output(motor4IN4, False)
def motor4Step2():
GPIO.output(motor4IN4, True)
GPIO.output(motor4IN3, True)
sleep (time)
GPIO.output(motor4IN4, False)
GPIO.output(motor4IN3, False)
def motor4Step3():
GPIO.output(motor4IN3, True)
sleep (time)
GPIO.output(motor4IN3, False)
def motor4Step4():
GPIO.output(motor4IN2, True)
GPIO.output(motor4IN3, True)
sleep (time)
GPIO.output(motor4IN2, False)
GPIO.output(motor4IN3, False)
def motor4Step5():
GPIO.output(motor4IN2, True)
sleep (time)
GPIO.output(motor4IN2, False)
def motor4Step6():
GPIO.output(motor4IN1, True)
GPIO.output(motor4IN2, True)
sleep (time)
GPIO.output(motor4IN1, False)
GPIO.output(motor4IN2, False)
def motor4Step7():
GPIO.output(motor4IN1, True)
sleep (time)
GPIO.output(motor4IN1, False)
def motor4Step8():
GPIO.output(motor4IN4, True)
GPIO.output(motor4IN1, True)
sleep (time)
GPIO.output(motor4IN4, False)
GPIO.output(motor4IN1, False)
def motor4left(step):
for i in range (step):
motor4Step1()
motor4Step2()
motor4Step3()
motor4Step4()
motor4Step5()
motor4Step6()
motor4Step7()
motor4Step8()
print ("Step left: ",i)
def motor4right(step):
for i in range (step):
motor4Step8()
motor4Step7()
motor4Step6()
motor4Step5()
motor4Step4()
motor4Step3()
motor4Step2()
motor4Step1()
print ("Step right: ",i)
# end of motor 4 steps#
#MOTOR5 steps#
def motor5Step1():
GPIO.output(motor5IN4, True)
sleep (time)
GPIO.output(motor5IN4, False)
def motor5Step2():
GPIO.output(motor5IN4, True)
GPIO.output(motor5IN3, True)
sleep (time)
GPIO.output(motor5IN4, False)
GPIO.output(motor5IN3, False)
def motor5Step3():
GPIO.output(motor5IN3, True)
sleep (time)
GPIO.output(motor5IN3, False)
def motor5Step4():
GPIO.output(motor5IN2, True)
GPIO.output(motor5IN3, True)
sleep (time)
GPIO.output(motor5IN2, False)
GPIO.output(motor5IN3, False)
def motor5Step5():
GPIO.output(motor5IN2, True)
sleep (time)
GPIO.output(motor5IN2, False)
def motor5Step6():
GPIO.output(motor5IN1, True)
GPIO.output(motor5IN2, True)
sleep (time)
GPIO.output(motor5IN1, False)
GPIO.output(motor5IN2, False)
def motor5Step7():
GPIO.output(motor5IN1, True)
sleep (time)
GPIO.output(motor5IN1, False)
def motor5Step8():
GPIO.output(motor5IN4, True)
GPIO.output(motor5IN1, True)
sleep (time)
GPIO.output(motor5IN4, False)
GPIO.output(motor5IN1, False)
def motor5left(step):
for i in range (step):
motor5Step1()
motor5Step2()
motor5Step3()
motor5Step4()
motor5Step5()
motor5Step6()
motor5Step7()
motor5Step8()
print ("Step left: ",i)
def motor5right(step):
for i in range (step):
motor5Step8()
motor5Step7()
motor5Step6()
motor5Step5()
motor5Step4()
motor5Step3()
motor5Step2()
motor5Step1()
print ("Step right: ",i)
# end of motor 5 steps#
main()
```
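The five motor drivers above differ only in their pin numbers. As a design note, the same half-step sequence can be written once and reused per motor; the sketch below copies the pin assignments of motors 1 and 2 from the variables above and is intended to behave like `left`/`right`, but it is not part of the original project.
```python
from time import sleep
import RPi.GPIO as GPIO

DELAY = 0.001  # seconds between half-steps, same as the `time` constant above

# Half-step sequence expressed as which pins are pulsed high on each step,
# indexed into a (IN1, IN2, IN3, IN4) tuple, mirroring Step1..Step8 above.
HALF_STEPS = [(3,), (3, 2), (2,), (1, 2), (1,), (0, 1), (0,), (3, 0)]


class Stepper:
    def __init__(self, pins):
        self.pins = pins  # (IN1, IN2, IN3, IN4)
        for pin in pins:
            GPIO.setup(pin, GPIO.OUT)
            GPIO.output(pin, False)

    def _pulse(self, active):
        for idx in active:
            GPIO.output(self.pins[idx], True)
        sleep(DELAY)
        for idx in active:
            GPIO.output(self.pins[idx], False)

    def left(self, steps):
        for _ in range(steps):
            for active in HALF_STEPS:
                self._pulse(active)

    def right(self, steps):
        for _ in range(steps):
            for active in reversed(HALF_STEPS):
                self._pulse(active)


# Pin assignments copied from motor 1 and motor 2 above.
GPIO.setmode(GPIO.BCM)
motor1 = Stepper((17, 27, 22, 23))
motor2 = Stepper((24, 16, 9, 25))
motor1.left(10)
```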
|
{
"source": "jerowe/sgkit",
"score": 2
}
|
#### File: sgkit/stats/hwe.py
```python
from typing import Hashable, Optional
import dask.array as da
import numpy as np
import xarray as xr
from numba import njit
from numpy import ndarray
from xarray import Dataset
def hardy_weinberg_p_value(obs_hets: int, obs_hom1: int, obs_hom2: int) -> float:
"""Exact test for HWE as described in Wigginton et al. 2005 [1].
Parameters
----------
obs_hets : int
Number of heterozygotes with minor variant.
obs_hom1 : int
Number of reference/major homozygotes.
obs_hom2 : int
Number of alternate/minor homozygotes.
Returns
-------
float
P value in [0, 1]
References
----------
    - [1] Wigginton, J. E., D. J. Cutler, and G. R. Abecasis. 2005.
      “A Note on Exact Tests of Hardy-Weinberg Equilibrium.” American Journal of
      Human Genetics 76 (5): 887–93.
Raises
------
ValueError
If any observed counts are negative.
"""
if obs_hom1 < 0 or obs_hom2 < 0 or obs_hets < 0:
raise ValueError("Observed genotype counts must be positive")
obs_homc = obs_hom2 if obs_hom1 < obs_hom2 else obs_hom1
obs_homr = obs_hom1 if obs_hom1 < obs_hom2 else obs_hom2
obs_mac = 2 * obs_homr + obs_hets
obs_n = obs_hets + obs_homc + obs_homr
het_probs = np.zeros(obs_mac + 1, dtype=np.float64)
if obs_n == 0:
return np.nan # type: ignore[no-any-return]
# Identify distribution midpoint
mid = int(obs_mac * (2 * obs_n - obs_mac) / (2 * obs_n))
if (obs_mac & 1) ^ (mid & 1):
mid += 1
het_probs[mid] = 1.0
prob_sum = het_probs[mid]
# Integrate downward from distribution midpoint
curr_hets = mid
curr_homr = int((obs_mac - mid) / 2)
curr_homc = obs_n - curr_hets - curr_homr
while curr_hets > 1:
het_probs[curr_hets - 2] = (
het_probs[curr_hets]
* curr_hets
* (curr_hets - 1.0)
/ (4.0 * (curr_homr + 1.0) * (curr_homc + 1.0))
)
prob_sum += het_probs[curr_hets - 2]
curr_homr += 1
curr_homc += 1
curr_hets -= 2
# Integrate upward from distribution midpoint
curr_hets = mid
curr_homr = int((obs_mac - mid) / 2)
curr_homc = obs_n - curr_hets - curr_homr
while curr_hets <= obs_mac - 2:
het_probs[curr_hets + 2] = (
het_probs[curr_hets]
* 4.0
* curr_homr
* curr_homc
/ ((curr_hets + 2.0) * (curr_hets + 1.0))
)
prob_sum += het_probs[curr_hets + 2]
curr_homr -= 1
curr_homc -= 1
curr_hets += 2
if prob_sum <= 0: # pragma: no cover
return np.nan # type: ignore[no-any-return]
het_probs = het_probs / prob_sum
p = het_probs[het_probs <= het_probs[obs_hets]].sum()
p = max(min(1.0, p), 0.0)
return p # type: ignore[no-any-return]
# Benchmarks show ~25% improvement w/ fastmath on large (~10M) counts
hardy_weinberg_p_value_jit = njit(hardy_weinberg_p_value, fastmath=True)
def hardy_weinberg_p_value_vec(
obs_hets: ndarray, obs_hom1: ndarray, obs_hom2: ndarray
) -> ndarray:
arrs = [obs_hets, obs_hom1, obs_hom2]
if len(set(map(len, arrs))) != 1:
raise ValueError("All arrays must have same length")
if list(set(map(lambda x: x.ndim, arrs))) != [1]:
raise ValueError("All arrays must be 1D")
n = len(obs_hets)
p = np.empty(n, dtype=np.float64)
for i in range(n):
p[i] = hardy_weinberg_p_value_jit(obs_hets[i], obs_hom1[i], obs_hom2[i])
return p
hardy_weinberg_p_value_vec_jit = njit(hardy_weinberg_p_value_vec, fastmath=True)
def hardy_weinberg_test(
ds: Dataset, genotype_counts: Optional[Hashable] = None
) -> Dataset:
"""Exact test for HWE as described in Wigginton et al. 2005 [1].
Parameters
----------
ds : Dataset
Dataset containing genotype calls or precomputed genotype counts.
genotype_counts : Optional[Hashable], optional
Name of variable containing precomputed genotype counts, by default
None. If not provided, these counts will be computed automatically
from genotype calls. If present, must correspond to an (`N`, 3) array
where `N` is equal to the number of variants and the 3 columns contain
heterozygous, homozygous reference, and homozygous alternate counts
(in that order) across all samples for a variant.
Warnings
--------
This function is only applicable to diploid, biallelic datasets.
Returns
-------
Dataset
Dataset containing (N = num variants):
variant_hwe_p_value : (N,) ArrayLike
P values from HWE test for each variant as float in [0, 1].
References
----------
    - [1] Wigginton, J. E., D. J. Cutler, and G. R. Abecasis. 2005.
      “A Note on Exact Tests of Hardy-Weinberg Equilibrium.” American Journal of
      Human Genetics 76 (5): 887–93.
Raises
------
NotImplementedError
If ploidy of provided dataset != 2
NotImplementedError
If maximum number of alleles in provided dataset != 2
"""
if ds.dims["ploidy"] != 2:
raise NotImplementedError("HWE test only implemented for diploid genotypes")
if ds.dims["alleles"] != 2:
raise NotImplementedError("HWE test only implemented for biallelic genotypes")
# Use precomputed genotype counts if provided
if genotype_counts is not None:
obs = list(da.asarray(ds[genotype_counts]).T)
# Otherwise compute genotype counts from calls
else:
# TODO: Use API genotype counting function instead, e.g.
# https://github.com/pystatgen/sgkit/issues/29#issuecomment-656691069
M = ds["call_genotype_mask"].any(dim="ploidy")
AC = xr.where(M, -1, ds["call_genotype"].sum(dim="ploidy")) # type: ignore[no-untyped-call]
cts = [1, 0, 2] # arg order: hets, hom1, hom2
obs = [da.asarray((AC == ct).sum(dim="samples")) for ct in cts]
p = da.map_blocks(hardy_weinberg_p_value_vec_jit, *obs)
return xr.Dataset({"variant_hwe_p_value": ("variants", p)})
```
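A small sketch exercising both entry points: the scalar exact test on made-up genotype counts, and the Dataset-level wrapper on a simulated diploid, biallelic dataset. The counts and simulation parameters are illustrative only; the import paths follow the file layout shown above.
```python
import numpy as np
from sgkit.stats.hwe import hardy_weinberg_p_value, hardy_weinberg_test
from sgkit.testing import simulate_genotype_call_dataset

# Scalar exact test on illustrative counts (hets, hom ref, hom alt).
p = hardy_weinberg_p_value(obs_hets=30, obs_hom1=25, obs_hom2=45)
print(f"HWE exact p-value: {p:.4f}")

# Dataset-level test on simulated diploid, biallelic genotype calls.
ds = simulate_genotype_call_dataset(n_variant=100, n_sample=50, seed=0)
res = hardy_weinberg_test(ds)
print(res["variant_hwe_p_value"].values[:5])
```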
#### File: sgkit/stats/regenie.py
```python
from typing import Any, Dict, Hashable, Optional, Sequence, Tuple, Union
import dask.array as da
import numpy as np
import xarray as xr
from dask.array import Array
from numpy import ndarray
from xarray import Dataset
from ..typing import ArrayLike
from ..utils import split_array_chunks
from .utils import (
assert_array_shape,
assert_block_shape,
assert_chunk_shape,
concat_2d,
r2_score,
)
def index_array_blocks(
x: Union[ArrayLike, Sequence[int]], size: int
) -> Tuple[ndarray, ndarray]:
"""Generate indexes for blocks that partition an array within groups.
Given an array with monotonic increasing group assignments (as integers),
this function will generate the indexes of blocks within those groups that
are of at most `size` elements.
Parameters
----------
x : Union[ArrayLike, Sequence[int]]
Vector of group assignments, must be monotonic increasing.
Resulting blocks will never cross these group assignments
and the resulting `index` and `sizes` values constitute
covering slices for any array of the same size as `x`.
size : int
Maximum block size.
Examples
--------
>>> from sgkit.stats.regenie import index_array_blocks
>>> index_array_blocks([0, 0, 0], 2)
(array([0, 2]), array([2, 1]))
>>> index_array_blocks([0, 0, 1, 1, 1], 2)
(array([0, 2, 4]), array([2, 2, 1]))
Returns
-------
index : ndarray
Array of indexes for each block start
sizes : ndarray
Size of block such that `x[index[0]:(index[0] + sizes[0])]` contains
every element in block 0
Raises
------
ValueError
If `x` is not 1D.
ValueError
If `size` is <= 0.
ValueError
If `x` does not contain integers.
ValueError
If `x` is not monotonic increasing.
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(0, dtype=int), np.empty(0, dtype=int)
if x.ndim != 1:
raise ValueError(f"Array shape {x.shape} is not 1D")
if size <= 0:
raise ValueError(f"Block size {size} must be > 0")
if not np.issubdtype(x.dtype, np.integer):
raise ValueError("Array to partition must contain integers")
if np.any(np.diff(x) < 0):
raise ValueError("Array to partition must be monotonic increasing")
breaks = np.argwhere(np.diff(x, prepend=x[0]))[:, 0]
breaks = np.concatenate(([0], breaks, [x.size]))
index = np.concatenate(
[np.arange(breaks[i], breaks[i + 1], size) for i in range(breaks.size - 1)]
)
sizes = np.diff(index, append=x.size)
assert index.size == sizes.size
return index, sizes
def index_block_sizes(
sizes: Union[ArrayLike, Sequence[int]]
) -> Tuple[ndarray, ndarray]:
"""Generate indexes for blocks of specific sizes.
Parameters
----------
sizes : Union[ArrayLike, Sequence[int]]
Block sizes to generate indexes for.
Examples
--------
>>> from sgkit.stats.regenie import index_block_sizes
>>> index_block_sizes([3, 4, 5])
(array([0, 3, 7]), array([3, 4, 5]))
Returns
-------
index : ndarray
Array of indexes for each block start.
sizes : ndarray
Size of block such that `x[index[0]:(index[0] + sizes[0])]` contains
every element in block 0.
Raises
------
ValueError
If any value in `sizes` is <= 0.
ValueError
If `sizes` does not contain integers.
"""
sizes = np.asarray(sizes)
if np.any(sizes <= 0):
raise ValueError("All block sizes must be >= 0")
if not np.issubdtype(sizes.dtype, np.integer):
raise ValueError("Block sizes must be integers")
chunks = np.concatenate([np.array([0]), sizes])
index = np.cumsum(chunks)[:-1]
assert index.size == sizes.size
return index, sizes
def ridge_regression(
XtX: ArrayLike,
XtY: ArrayLike,
alphas: Union[ArrayLike, Sequence[float]],
n_zero_reg: Optional[int] = None,
dtype: Any = None,
) -> ArrayLike:
"""Multi-outcome, multi-parameter ridge regression from CV intermediates."""
if XtX.shape[0] != XtX.shape[1]:
raise ValueError(f"First argument must be symmetric (shape = {XtX.shape})")
if XtX.shape[0] != XtY.shape[0]:
raise ValueError("Array arguments must have same size in first dimension")
diags = []
n_alpha, n_obs, n_outcome = len(alphas), XtX.shape[0], XtY.shape[1]
for i in range(n_alpha):
diag = np.ones(XtX.shape[1]) * alphas[i]
if n_zero_reg:
# Optionally fix regularization for leading covariates
# TODO: This should probably be zero for consistency
# with orthogonalization, see:
# https://github.com/projectglow/glow/issues/266
diag[:n_zero_reg] = 1
diags.append(np.diag(diag))
diags = np.stack(diags)
B = np.linalg.inv(XtX + diags) @ XtY
B = B.astype(dtype or XtX.dtype)
assert_array_shape(B, n_alpha, n_obs, n_outcome)
return B
def get_alphas(
n_cols: int, heritability: Sequence[float] = [0.99, 0.75, 0.50, 0.25, 0.01]
) -> ndarray:
# https://github.com/projectglow/glow/blob/f3edf5bb8fe9c2d2e1a374d4402032ba5ce08e29/python/glow/wgr/linear_model/ridge_model.py#L80
return np.array([n_cols / h for h in heritability])
def stack(x: Array) -> Array:
"""Stack blocks as new leading array axis"""
return da.stack([x.blocks[i] for i in range(x.numblocks[0])])
def unstack(x: Array) -> Array:
"""Unstack leading array axis into blocks"""
return da.concatenate([x.blocks[i][0] for i in range(x.numblocks[0])])
def _ridge_regression_cv(
X: Array, Y: Array, alphas: ndarray, n_zero_reg: Optional[int] = None
) -> Tuple[Array, Array, Array, Array]:
assert alphas.ndim == 1
assert X.ndim == 2
assert Y.ndim == 2
assert X.numblocks[1] == 1
assert Y.numblocks[1] == 1
assert X.chunks[0] == Y.chunks[0]
n_block, n_obs, n_covar, n_outcome, n_alpha = (
X.numblocks[0],
X.shape[0],
X.shape[1],
Y.shape[1],
alphas.shape[0],
)
obs_chunks = X.chunks[0]
# Project samples and outcomes noting that resulting chunks are
# of fixed size even if the chunks along the observation dim
# are not uniform (i.e. |X.chunks[0]| != 1)
XtX = stack(da.map_blocks(lambda x: x.T @ x, X, chunks=(X.shape[1],) * 2))
assert_block_shape(XtX, n_block, 1, 1)
assert_chunk_shape(XtX, 1, n_covar, n_covar)
XtY = stack(da.map_blocks(lambda x, y: x.T @ y, X, Y, chunks=(n_covar, n_outcome)))
assert_block_shape(XtY, n_block, 1, 1)
assert_chunk_shape(XtY, 1, n_covar, n_outcome)
# Invert the projections in each block so that each
# contains data from all other blocks *except* itself
XtX = unstack(XtX.sum(axis=0) - XtX)
assert_block_shape(XtX, n_block, 1)
assert_chunk_shape(XtX, n_covar, n_covar)
XtY = unstack(XtY.sum(axis=0) - XtY)
assert_block_shape(XtY, n_block, 1)
assert_chunk_shape(XtY, n_covar, n_outcome)
assert XtX.numblocks == XtY.numblocks
# Regress for all outcomes/alphas and add new axis for ridge parameters
B = da.map_blocks(
ridge_regression,
XtX,
XtY,
chunks=(n_alpha, n_covar, n_outcome),
new_axis=[0],
alphas=alphas,
n_zero_reg=n_zero_reg,
)
assert_block_shape(B, 1, n_block, 1)
assert_chunk_shape(B, n_alpha, n_covar, n_outcome)
assert_array_shape(B, n_alpha, n_block * n_covar, n_outcome)
# Generate predictions for all outcomes/alphas
assert B.numblocks == (1,) + X.numblocks
YP = da.map_blocks(
lambda x, b: x @ b, X, B, chunks=(alphas.size, obs_chunks, n_outcome)
)
assert_block_shape(YP, 1, n_block, 1)
assert_chunk_shape(YP, n_alpha, obs_chunks[0], n_outcome)
assert_array_shape(YP, n_alpha, n_obs, n_outcome)
return XtX, XtY, B, YP
def _stage_1(G: Array, X: Array, Y: Array, alphas: Optional[ndarray] = None) -> Array:
"""Stage 1 - WGR Base Regression
This stage will predict outcomes separately for each alpha parameter and variant
block. This "compresses" the variant dimension into a smaller space that is
much more amenable to efficient blockwise regressions in stage 2. Another
interpretation for this operation is that all sample blocks are treated
as folds in a K-fold CV fit within one single variant block. Predictions for
any one combination of variant and sample block then correspond to a
regression model fit all across sample blocks for that range of variants
except for a single sample block. In other words, the predictions are
out of sample which enables training of a stage 2 regressor based on
these predictions, a technique commonly referred to as stacking.
For more details, see the level 0 regression model described in step 1
of [Mbatchou et al. 2020](https://www.biorxiv.org/content/10.1101/2020.06.19.162354v2).
"""
assert G.ndim == 2
assert X.ndim == 2
assert Y.ndim == 2
# Check that chunking across samples is the same for all arrays
assert G.shape[0] == X.shape[0] == Y.shape[0]
assert G.numblocks[0] == X.numblocks[0] == Y.numblocks[0]
assert G.chunks[0] == X.chunks[0] == Y.chunks[0]
assert X.numblocks[1] == Y.numblocks[1] == 1
if alphas is None:
alphas = get_alphas(G.shape[1])
# Extract shape statistics
n_sample = G.shape[0]
n_outcome = Y.shape[1]
n_alpha = alphas.size
n_sample_block = G.numblocks[0]
n_variant_block = G.numblocks[1]
sample_chunks = Y.chunks[0]
YP = []
for i in range(n_variant_block):
# Extract all sample blocks for one variant block
GB = G.blocks[:, i]
# Prepend covariates and chunk along first dim only
XGB = da.concatenate((X, GB), axis=1)
XGB = XGB.rechunk(chunks=(None, -1))
# Fit and predict folds for each parameter and outcome
YPB = _ridge_regression_cv(XGB, Y, alphas, n_zero_reg=X.shape[1])[-1]
assert_block_shape(YPB, 1, n_sample_block, 1)
assert_chunk_shape(YPB, n_alpha, sample_chunks[0], n_outcome)
assert_array_shape(YPB, n_alpha, n_sample, n_outcome)
YP.append(YPB)
# Stack as (n_variant_block, n_alpha, n_sample, n_outcome)
YP = da.stack(YP, axis=0)
assert_block_shape(YP, n_variant_block, 1, n_sample_block, 1)
assert_chunk_shape(YP, 1, n_alpha, sample_chunks[0], n_outcome)
assert_array_shape(YP, n_variant_block, n_alpha, n_sample, n_outcome)
return YP
def _stage_2(
YP: Array,
X: Array,
Y: Array,
alphas: Optional[ndarray] = None,
normalize: bool = True,
_glow_adj_alpha: bool = False,
_glow_adj_scaling: bool = False,
) -> Tuple[Array, Array]:
"""Stage 2 - WGR Meta Regression
This stage will train separate ridge regression models for each outcome
using the predictions from stage 1 for that same outcome as features. These
predictions are then evaluated based on R2 score to determine an optimal
"meta" estimator (see `_stage_1` for the "base" estimator description). Results
then include only predictions and coefficients from this optimal model.
For more details, see the level 1 regression model described in step 1
of [Mbatchou et al. 2020](https://www.biorxiv.org/content/10.1101/2020.06.19.162354v2).
"""
assert YP.ndim == 4
assert X.ndim == 2
assert Y.ndim == 2
# Check that chunking across samples is the same for all arrays
assert YP.numblocks[2] == X.numblocks[0] == Y.numblocks[0]
assert YP.chunks[2] == X.chunks[0] == Y.chunks[0]
# Assert single chunks for covariates and outcomes
assert X.numblocks[1] == Y.numblocks[1] == 1
# Extract shape statistics
n_variant_block, n_alpha_1 = YP.shape[:2]
n_sample_block = Y.numblocks[0]
n_sample, n_outcome = Y.shape
n_covar = X.shape[1]
n_indvar = n_covar + n_variant_block * n_alpha_1
sample_chunks = Y.chunks[0]
if normalize:
assert_block_shape(YP, n_variant_block, 1, n_sample_block, 1)
assert_chunk_shape(YP, 1, n_alpha_1, sample_chunks[0], n_outcome)
# See: https://github.com/projectglow/glow/issues/260
if _glow_adj_scaling:
YP = da.map_blocks(
lambda x: (x - x.mean(axis=2, keepdims=True))
/ x.std(axis=2, keepdims=True),
YP,
)
else:
YP = (YP - YP.mean(axis=2, keepdims=True)) / YP.std(axis=2, keepdims=True)
    # Transpose for refit on level 1 predictions
YP = YP.transpose((3, 2, 0, 1))
assert_array_shape(YP, n_outcome, n_sample, n_variant_block, n_alpha_1)
if alphas is None:
# See: https://github.com/projectglow/glow/issues/255
if _glow_adj_alpha:
alphas = get_alphas(n_variant_block * n_alpha_1 * n_outcome)
else:
alphas = get_alphas(n_variant_block * n_alpha_1)
n_alpha_2 = alphas.size
YR = []
BR = []
for i in range(n_outcome):
# Slice and reshape to new 2D covariate matrix;
# The order of raveling in trailing dimensions is important
# and later reshapes will assume variants, alphas order
XPB = YP[i].reshape((n_sample, n_variant_block * n_alpha_1))
# Prepend covariates and chunk along first dim only
XPB = da.concatenate((X, XPB), axis=1)
XPB = XPB.rechunk(chunks=(None, -1))
assert_array_shape(XPB, n_sample, n_indvar)
assert XPB.numblocks == (n_sample_block, 1)
# Extract outcome vector
YB = Y[:, [i]]
assert XPB.ndim == YB.ndim == 2
# Fit and predict folds for each parameter
BB, YPB = _ridge_regression_cv(XPB, YB, alphas, n_zero_reg=n_covar)[-2:]
assert_array_shape(BB, n_alpha_2, n_sample_block * n_indvar, 1)
assert_array_shape(YPB, n_alpha_2, n_sample, 1)
BR.append(BB)
YR.append(YPB)
# Concatenate predictions along outcome dimension
YR = da.concatenate(YR, axis=2)
assert_block_shape(YR, 1, n_sample_block, n_outcome)
assert_chunk_shape(YR, n_alpha_2, sample_chunks[0], 1)
assert_array_shape(YR, n_alpha_2, n_sample, n_outcome)
# Move samples to last dim so all others are batch
# dims for R2 calculations
YR = da.transpose(YR, (0, 2, 1))
assert_array_shape(YR, n_alpha_2, n_outcome, n_sample)
YR = YR.rechunk((-1, -1, None))
assert_block_shape(YR, 1, 1, n_sample_block)
assert YR.shape[1:] == Y.T.shape
# Concatenate betas along outcome dimension
BR = da.concatenate(BR, axis=2)
assert_block_shape(BR, 1, n_sample_block, n_outcome)
assert_chunk_shape(BR, n_alpha_2, n_indvar, 1)
assert_array_shape(BR, n_alpha_2, n_sample_block * n_indvar, n_outcome)
# Compute R2 scores within each sample block for each outcome + alpha
R2 = da.stack(
[
r2_score(YR.blocks[..., i], Y.T.blocks[..., i])
# Avoid warnings on R2 calculations for blocks with single rows
if YR.chunks[-1][i] > 1 else da.full(YR.shape[:-1], np.nan)
for i in range(n_sample_block)
]
)
assert_array_shape(R2, n_sample_block, n_alpha_2, n_outcome)
# Coerce to finite or nan before nan-aware mean
R2 = da.where(da.isfinite(R2), R2, np.nan)
# Find highest mean alpha score for each outcome across blocks
R2M = da.nanmean(R2, axis=0)
assert_array_shape(R2M, n_alpha_2, n_outcome)
# Identify index for the alpha value with the highest mean score
R2I = da.argmax(R2M, axis=0)
assert_array_shape(R2I, n_outcome)
# Choose the predictions corresponding to the model with best score
YRM = da.stack([YR[R2I[i], i, :] for i in range(n_outcome)], axis=-1)
YRM = YRM.rechunk((None, -1))
assert_block_shape(YRM, n_sample_block, 1)
assert_chunk_shape(YRM, sample_chunks[0], n_outcome)
assert_array_shape(YRM, n_sample, n_outcome)
# Choose the betas corresponding to the model with the best score
BRM = da.stack([BR[R2I[i], :, i] for i in range(n_outcome)], axis=-1)
BRM = BRM.rechunk((None, -1))
assert_block_shape(BRM, n_sample_block, 1)
assert_chunk_shape(BRM, n_indvar, n_outcome)
assert_array_shape(BRM, n_sample_block * n_indvar, n_outcome)
return BRM, YRM
def _stage_3(
B: Array,
YP: Array,
X: Array,
Y: Array,
contigs: Array,
variant_chunk_start: ndarray,
) -> Optional[Array]:
"""Stage 3 - Leave-one-chromosome-out (LOCO) Estimation
This stage will use the coefficients for the optimal model in
stage 2 to re-estimate predictions in a LOCO scheme. This scheme
involves omitting coefficients that correspond to all variant
blocks for a single chromosome in the stage 2 model and then
recomputing predictions without those coefficients.
For more details, see the "LOCO predictions" section of the Supplementary Methods
in [Mbatchou et al. 2020](https://www.biorxiv.org/content/10.1101/2020.06.19.162354v2).
"""
assert B.ndim == 2
assert YP.ndim == 4
assert X.ndim == 2
assert Y.ndim == 2
# Check that chunking across samples is the same for all arrays
assert B.numblocks[0] == YP.numblocks[2] == X.numblocks[0] == Y.numblocks[0]
assert YP.chunks[2] == X.chunks[0] == Y.chunks[0]
# Extract shape statistics
sample_chunks = Y.chunks[0]
n_covar = X.shape[1]
n_variant_block, n_alpha_1 = YP.shape[:2]
n_indvar = n_covar + n_variant_block * n_alpha_1
n_sample_block = Y.numblocks[0]
n_sample, n_outcome = Y.shape
# Determine unique contigs to create LOCO estimates for
contigs = np.asarray(contigs)
unique_contigs = np.unique(contigs)
n_contig = len(unique_contigs)
if n_contig <= 1:
# Return nothing w/o at least 2 contigs
return None
assert n_variant_block == len(variant_chunk_start)
# Create vector of size `n_variant_block` where value
# at index i corresponds to contig for variant block i
variant_block_contigs = contigs[variant_chunk_start]
# Transform coefficients (B) such that trailing dimensions
# contain right half of matrix product for prediction:
# (n_sample_block * n_indvar, n_outcome) ->
# (n_outcome, n_sample_block, n_indvar)
B = da.stack([B.blocks[i] for i in range(n_sample_block)], axis=0)
assert_block_shape(B, n_sample_block, 1, 1)
assert_chunk_shape(B, 1, n_indvar, n_outcome)
assert_array_shape(B, n_sample_block, n_indvar, n_outcome)
B = da.transpose(B, (2, 0, 1))
assert_block_shape(B, 1, n_sample_block, 1)
assert_chunk_shape(B, n_outcome, 1, n_indvar)
assert_array_shape(B, n_outcome, n_sample_block, n_indvar)
# Decompose coefficients (B) so that variant blocks can be sliced:
# BX -> (n_outcome, n_sample_block, n_covar)
# BYP -> (n_outcome, n_sample_block, n_variant_block, n_alpha_1)
BX = B[..., :n_covar]
assert_array_shape(BX, n_outcome, n_sample_block, n_covar)
BYP = B[..., n_covar:]
assert_array_shape(BYP, n_outcome, n_sample_block, n_variant_block * n_alpha_1)
BYP = BYP.reshape((n_outcome, n_sample_block, n_variant_block, n_alpha_1))
assert_block_shape(BYP, 1, n_sample_block, 1, 1)
assert_chunk_shape(BYP, n_outcome, 1, n_variant_block, n_alpha_1)
assert_array_shape(BYP, n_outcome, n_sample_block, n_variant_block, n_alpha_1)
# Transform base predictions (YP) such that trailing dimensions
# contain left half of matrix product for prediction as well
# as variant blocks to slice on:
# (n_variant_block, n_alpha_1, n_sample, n_outcome) ->
# (n_outcome, n_sample, n_variant_block, n_alpha_1)
YP = da.transpose(YP, (3, 2, 0, 1))
assert_block_shape(YP, 1, n_sample_block, n_variant_block, 1)
assert_chunk_shape(YP, n_outcome, sample_chunks[0], 1, n_alpha_1)
assert_array_shape(YP, n_outcome, n_sample, n_variant_block, n_alpha_1)
def apply(X: Array, YP: Array, BX: Array, BYP: Array) -> Array:
# Collapse selected variant blocks and alphas into single
# new covariate dimension
assert YP.shape[2] == BYP.shape[2]
n_group_covar = n_covar + BYP.shape[2] * n_alpha_1
BYP = BYP.reshape((n_outcome, n_sample_block, -1))
BG = da.concatenate((BX, BYP), axis=-1)
BG = BG.rechunk((-1, None, -1))
assert_block_shape(BG, 1, n_sample_block, 1)
assert_chunk_shape(BG, n_outcome, 1, n_group_covar)
assert_array_shape(BG, n_outcome, n_sample_block, n_group_covar)
YP = YP.reshape((n_outcome, n_sample, -1))
XYP = da.broadcast_to(X, (n_outcome, n_sample, n_covar))
XG = da.concatenate((XYP, YP), axis=-1)
XG = XG.rechunk((-1, None, -1))
assert_block_shape(XG, 1, n_sample_block, 1)
assert_chunk_shape(XG, n_outcome, sample_chunks[0], n_group_covar)
assert_array_shape(XG, n_outcome, n_sample, n_group_covar)
YG = da.map_blocks(
# Block chunks:
# (n_outcome, sample_chunks[0], n_group_covar) @
# (n_outcome, n_group_covar, 1) [after transpose]
lambda x, b: x @ b.transpose((0, 2, 1)),
XG,
BG,
chunks=(n_outcome, sample_chunks, 1),
)
assert_block_shape(YG, 1, n_sample_block, 1)
assert_chunk_shape(YG, n_outcome, sample_chunks[0], 1)
assert_array_shape(YG, n_outcome, n_sample, 1)
YG = da.squeeze(YG, axis=-1).T
assert_block_shape(YG, n_sample_block, 1)
assert_chunk_shape(YG, sample_chunks[0], n_outcome)
assert_array_shape(YG, n_sample, n_outcome)
return YG
# For each contig, generate predictions for all sample+outcome
# combinations using only betas from stage 2 results that
# correspond to *other* contigs (i.e. LOCO)
YC = []
for contig in unique_contigs:
# Define a variant block mask of size `n_variant_block`
# determining which blocks correspond to this contig
variant_block_mask = variant_block_contigs == contig
BYPC = BYP[:, :, ~variant_block_mask, :]
YPC = YP[:, :, ~variant_block_mask, :]
YGC = apply(X, YPC, BX, BYPC)
YC.append(YGC)
YC = da.stack(YC, axis=0)
assert_array_shape(YC, n_contig, n_sample, n_outcome)
return YC
def _variant_block_indexes(
variant_block_size: Union[int, Tuple[int, ...]], contigs: ArrayLike
) -> Tuple[ndarray, ndarray]:
if isinstance(variant_block_size, tuple):
return index_block_sizes(variant_block_size)
elif isinstance(variant_block_size, int):
return index_array_blocks(contigs, variant_block_size)
else:
raise ValueError(
f"Variant block size type {type(variant_block_size)} "
"must be tuple or int"
)
DESC_BASE_PRED = """Predictions from base ridge regressors for every variant block, alpha, sample and outcome"""
DESC_META_PRED = (
"""Predictions from best meta ridge model selected through CV over sample blocks"""
)
DESC_LOCO_PRED = """Predictions from best meta ridge model omitting coefficients for variant blocks within individual contigs (LOCO approximation)"""
def regenie_transform(
G: ArrayLike,
X: ArrayLike,
Y: ArrayLike,
contigs: ArrayLike,
*,
variant_block_size: Optional[Union[int, Tuple[int, ...]]] = None,
sample_block_size: Optional[Union[int, Tuple[int, ...]]] = None,
alphas: Optional[Sequence[float]] = None,
add_intercept: bool = True,
orthogonalize: bool = False,
normalize: bool = False,
_glow_adj_dof: bool = False,
_glow_adj_alpha: bool = False,
_glow_adj_scaling: bool = False,
) -> Dataset:
"""Regenie trait transformation.
Parameters
----------
G : (M, N) ArrayLike
Genotype data array, `M` samples by `N` variants.
X : (M, C) ArrayLike
Covariate array, `M` samples by `C` covariates.
Y : (M, O) ArrayLike
Outcome array, `M` samples by `O` outcomes.
contigs : (N,) ArrayLike
        Variant contigs as monotonic increasing integer contig index.
See the `regenie` function for documentation on remaining fields.
Returns
-------
Dataset
A dataset containing the following variables:
- `base_prediction` (blocks, alphas, samples, outcomes): Stage 1
        predictions from ridge regression reduction.
- `meta_prediction` (samples, outcomes): Stage 2 predictions from
the best meta estimator trained on the out-of-sample Stage 1
predictions.
- `loco_prediction` (contigs, samples, outcomes): LOCO predictions
resulting from Stage 2 predictions ignoring effects for variant
blocks on held out contigs. This will be absent if the
data provided does not contain at least 2 contigs.
Raises
------
ValueError
If `G`, `X`, and `Y` do not have the same size along
the first (samples) dimension.
"""
if not G.shape[0] == X.shape[0] == Y.shape[0]:
raise ValueError(
"All data arrays must have same size along first (samples) dimension "
f"(shapes provided: G={G.shape}, X={X.shape}, Y={Y.shape})"
)
n_sample = Y.shape[0]
n_variant = G.shape[1]
if alphas is not None:
alphas = np.asarray(alphas)
G, X, Y = da.asarray(G), da.asarray(X), da.asarray(Y)
contigs = da.asarray(contigs)
# Set default block sizes if not provided
if variant_block_size is None:
# Block in groups of 1000, unless dataset is small
# enough to default to 2 blocks (typically for tests)
variant_block_size = min(1000, n_variant // 2)
if sample_block_size is None:
# Break into 10 chunks of approximately equal size
sample_block_size = tuple(split_array_chunks(n_sample, min(10, n_sample)))
assert sum(sample_block_size) == n_sample
if normalize:
# See: https://github.com/projectglow/glow/issues/255
dof = 1 if _glow_adj_dof else 0
G = (G - G.mean(axis=0)) / G.std(axis=0, ddof=dof)
Y = (Y - Y.mean(axis=0)) / Y.std(axis=0)
X = (X - X.mean(axis=0)) / X.std(axis=0)
if add_intercept:
X = da.concatenate([da.ones((X.shape[0], 1), dtype=X.dtype), X], axis=1)
# TODO: Test this after finding out whether or not there was a good reason
# it was precluded in glow by unit covariate regularization:
# https://github.com/projectglow/glow/issues/266
if orthogonalize: # pragma: no cover
G = G - X @ da.linalg.lstsq(X, G)[0]
Y = Y - X @ da.linalg.lstsq(X, Y)[0]
G = G / G.std(axis=0)
Y = Y / Y.std(axis=0)
X = da.zeros(shape=(n_sample, 0), dtype=G.dtype)
variant_chunk_start, variant_chunk_size = _variant_block_indexes(
variant_block_size, contigs
)
G = G.rechunk(chunks=(sample_block_size, tuple(variant_chunk_size)))
X = X.rechunk(chunks=(sample_block_size, -1))
Y = Y.rechunk(chunks=(sample_block_size, -1))
YP1 = _stage_1(G, X, Y, alphas=alphas)
B2, YP2 = _stage_2(
YP1,
X,
Y,
alphas=alphas,
_glow_adj_alpha=_glow_adj_alpha,
_glow_adj_scaling=_glow_adj_scaling,
)
YP3 = _stage_3(B2, YP1, X, Y, contigs, variant_chunk_start)
data_vars: Dict[Hashable, Any] = {}
data_vars["base_prediction"] = xr.DataArray(
YP1,
dims=("blocks", "alphas", "samples", "outcomes"),
attrs={"description": DESC_BASE_PRED},
)
data_vars["meta_prediction"] = xr.DataArray(
YP2, dims=("samples", "outcomes"), attrs={"description": DESC_META_PRED}
)
if YP3 is not None:
data_vars["loco_prediction"] = xr.DataArray(
YP3,
dims=("contigs", "samples", "outcomes"),
attrs={"description": DESC_LOCO_PRED},
)
return xr.Dataset(data_vars)
def regenie(
ds: Dataset,
*,
dosage: str,
covariates: Union[str, Sequence[str]],
traits: Union[str, Sequence[str]],
variant_block_size: Optional[Union[int, Tuple[int, ...]]] = None,
sample_block_size: Optional[Union[int, Tuple[int, ...]]] = None,
alphas: Optional[Sequence[float]] = None,
add_intercept: bool = True,
normalize: bool = False,
orthogonalize: bool = False,
**kwargs: Any,
) -> Dataset:
"""Regenie trait transformation.
`REGENIE <https://github.com/rgcgithub/regenie>`_ is a whole-genome
regression technique that produces trait estimates for association
tests. These estimates are subtracted from trait values and
sampling statistics (p-values, standard errors, etc.) are evaluated
against the residuals. See the REGENIE preprint [1] for more details.
For a simpler technical overview, see [2] for a detailed description
of the individual stages and separate regression models involved.
Parameters
----------
dosage : str
Name of genetic dosage variable.
covariates : Union[str, Sequence[str]]
Names of covariate variables (1D or 2D).
traits : Union[str, Sequence[str]]
Names of trait variables (1D or 2D).
variant_block_size : Optional[Union[int, Tuple[int]]], optional
Number of variants in each block.
If int, this describes the number of variants in each block
but the last which may be smaller.
If Tuple[int, ...], this must describe the desired number of
variants in each block individually.
Defaults to 1000 or num variants // 2, whichever is smaller.
sample_block_size : Optional[Union[int, Tuple[int]]], optional
Number of samples in each block.
If int, this describes the number of samples in each block
but the last which may be smaller.
If Tuple[int, ...], this must describe the desired number of
samples in each block individually.
Defaults to 10 sample blocks split roughly across all possible
samples or the number of samples, if that number is < 10.
alphas : Optional[Sequence[float]], optional
List of alpha values to use for regularization, by default None.
        If not provided, these will be set automatically based on
        data size and a priori heritability assumptions.
add_intercept : bool
Whether or not to add intercept to covariates, by default True.
normalize : bool
Rescale genotypes, traits, and covariates to have
mean 0 and stdev 1, by default False.
orthogonalize : bool
**Experimental**: Remove covariates through orthogonalization
of genotypes and traits, by default False.
Warnings
--------
Binary traits are not yet supported so all outcomes provided
must be continuous.
Returns
-------
Dataset
A dataset containing the following variables:
- `base_prediction` (blocks, alphas, samples, outcomes): Stage 1
        predictions from ridge regression reduction.
- `meta_prediction` (samples, outcomes): Stage 2 predictions from
the best meta estimator trained on the out-of-sample Stage 1
predictions.
- `loco_prediction` (contigs, samples, outcomes): LOCO predictions
resulting from Stage 2 predictions ignoring effects for variant
blocks on held out contigs. This will be absent if the
data provided does not contain at least 2 contigs.
Raises
------
ValueError
If dosage, covariates, and trait arrays do not have the same number
of samples.
Examples
--------
>>> import numpy as np
>>> from sgkit.testing import simulate_genotype_call_dataset
>>> from sgkit.stats.regenie import regenie
>>> n_variant, n_sample, n_contig, n_covariate, n_trait, seed = 100, 50, 2, 3, 5, 0
>>> rs = np.random.RandomState(seed)
>>> ds = simulate_genotype_call_dataset(n_variant=n_variant, n_sample=n_sample, n_contig=n_contig, seed=seed)
>>> ds["call_dosage"] = (("variants", "samples"), rs.normal(size=(n_variant, n_sample)))
>>> ds["sample_covariate"] = (("samples", "covariates"), rs.normal(size=(n_sample, n_covariate)))
>>> ds["sample_trait"] = (("samples", "traits"), rs.normal(size=(n_sample, n_trait)))
>>> res = regenie(ds, dosage="call_dosage", covariates="sample_covariate", traits="sample_trait")
>>> res.compute() # doctest: +NORMALIZE_WHITESPACE
<xarray.Dataset>
Dimensions: (alphas: 5, blocks: 2, contigs: 2, outcomes: 5, samples: 50)
Dimensions without coordinates: alphas, blocks, contigs, outcomes, samples
Data variables:
base_prediction (blocks, alphas, samples, outcomes) float64 0.3343 ... -...
meta_prediction (samples, outcomes) float64 -0.4588 0.78 ... -0.3984 0.3734
loco_prediction (contigs, samples, outcomes) float64 0.4886 ... -0.01498
References
----------
[1] - <NAME>., <NAME>, <NAME>, and <NAME>cketta. 2020.
“Computationally Efficient Whole Genome Regression for Quantitative and Binary
Traits.” bioRxiv. https://www.biorxiv.org/content/10.1101/2020.06.19.162354v2.abstract.
[2] - https://glow.readthedocs.io/en/latest/tertiary/whole-genome-regression.html
"""
if isinstance(covariates, str):
covariates = [covariates]
if isinstance(traits, str):
traits = [traits]
G = ds[dosage]
X = da.asarray(concat_2d(ds[list(covariates)], dims=("samples", "covariates")))
Y = da.asarray(concat_2d(ds[list(traits)], dims=("samples", "traits")))
contigs = ds["variant_contig"]
return regenie_transform(
G.T,
X,
Y,
contigs,
variant_block_size=variant_block_size,
sample_block_size=sample_block_size,
alphas=alphas,
add_intercept=add_intercept,
normalize=normalize,
orthogonalize=orthogonalize,
**kwargs,
)
```
#### File: sgkit/tests/test_aggregation.py
```python
from typing import Any
import numpy as np
import xarray as xr
from xarray import Dataset
from sgkit.stats.aggregation import count_call_alleles, count_variant_alleles
from sgkit.testing import simulate_genotype_call_dataset
from sgkit.typing import ArrayLike
def get_dataset(calls: ArrayLike, **kwargs: Any) -> Dataset:
calls = np.asarray(calls)
ds = simulate_genotype_call_dataset(
n_variant=calls.shape[0], n_sample=calls.shape[1], **kwargs
)
dims = ds["call_genotype"].dims
ds["call_genotype"] = xr.DataArray(calls, dims=dims)
ds["call_genotype_mask"] = xr.DataArray(calls < 0, dims=dims)
return ds
def test_count_variant_alleles__single_variant_single_sample():
ds = count_variant_alleles(get_dataset([[[1, 0]]]))
assert "call_genotype" in ds
ac = ds["variant_allele_count"]
np.testing.assert_equal(ac, np.array([[1, 1]]))
def test_count_variant_alleles__multi_variant_single_sample():
ds = count_variant_alleles(get_dataset([[[0, 0]], [[0, 1]], [[1, 0]], [[1, 1]]]))
ac = ds["variant_allele_count"]
np.testing.assert_equal(ac, np.array([[2, 0], [1, 1], [1, 1], [0, 2]]))
def test_count_variant_alleles__single_variant_multi_sample():
ds = count_variant_alleles(get_dataset([[[0, 0], [1, 0], [0, 1], [1, 1]]]))
ac = ds["variant_allele_count"]
np.testing.assert_equal(ac, np.array([[4, 4]]))
def test_count_variant_alleles__multi_variant_multi_sample():
ds = count_variant_alleles(
get_dataset(
[
[[0, 0], [0, 0], [0, 0]],
[[0, 0], [0, 0], [0, 1]],
[[1, 1], [0, 1], [1, 0]],
[[1, 1], [1, 1], [1, 1]],
]
)
)
ac = ds["variant_allele_count"]
np.testing.assert_equal(ac, np.array([[6, 0], [5, 1], [2, 4], [0, 6]]))
def test_count_variant_alleles__missing_data():
ds = count_variant_alleles(
get_dataset(
[
[[-1, -1], [-1, -1], [-1, -1]],
[[-1, -1], [0, 0], [-1, 1]],
[[1, 1], [-1, -1], [-1, 0]],
[[1, 1], [1, 1], [1, 1]],
]
)
)
ac = ds["variant_allele_count"]
np.testing.assert_equal(ac, np.array([[0, 0], [2, 1], [1, 2], [0, 6]]))
def test_count_variant_alleles__higher_ploidy():
ds = count_variant_alleles(
get_dataset(
[
[[-1, -1, 0], [-1, -1, 1], [-1, -1, 2]],
[[0, 1, 2], [1, 2, 3], [-1, -1, -1]],
],
n_allele=4,
n_ploidy=3,
)
)
ac = ds["variant_allele_count"]
np.testing.assert_equal(ac, np.array([[1, 1, 1, 0], [1, 2, 2, 1]]))
def test_count_variant_alleles__chunked():
rs = np.random.RandomState(0)
calls = rs.randint(0, 1, size=(50, 10, 2))
ds = get_dataset(calls)
ac1 = count_variant_alleles(ds)
# Coerce from numpy to multiple chunks in all dimensions
ds["call_genotype"] = ds["call_genotype"].chunk(chunks=(5, 5, 1)) # type: ignore[arg-type]
ac2 = count_variant_alleles(ds)
xr.testing.assert_equal(ac1, ac2) # type: ignore[no-untyped-call]
def test_count_variant_alleles__no_merge():
ds = count_variant_alleles(get_dataset([[[1, 0]]]), merge=False)
assert "call_genotype" not in ds
ac = ds["variant_allele_count"]
np.testing.assert_equal(ac, np.array([[1, 1]]))
def test_count_call_alleles__single_variant_single_sample():
ds = count_call_alleles(get_dataset([[[1, 0]]]))
ac = ds["call_allele_count"]
np.testing.assert_equal(ac, np.array([[[1, 1]]]))
def test_count_call_alleles__multi_variant_single_sample():
ds = count_call_alleles(get_dataset([[[0, 0]], [[0, 1]], [[1, 0]], [[1, 1]]]))
ac = ds["call_allele_count"]
np.testing.assert_equal(ac, np.array([[[2, 0]], [[1, 1]], [[1, 1]], [[0, 2]]]))
def test_count_call_alleles__single_variant_multi_sample():
ds = count_call_alleles(get_dataset([[[0, 0], [1, 0], [0, 1], [1, 1]]]))
ac = ds["call_allele_count"]
np.testing.assert_equal(ac, np.array([[[2, 0], [1, 1], [1, 1], [0, 2]]]))
def test_count_call_alleles__multi_variant_multi_sample():
ds = count_call_alleles(
get_dataset(
[
[[0, 0], [0, 0], [0, 0]],
[[0, 0], [0, 0], [0, 1]],
[[1, 1], [0, 1], [1, 0]],
[[1, 1], [1, 1], [1, 1]],
]
)
)
ac = ds["call_allele_count"]
np.testing.assert_equal(
ac,
np.array(
[
[[2, 0], [2, 0], [2, 0]],
[[2, 0], [2, 0], [1, 1]],
[[0, 2], [1, 1], [1, 1]],
[[0, 2], [0, 2], [0, 2]],
]
),
)
def test_count_call_alleles__missing_data():
ds = count_call_alleles(
get_dataset(
[
[[-1, -1], [-1, -1], [-1, -1]],
[[-1, -1], [0, 0], [-1, 1]],
[[1, 1], [-1, -1], [-1, 0]],
[[1, 1], [1, 1], [1, 1]],
]
)
)
ac = ds["call_allele_count"]
np.testing.assert_equal(
ac,
np.array(
[
[[0, 0], [0, 0], [0, 0]],
[[0, 0], [2, 0], [0, 1]],
[[0, 2], [0, 0], [1, 0]],
[[0, 2], [0, 2], [0, 2]],
]
),
)
def test_count_call_alleles__higher_ploidy():
ds = count_call_alleles(
get_dataset(
[
[[-1, -1, 0], [-1, -1, 1], [-1, -1, 2]],
[[0, 1, 2], [1, 2, 3], [-1, -1, -1]],
],
n_allele=4,
n_ploidy=3,
)
)
ac = ds["call_allele_count"]
np.testing.assert_equal(
ac,
np.array(
[
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]],
[[1, 1, 1, 0], [0, 1, 1, 1], [0, 0, 0, 0]],
]
),
)
def test_count_call_alleles__chunked():
rs = np.random.RandomState(0)
calls = rs.randint(0, 1, size=(50, 10, 2))
ds = get_dataset(calls)
ac1 = count_call_alleles(ds)
# Coerce from numpy to multiple chunks in all dimensions
ds["call_genotype"] = ds["call_genotype"].chunk(chunks=(5, 5, 1)) # type: ignore[arg-type]
ac2 = count_call_alleles(ds)
xr.testing.assert_equal(ac1, ac2) # type: ignore[no-untyped-call]
```
#### File: sgkit/tests/test_association.py
```python
import warnings
from typing import Any, Dict, List, Optional, Sequence, Tuple
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from pandas import DataFrame
from xarray import Dataset
from sgkit.stats.association import gwas_linear_regression, linear_regression
from sgkit.typing import ArrayLike
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
# Ignore: DeprecationWarning: Using or importing the ABCs from 'collections'
# instead of from 'collections.abc' is deprecated since Python 3.3,
# and in 3.9 it will stop working
import statsmodels.api as sm
from statsmodels.regression.linear_model import RegressionResultsWrapper
def _generate_test_data(
n: int = 100,
m: int = 10,
p: int = 3,
e_std: float = 0.001,
b_zero_slice: Optional[slice] = None,
seed: Optional[int] = 1,
) -> Tuple[ArrayLike, ArrayLike, ArrayLike, ArrayLike]:
"""Test data simulator for multiple variant associations to a continuous outcome
Outcomes for each variant are simulated separately based on linear combinations
of randomly generated fixed effect covariates as well as the variant itself.
This does not add an intercept term in covariates.
Parameters
----------
n : int, optional
Number of samples
m : int, optional
Number of variants
p : int, optional
Number of covariates
e_std : float, optional
Standard deviation for noise term
b_zero_slice : slice
Variant beta values to zero out, defaults to `slice(m // 2)`
meaning that the first half will all be 0.
Set to `slice(0)` to disable.
Returns
-------
g : (n, m) array-like
Simulated genotype dosage
x : (n, p) array-like
Simulated covariates
bg : (m,) array-like
Variant betas
ys : (m, n) array-like
Outcomes for each column in genotypes i.e. variant
"""
if b_zero_slice is None:
b_zero_slice = slice(m // 2)
rs = np.random.RandomState(seed)
g = rs.uniform(size=(n, m), low=0, high=2)
x = rs.normal(size=(n, p))
bg = rs.normal(size=m)
bg[b_zero_slice or slice(m // 2)] = 0
bx = rs.normal(size=p)
e = rs.normal(size=n, scale=e_std)
# Simulate y values using each variant independently
ys = np.array([g[:, i] * bg[i] + x @ bx + e for i in range(m)])
return g, x, bg, ys
def _generate_test_dataset(**kwargs: Any) -> Dataset:
g, x, bg, ys = _generate_test_data(**kwargs)
data_vars = {}
data_vars["dosage"] = (["variants", "samples"], g.T)
for i in range(x.shape[1]):
data_vars[f"covar_{i}"] = (["samples"], x[:, i])
for i in range(ys.shape[0]):
# Traits are NOT multivariate simulations based on
# values of multiple variants; they instead correspond
# 1:1 with variants such that variant i has no causal
# relationship with trait j where i != j
data_vars[f"trait_{i}"] = (["samples"], ys[i])
attrs = dict(beta=bg, n_trait=ys.shape[0], n_covar=x.shape[1])
return xr.Dataset(data_vars, attrs=attrs) # type: ignore[arg-type]
@pytest.fixture(scope="module")
def ds() -> Dataset:
return _generate_test_dataset()
def _sm_statistics(
ds: Dataset, i: int, add_intercept: bool
) -> RegressionResultsWrapper:
X = []
# Make sure first independent variable is variant
X.append(ds["dosage"].values[i])
for v in [c for c in list(ds.keys()) if c.startswith("covar_")]:
X.append(ds[v].values)
if add_intercept:
X.append(np.ones(ds.dims["samples"]))
X = np.stack(X).T
y = ds[f"trait_{i}"].values
return sm.OLS(y, X, hasconst=True).fit()
def _get_statistics(
ds: Dataset, add_intercept: bool, **kwargs: Any
) -> Tuple[DataFrame, DataFrame]:
df_pred: List[Dict[str, Any]] = []
df_true: List[Dict[str, Any]] = []
for i in range(ds.dims["variants"]):
dsr = gwas_linear_regression(
ds,
dosage="dosage",
traits=[f"trait_{i}"],
add_intercept=add_intercept,
**kwargs,
)
res = _sm_statistics(ds, i, add_intercept)
df_pred.append(
dsr.to_dataframe() # type: ignore[no-untyped-call]
.rename(columns=lambda c: c.replace("variant_", ""))
.iloc[i]
.to_dict()
)
        # First result in statsmodels RegressionResultsWrapper for
# [t|p]values will correspond to variant (not covariate/intercept)
df_true.append(dict(t_value=res.tvalues[0], p_value=res.pvalues[0]))
return pd.DataFrame(df_pred), pd.DataFrame(df_true)
def test_gwas_linear_regression__validate_statistics(ds):
# Validate regression statistics against statsmodels for
# exact equality (within floating point tolerance)
def validate(dfp: DataFrame, dft: DataFrame) -> None:
# Validate results at a higher level, looking only for recapitulation
# of more obvious inferences based on how the data was simulated
np.testing.assert_allclose(dfp["beta"], ds.attrs["beta"], atol=1e-3)
mid_idx = ds.dims["variants"] // 2
assert np.all(dfp["p_value"].iloc[:mid_idx] > 0.05)
assert np.all(dfp["p_value"].iloc[mid_idx:] < 0.05)
# Validate more precisely against statsmodels results
np.testing.assert_allclose(dfp["t_value"], dft["t_value"])
np.testing.assert_allclose(dfp["p_value"], dft["p_value"])
dfp, dft = _get_statistics(
ds, covariates=["covar_0", "covar_1", "covar_2"], add_intercept=True
)
validate(dfp, dft)
dfp, dft = _get_statistics(
ds.assign(covar_3=("samples", np.ones(ds.dims["samples"]))),
covariates=["covar_0", "covar_1", "covar_2", "covar_3"],
add_intercept=False,
)
validate(dfp, dft)
def test_gwas_linear_regression__multi_trait(ds):
def run(traits: Sequence[str]) -> Dataset:
return gwas_linear_regression(
ds,
dosage="dosage",
covariates=["covar_0"],
traits=traits,
add_intercept=True,
)
traits = [f"trait_{i}" for i in range(ds.attrs["n_trait"])]
# Run regressions on individual traits and concatenate resulting statistics
dfr_single = xr.concat([run([t]) for t in traits], dim="traits").to_dataframe() # type: ignore[no-untyped-call]
    # Run regressions on all traits simultaneously
dfr_multi: DataFrame = run(traits).to_dataframe() # type: ignore[no-untyped-call]
pd.testing.assert_frame_equal(dfr_single, dfr_multi)
def test_gwas_linear_regression__scalar_vars(ds: xr.Dataset) -> None:
res_scalar = gwas_linear_regression(
ds, dosage="dosage", covariates="covar_0", traits="trait_0"
)
res_list = gwas_linear_regression(
ds, dosage="dosage", covariates=["covar_0"], traits=["trait_0"]
)
xr.testing.assert_equal(res_scalar, res_list) # type: ignore[no-untyped-call]
def test_linear_regression__raise_on_non_2D():
XL = np.ones((10, 5, 1)) # Add 3rd dimension
XC = np.ones((10, 5))
Y = np.ones((10, 3))
with pytest.raises(ValueError, match="All arguments must be 2D"):
linear_regression(XL, XC, Y)
def test_linear_regression__raise_on_dof_lte_0():
# Sample count too low relative to core covariate will cause
# degrees of freedom to be zero
XL = np.ones((2, 10))
XC = np.ones((2, 5))
Y = np.ones((2, 3))
with pytest.raises(ValueError, match=r"Number of observations \(N\) too small"):
linear_regression(XL, XC, Y)
```
|
{
"source": "jeroyang/aitx",
"score": 3
}
|
#### File: aitx/aitx/history.py
```python
import matplotlib.pyplot as plt
from collections import defaultdict
import matplotlib.patches as patches
class History:
def __init__(self):
self._history_log = []
self.human_accuracy = None
def update(self, history):
self._history_log.append(history)
def set_human_accuracy(self, score):
self.human_accuracy = score
def get_full_history(self):
full_history = defaultdict(list)
for history in self._history_log:
for k, v in history.history.items():
full_history[k].extend(v)
return full_history
def plot(self, train='acc', validation=None):
if validation is None:
validation = 'val_' + train
epoch_left = 0
for i, history_obj in enumerate(self._history_log):
train_history = history_obj.history
epochs = len(train_history[train])
epoch_right = epoch_left + epochs
if i % 2 == 1:
axes = plt.gca()
axes.add_patch(patches.Rectangle(
(epoch_left+0.5, 0), # (x,y)
epochs, # width
1.5, # height
color='#f6f6f6'
))
epoch_left = epoch_right
full_history = self.get_full_history()
full_epochs = len(full_history[train])
if self.human_accuracy is not None:
plt.plot([0, full_epochs], [self.human_accuracy, self.human_accuracy], linestyle='--', color='gray', label='human')
plt.plot(range(1, full_epochs+1), full_history[train], label='train')
plt.plot(range(1, full_epochs+1), full_history[validation], label='validation')
plt.title('Train history')
plt.ylabel(train)
plt.xlabel('Epoch')
plt.xticks(range(0, full_epochs+1))
plt.legend(loc='best')
        # Re-fetch the current axes so the limits are set even when no shaded
        # patch was drawn in the loop above (i.e. fewer than two training runs).
        axes = plt.gca()
        axes.set_xlim([0.5, full_epochs+0.5])
        axes.set_ylim([0, 1.1])
plt.show()
def show(self, train='acc', validation=None):
self.plot(train, validation)
def diagnosis(self, human_accuracy=None):
if human_accuracy is not None:
self.human_accuracy = human_accuracy
if self.human_accuracy is None:
print("Need human accuracy")
full_history = self.get_full_history()
hum_error = 1 - self.human_accuracy
acc_error = 1 - full_history['acc'][-1]
val_error = 1 - full_history['val_acc'][-1]
template = "Human level error:\t{:.3f}\nTraining set error:\t{:.3f}\nDev/Test set error:\t{:.3f}"
print(template.format(hum_error, acc_error, val_error))
bias = max(0, acc_error - hum_error)
variance = val_error - acc_error
print('Avoidable bias:\t{:.3f}\nVariance:\t{:.3f}'.format(bias, variance))
        if bias >= variance:
            print('Recommend training a larger model or training for longer.')
        else:
            print('{:->30}'.format(''))
            print('Recommend more regularization or dropout.')
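# A small worked sketch of the diagnosis arithmetic above (illustrative numbers,
# not part of the original module): with a human accuracy of 0.95, a final
# training accuracy of 0.90 and a validation accuracy of 0.85, the errors are
# 0.05 / 0.10 / 0.15, so the avoidable bias is max(0, 0.10 - 0.05) = 0.05 and
# the variance is 0.15 - 0.10 = 0.05; bias >= variance, so a larger model or
# longer training would be recommended.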
```
|
{
"source": "jeroyang/automapper",
"score": 3
}
|
#### File: automapper/automapper/automapper.py
```python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import re
try:
# python 3.4+
from html import unescape
except:
# Python 2.6-2.7
from HTMLParser import HTMLParser
h = HTMLParser()
unescape = h.unescape
from collections import OrderedDict
from lxml import etree
from automapper.longest_common_subsequence import lcs
def strip_tags(markup):
"""
Remove tags from the markup string
    >>> strip_tags('<a href="123">hell<b>o</b></a><br/>')
    'hello'
"""
tag = r'</?\w+( .*?)?/?>'
return re.sub(tag, '', markup)
def peel(markup):
"""
    Remove the outermost markup tags
    """
    tagpair = r'(?s)^\s*<(\w+)( .*)?>(.*)</\1>\s*$'
content = re.sub(tagpair, r'\3', markup)
return content.strip()
def similarity(a, b):
"""
Return the similarity of two strings based on longest_common_subsequence
"""
return float(len(lcs(a, b))) / max(len(a), len(b))
def score(a, b):
"""
    Return a heuristic score of a and b
"""
return similarity(a, b)
filters = OrderedDict((
(0, lambda x: x),
(1, strip_tags),
(2, unescape)
))
class Model(dict):
"""
    The model is a dict which holds a flat representation of an XML or HTML tree,
    mapping from XPath to content
"""
def search(self, query, n=None, threshold=None):
"""
        Given a query, return the first n results ranked by their scores.
        If the threshold is set, only the results whose score is greater than
        or equal to the threshold will be returned.
"""
candidates = []
for xpath, content in self.items():
score2filter = dict()
for name, filtfunc in list(filters.items())[::-1]:
the_score = score(query, filtfunc(content))
score2filter[the_score] = name
best_result = sorted(list(score2filter.items()), key=lambda i: i[0], reverse=True)[0]
best_score, best_filter = best_result
candidates.append((best_score, best_filter, xpath))
candidates.sort(key=lambda c: c[1])
candidates.sort(key=lambda c: c[0], reverse=True)
if threshold is not None:
            candidates = [c for c in candidates if c[0] >= threshold]
if n is None:
return candidates
else:
return candidates[:n]
@classmethod
def fromlxml(cls, xml):
"""
A factory which returns a Model from an XML or HTML tree (from lxml)
"""
model = cls()
tree = etree.ElementTree(xml)
for node in tree.iter():
raw_content = etree.tostring(node, with_tail=False, encoding='unicode')
xpath = tree.getpath(node)
content = peel(raw_content)
model[xpath] = content
return model
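# A minimal usage sketch (hypothetical markup, xpath and scores, not from the
# original module): build a Model from an lxml tree, then look up the xpath
# whose content best matches a query string.
#
#     from lxml import html
#     page = html.fromstring('<html><body><p>Hello <b>world</b></p></body></html>')
#     model = Model.fromlxml(page)
#     model.search('Hello world', n=1)
#     # -> [(best_score, best_filter, '/html/body/p')]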
```
|
{
"source": "jeroyang/carcd",
"score": 3
}
|
#### File: carcd/tests/test_name.py
```python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
"""
test_name
----------------------------------
Tests for `name` module.
"""
import unittest
from collections import OrderedDict
from carcd import name
class TestName(unittest.TestCase):
def setUp(self):
pass
def test_beautify(self):
data = '我是1個 大呆 !!'
result = name.beautify(data)
        wanted = '我是 1 個 大呆!!'
        self.assertEqual(result, wanted)
def test_asciilize(self):
result = name.asciilize('12 34 何大一!')
wanted = '12 34 He Da Yi!'
self.assertEqual(result, wanted)
def test_name_split(self):
title = '12-2 ABC 大大一.mp3'
result = name.name_split(title)
wanted = OrderedDict([
('number', '12-2'),
('space', ' '),
('title', 'ABC 大大一'),
('ext', '.mp3')
])
self.assertEqual(result, wanted)
def test_name_join(self):
items = [
('12-2', 'number'),
(None, 'space'),
('ABC 大大一', 'title'),
('.mp3', 'ext')
]
        wanted = '12-2ABC 大大一.mp3'
        result = name.name_join(items)
        self.assertEqual(result, wanted)
def test_number_format(self):
result = name.number_format('2', fill=2)
wanted = '02'
self.assertEqual(result, wanted)
result = name.number_format('2-2', fill=2)
wanted = '02-02'
self.assertEqual(result, wanted)
def test_name_handle(self):
title = '12-2 何大一.mp3'
result = name.name_handle(title)
wanted = '12-02 He Da Yi 何大一.mp3'
self.assertEqual(result, wanted)
def test_is_pinyined(self):
data = '<NAME> 何大一'
self.assertTrue(name.is_pinyined(data))
data = 'ABC 何大一'
self.assertFalse(name.is_pinyined(data))
def tearDown(self):
pass
```
|
{
"source": "jeroyang/cateye",
"score": 3
}
|
#### File: cateye/tests/test_cateye.py
```python
import unittest
from cateye import cateye
class TestCateye(unittest.TestCase):
def setUp(self):
pass
def test_gen_path(self):
base = 'test/path'
code = 'ABC3'
wanted = 'test/path'
result = cateye.gen_path(base, code)
self.assertEqual(wanted, result)
def test_clean(self):
s = 'Open wound to auricle of ear, without mention of complication'
wanted = 'Open wound to auricle of ear, '
result = cateye.clean(s)
self.assertEqual(wanted, result)
s = 'Diabetes mellitus without mention of complication, Type I'
        wanted = 'Diabetes mellitus Type I'
        result = cateye.clean(s)
        self.assertEqual(wanted, result)
def test_tokenize(self):
s = "Crohn's disease 克隆氏症"
wanted = ['Crohn', 'disease', '克隆氏症']
result = cateye.tokenize(s)
self.assertEqual(wanted, result)
def test_lemmatize(self):
tokens = ['Best', 'TEST', 'case']
wanted= ['best', 'test', 'case']
result = cateye.lemmatize(tokens)
self.assertEqual(wanted, result)
def test_filterout(self):
tokens = ['for', 'test', 'only']
stopwords = ['for']
wanted = ['test', 'only']
result = cateye.filterout(tokens, stopwords)
self.assertEqual(wanted, result)
def test_ed1(self):
token = 'abc'
wanted = {'aabc', 'abbc', 'acbc', 'adbc', 'aebc',
'abca', 'abcb', 'abcc', 'abcd',
'ab', 'ac',
'aac', 'acc', 'adc', 'aec', 'afc', 'agc',
'acb'}
result = cateye._ed1(token)
self.assertTrue(wanted <= result)
def tearDown(self):
pass
```
|
{
"source": "jeroyang/exptk",
"score": 3
}
|
#### File: exptk/exptk/report.py
```python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from collections import defaultdict, namedtuple
from fractions import Fraction
TagBox = namedtuple('TagBox', 'content tag')
def pack_boxes(list_of_content, tag):
return [TagBox(content, tag) for content in list_of_content]
def get_numerator(ratio, max_denominator):
fraction = Fraction.from_float(ratio).limit_denominator(max_denominator)
return int(fraction.numerator * max_denominator / fraction.denominator)
def get_denominator(ratio, max_numerator):
return get_numerator(1/ratio, max_numerator)
class Report:
"""
Holding the results of experiment, presenting the precision, recall,
f1 score of the experiment.
"""
def __init__(self, tp=[], fp=[], fn=[], title=None):
"""
tp: the ture positive items
fp: the false positive items
fn: the false negative items
title: the title of this report
"""
self.tp = pack_boxes(tp, title)
self.fp = pack_boxes(fp, title)
self.fn = pack_boxes(fn, title)
self.title = title
def precision(self):
try:
return float(len(self.tp)) / (len(self.tp) + len(self.fp))
except ZeroDivisionError:
return 0.0
def recall(self):
try:
return float(len(self.tp)) / (len(self.tp) + len(self.fn))
except ZeroDivisionError:
return 0.0
def f1(self):
r = self.recall()
p = self.precision()
try:
return float(2 * r * p) / (r + p)
except ZeroDivisionError:
return 0.0
def __repr__(self):
r = self.recall()
p = self.precision()
f = self.f1()
syntax = 'Report<P{p:.3f} R{r:.3f} F{f:.3f} {t!r}>'
return syntax.format(p=p, r=r, f=f, t=self.title)
@classmethod
def from_reports(cls, reports, title):
meta_report = cls([], [], [], title)
for report in reports:
meta_report.tp.extend(pack_boxes(report.tp, title))
meta_report.fp.extend(pack_boxes(report.fp, title))
meta_report.fn.extend(pack_boxes(report.fn, title))
return meta_report
def split(self):
title2report = defaultdict(Report)
try:
for tagbox, _ in self.tp:
title2report[tagbox.tag].tp.append(tagbox.content)
for tagbox, _ in self.fp:
title2report[tagbox.tag].fp.append(tagbox.content)
for tagbox, _ in self.fn:
title2report[tagbox.tag].fn.append(tagbox.content)
for title, report in title2report.items():
report.title = title
except AttributeError:
raise AssertionError('The report cannot be split')
return list(title2report.values())
@classmethod
def from_scale(cls, gold_number, precision, recall, title):
tp_count = get_numerator(recall, gold_number)
positive_count = get_denominator(precision, tp_count)
fp_count = positive_count - tp_count
fn_count = gold_number - tp_count
scale_report = cls(['tp'] * tp_count,
['fp'] * fp_count,
['fn'] * fn_count,
title)
return scale_report
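# A small usage sketch (illustrative numbers, not part of the original module):
# with gold_number=10, precision=0.8 and recall=0.8, get_numerator(0.8, 10)
# yields 8 true positives and get_denominator(0.8, 8) yields 10 predicted
# positives, so the reconstructed report holds 8 tp, 2 fp and 2 fn placeholders.
#
#     r = Report.from_scale(10, 0.8, 0.8, 'demo')
#     assert (len(r.tp), len(r.fp), len(r.fn)) == (8, 2, 2)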
```
|
{
"source": "jeroyang/mcgocr",
"score": 2
}
|
#### File: mcgocr/ncgocr/concept.py
```python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import datetime
import time
import re
import pickle
from collections import namedtuple, defaultdict
from functools import partial
from copy import copy
import progressbar
from ncgocr import pattern_regex
class Index(dict):
def __init__(self):
self.use_default = True
def __add__(self, other):
result = copy(self)
for key, value_set in other.items():
result[key] |= value_set
return result
def __repr__(self):
template = '{}<{} key(s)>'
return template.format(self.__class__.__name__, len(self))
def __missing__(self, key):
if self.use_default:
self[key] = set()
return self[key]
else:
return set()
@classmethod
def join(cls, indices):
output = cls()
for index in indices:
output += index
return output
Trunk = namedtuple('Trunk', 'text type start end')
class Term(namedtuple('Term', 'lemma ref')):
def __repr__(self):
template = self.__class__.__name__ + '<{} {}>'
return template.format(self.lemma, self.ref)
def __hash__(self):
return hash(self.__class__.__name__) + hash(self.lemma) + hash(self.ref)
def __eq__(self, other):
if all([hash(self) == hash(other),
self.lemma == other.lemma,
self.ref == other.ref]):
return True
return False
class Entity(Term):
pass
class Pattern(Term):
pass
class Constraint(Term):
pass
class Evidence(namedtuple('Evidence', 'term text start end')):
def __sub__(self, other):
"""
        Return the distance from the start of this evidence to the end of another evidence
Notice: abs(a - b) != abs(b - a)
"""
return self.start - other.end
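    # For example (illustrative values, not from the original module):
    # a = Evidence(None, 'kinase', 10, 16) and b = Evidence(None, 'cell', 0, 4)
    # give a - b == 10 - 4 == 6, while b - a == 0 - 16 == -16, hence
    # abs(a - b) != abs(b - a).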
class Statement(namedtuple('Statement', 'statid evidences')):
"""
A Statement is a collection of evidences
"""
def __eq__(self, other):
try:
self.eq_terms(other)
return True
except:
return False
def __hash__(self):
return hash('statement:' + self.statid)
def __repr__(self):
template = 'Statement<{} {}>'
components = [repr(term) for term in self.terms()]
return template.format(self.statid, ' '.join(components))
def terms(self):
return [evidence.term for evidence in self.evidences]
def eq_terms(self, other):
terms = []
if len(self.evidences) != len(other.evidences):
raise ValueError('The two statements have different lengths of evidences')
for this_evidence, other_evidence in zip(self.evidences, other.evidences):
this_term, other_term = this_evidence.term, other_evidence.term
if type(this_term) != type(other_term):
raise ValueError('The two statements have different type sequences of terms')
elif all([isinstance(this_term, Pattern),
not this_term == other_term]):
                raise ValueError('The two statements have different patterns')
elif not any([isinstance(this_term, Pattern),
this_term == other_term]):
terms.append((other_term, this_term))
return terms
def _clean(entity_frag):
"""
    Clean the given trunk (text containing an entity and a pattern)
    by removing the leading and trailing punctuation and stopwords
"""
stopwords = '(of|in|to|and|or|the|a)'
head_regex = r'^\s*{}\b'.format(stopwords)
tail_regex = r'\b{}\s*$'.format(stopwords)
joined_regex = r'|'.join([head_regex, tail_regex])
return re.sub(joined_regex, '', entity_frag).strip(' -,')
def split_trunk(label):
"""
    A GO label may contain two (or more) trunks;
    split the label into trunks by regular expression.
TODO: split trunks correctly if there are more than two trunks
"""
regex_list = [
r'^(?P<main_trunk>.+?),?\s*\b(?:involved in|during|via|using|by|acting on|from|in)\s(?P<constraint_trunk>.*)$',
r'^(?P<main_trunk>.+?),\s*\b(?P<constraint_trunk>.*)[\ \-](?:related|dependent)$']
for regex in regex_list:
m = re.match(regex, label)
if m:
main_text = m.group('main_trunk')
main_start = label.index(main_text)
main_end = main_start + len(main_text)
main_trunk = Trunk(main_text, 'main', main_start, main_end)
constraint_text = m.group('constraint_trunk')
constraint_start = label.index(constraint_text)
constraint_end = constraint_start + len(constraint_text)
constraint_trunk = Trunk(constraint_text, 'constraint', constraint_start, constraint_end)
return [main_trunk, constraint_trunk]
return [Trunk(label, 'main', 0, len(label))]
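# For illustration (a hypothetical GO-style label, not taken from the ontology):
# split_trunk('histone phosphorylation involved in DNA repair') yields a main
# trunk with text 'histone phosphorylation' and a constraint trunk with text
# 'DNA repair'; a label without a recognized connective comes back as a single
# main trunk spanning the whole string.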
def split_pattern(trunk, regex_in):
"""
    Given a trunk and the regex_in from the pattern manager,
    return the list of pre-terms (patterns, entities and constraints)
"""
pre_terms = []
text = trunk.text
start = trunk.start
if trunk.type == 'main':
for m in re.finditer(regex_in, text):
for lemma, token in m.groupdict().items():
if token is not None:
token_start = start + m.start()
token_end = start + m.end()
pre_terms.append((Pattern, lemma, token, token_start, token_end))
replaced = re.sub(regex_in, '###', text)
dirty_tokens = replaced.split('###')
for dirty_token in dirty_tokens:
clean_token = _clean(dirty_token)
if len(clean_token) > 0:
token_start = start + text.index(clean_token)
token_end = token_start + len(clean_token)
if trunk.type == 'main':
pre_terms.append((Entity, clean_token, clean_token, token_start, token_end))
else:
pre_terms.append((Constraint, clean_token, clean_token, token_start, token_end))
return pre_terms
def evidence_split(goid, label, regex_in):
"""
    Given a goid and one of its labels, return a list of evidences
"""
pre_terms = [e for t in split_trunk(label) for e in split_pattern(t, regex_in)]
evidences = []
for pre_term in pre_terms:
if pre_term[0] is Pattern:
term = Pattern(*pre_term[1:2], ref='annotator')
evid = Evidence(term, *pre_term[2:])
evidences.append(evid)
else:
term = pre_term[0](*pre_term[1:2], ref=goid)
evid = Evidence(term, *pre_term[2:])
evidences.append(evid)
evidences.sort(key=lambda e: e.start)
return evidences
class Cluster(object):
def __init__(self, primary_term, terms=None):
self.primary_term = primary_term
if terms is None:
self.terms = set()
else:
self.terms = set(terms)
self.updated_fragments = False
self._fragments = set()
self._term_queue = list(self.terms)
def __hash__(self):
return hash(self.primary_term)
def __eq__(self, other):
return all([self.primary_term==other.primary_term,
self.terms==other.terms])
def __repr__(self):
return "Cluster({})<{} terms>".format(repr(self.primary_term), len(self.terms))
def __iter__(self):
for term in self.terms:
yield term
def fragments(self):
if self.updated_fragments:
return self._fragments
else:
for term in self._term_queue:
self._fragments |= set(term.lemma.split(' '))
self._term_queue = []
self.updated_fragments = True
return self._fragments
def add(self, term):
self.updated_fragments = False
self._term_queue.append(term)
self.terms.add(term)
def merge(self, other):
self.updated_fragments = False
self.terms |= other.terms
self._fragments |= other.fragments()
del other
class ClusterBook(object):
def __init__(self):
self.clusters = set()
self.index = dict()
def __repr__(self):
return 'ClusterBook <{} clusters, {} terms>'.format(len(self.clusters), len(self.index.keys()))
def preferred_term(self, term):
return self.index[term].primary_term
def add(self, cluster):
self.clusters.add(cluster)
for term in cluster:
self.index[term] = cluster
def merge(self, cluster1, cluster2):
if cluster1 in self.clusters:
for term in cluster2:
self.index[term] = cluster1
cluster1.merge(cluster2)
elif cluster2 in self.clusters:
for term in cluster1:
self.index[term] = cluster2
cluster2.merge(cluster1)
else:
raise ValueError
def merge_term(self, term1, term2):
cluster = self.index[term1]
cluster.add(term2)
self.index[term2] = cluster
def add_terms(self, terms):
for term in terms:
if not term in self.index:
primary_term = term
cluster = Cluster(primary_term, [term])
self.add(cluster)
def simplify(self, theshold):
"""
        Return a new ClusterBook in which any pair of clusters with a
        similarity greater than or equal to the threshold is merged together
"""
result = ClusterBook()
Z = len(self.clusters)**2//10000
with progressbar.ProgressBar(max_value=Z) as bar:
for i, cluster in enumerate(self.clusters):
if len(result.clusters) == 0:
result.add(cluster)
for already_cluster in result.clusters:
if sim(cluster, already_cluster) >= theshold:
result.merge(already_cluster, cluster)
break
else:
result.add(cluster)
bar.update(i**2//10000)
return result
def has_common(cluster1, cluster2):
set1 = set([(t.__class__, t.lemma) for t in cluster1.terms])
set2 = set([(t.__class__, t.lemma) for t in cluster2.terms])
if len(set1 & set2) == 0:
return False
return True
def jaccard(cluster1, cluster2):
set1 = cluster1.fragments()
set2 = cluster2.fragments()
return len(set1 & set2)/len(set1 | set2)
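# For example (illustrative fragment sets, not from the original module): with
# fragments {'cell', 'division'} and {'cell', 'cycle'}, jaccard() returns 1/3,
# since the two clusters share one fragment out of three distinct fragments.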
def sim(cluster1, cluster2):
if not has_common(cluster1, cluster2):
return 0.0
else:
return jaccard(cluster1, cluster2)
class GoData(dict):
def __init__(self, obo_path):
self._regex_in = pattern_regex.regex_in
self._regex_out = pattern_regex.regex_out
self._date = None
self._read(obo_path)
self.goid2mindepth = dict()
self.goid2maxdepth = dict()
self._calculate_depth()
self.goid2above = dict()
self.goid2below = dict()
self.goid2density = dict()
self.clusterbook = None
self._raw_clusterbook = ClusterBook()
self._calculate_density()
self.biological_process = partial(self._get_namespace, 'biological_process')
self.cellular_component = partial(self._get_namespace, 'cellular_component')
self.molecular_function = partial(self._get_namespace, 'molecular_function')
self._digest()
def __repr__(self):
template = "GoData<{} concepts, {} statements, on {}>"
concept_count = len(self)
statement_count = sum([len(s.statements) for s in self.values()])
datestr = self.date.strftime('%Y/%m/%d')
        return template.format(concept_count, statement_count, datestr)
def _read(self, obo_path):
"""Read GO data from OBO file"""
with open(obo_path) as f:
text = f.read()
blocks = text.split('\n\n')
basic_data = blocks[0]
term_blocks = filter(lambda block:block[0:6]=='[Term]', blocks)
dt = tuple(i.partition(': ')[2] for i in basic_data.split('\n') if i.partition(':')[0]=='date')[0]
self.date = datetime.datetime(*time.strptime(dt, "%d:%m:%Y %H:%M")[:6])
for term_block in term_blocks:
goid = None
name = None
namespace = None
synonym_list = list()
parent_list = list()
if 'is_obsolete: true' in term_block:
continue
lines = term_block.split('\n')
for line in lines[1:]:
key, sep, value = line.partition(':')
if key == 'id':
goid = value.strip()
if key == 'name':
name = value.strip()
if key == 'synonym':
synotext = value.strip()
synonym = re.findall(r'"(.*?)"', synotext)[0]
synonym_list.append(synonym)
if key == 'namespace':
namespace = value.strip()
if key == 'is_a':
parent_id, sep , parent_name = value.partition('!')
parent_list.append(parent_id.strip())
concept = Concept(goid, name, namespace, synonym_list, parent_list)
self[goid] = concept
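    # The parser above expects standard OBO [Term] stanzas; an illustrative,
    # abridged block looks like:
    #
    #     [Term]
    #     id: GO:0000001
    #     name: mitochondrion inheritance
    #     namespace: biological_process
    #     synonym: "mitochondrial inheritance" EXACT []
    #     is_a: GO:0048308 ! organelle inheritance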
    def _calculate_depth(self):
        # Memoize on (goid, func) so the max-depth and min-depth passes
        # do not pollute each other's cache.
        cache = dict()
        def _calc_depth(goid, func):
            if goid in {'GO:0003674', 'GO:0008150', 'GO:0005575'}:
                return 1
            try:
                return cache[(goid, func)]
            except KeyError:
                concept = self[goid]
                depth = func(_calc_depth(parent_id, func) for parent_id in concept.parent_list) + 1
                cache[(goid, func)] = depth
                return depth
        for goid in self.keys():
            self.goid2maxdepth[goid] = _calc_depth(goid, max)
            self.goid2mindepth[goid] = _calc_depth(goid, min)
def _calculate_density(self):
above_cache = self.goid2above
def _above(goid):
if goid in {'GO:0003674', 'GO:0008150', 'GO:0005575'}:
return set()
try:
return above_cache[goid]
except KeyError:
concept = self[goid]
above = set.union(*[_above(parent_id) for parent_id in concept.parent_list])
above |= set(concept.parent_list)
above_cache[goid] = above
return above
for goid in self.keys():
above_cache[goid] = _above(goid)
below_cache = defaultdict(set)
for goid, above in above_cache.items():
for parent_id in above:
below_cache[parent_id].add(goid)
self.goid2below = below_cache
total = len(self)
for goid in self.keys():
below = self.goid2below.get(goid, set())
self.goid2density[goid] = float(len(below) + 1) / total
for concept in self.values():
goid = concept.goid
concept.density = self.goid2density[goid]
def _get_namespace(self, namespace):
for goid, concept in self.items():
if concept.namespace == namespace:
yield concept
def _digest(self):
"""
        Digest the labels of the concepts, write the statements,
        and aggregate clusters, saving them into self.clusterbook
"""
regex_in=self._regex_in
with progressbar.ProgressBar(max_value=len(self)) as bar:
for j, (goid, concept) in enumerate(self.items()):
for i, label in enumerate(concept.labels):
statid = '%'.join([goid, str(i).zfill(3)])
evidences = evidence_split(goid, label, regex_in)
terms = [evidence.term for evidence in evidences]
statement = Statement(statid, evidences)
if len(concept.statements) == 0:
concept.statements.append(statement)
self._raw_clusterbook.add_terms(terms)
continue
for already_statement in concept.statements:
try:
eq_terms = statement.eq_terms(already_statement)
for term1, term2 in eq_terms:
self._raw_clusterbook.merge_term(term1, term2)
break
except ValueError:
pass
else:
concept.statements.append(statement)
self._raw_clusterbook.add_terms(terms)
bar.update(j)
if self.clusterbook is None:
self.clusterbook = self._raw_clusterbook
def _rewrite_statements(self):
for goid, concept in self.items():
statements = concept.statements
new_statements = []
for statement in statements:
statid = statement.statid
old_evidences = statement.evidences
new_evidences = []
for old_evidence in old_evidences:
old_term = old_evidence.term
text = old_term.lemma
new_term = self.clusterbook.preferred_term(old_term)
start = old_evidence.start
end = old_evidence.end
new_evidences.append(Evidence(new_term, text, start, end))
new_statements.append(Statement(statid, new_evidences))
concept.statements = new_statements
def compression(self, theshold):
simple_book = self._raw_clusterbook.simplify(theshold)
self.clusterbook = simple_book
self._rewrite_statements()
def get_Ie(self):
cb = self.clusterbook
Ie = Index()
for c in cb.clusters:
for term in c.terms:
Ie[term.lemma].add(c.primary_term)
Ie.use_default = False
return Ie
def get_Im(self):
Im = Index()
for goid, concept in self.items():
statements = concept.statements
for statement in statements:
if all([isinstance(term, Pattern) for term in statement.terms()]):
for term in statement.terms():
Im[term].add(statement)
else:
for term in statement.terms():
if isinstance(term, Entity):
Im[term].add(statement)
return Im
def save(self, filepath):
with open(filepath, 'wb') as f:
pickle.dump(self, f, protocol=2)
@classmethod
def load(cls, filepath):
with open(filepath, 'rb') as f:
godata = pickle.load(f)
return godata
class Concept(object):
def __init__(self, goid, name, namespace, synonym_list, parent_list, density=-1):
self.goid = goid
self.name = name
self.namespace = namespace
self.ns = {'biological_process': 'BP',
'cellular_component': 'CC',
'molecular_function': 'MF'}[namespace]
self.synonym_list = synonym_list
self.labels = [name] + synonym_list
self.parent_list = parent_list
self.statements = []
self.density = density
def __repr__(self):
return 'Concept<{} {} {}>'.format(self.goid, self.ns, self.name)
```
#### File: mcgocr/ncgocr/gopattern.py
```python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from txttk.nlptools import slim_stem
from txttk import retools
from collections import defaultdict
import re
def _regex(snippet):
return snippet.replace('_', r'\S{0,6}').replace(' ', r'[\ \-]?')
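# For illustration (hypothetical snippets, not taken from the pattern
# definition file): '_' becomes a short wildcard suffix and a space becomes an
# optional space or hyphen, e.g.
#     _regex('transport_')  ->  r'transport\S{0,6}'
#     _regex('cell cycle')  ->  r'cell[\ \-]?cycle'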
def _unit_regex_out(lemma, snippets):
regs = [_regex(snippet) for snippet in snippets]
regs.sort(key=len, reverse=True)
large_snippet = retools.nocatch(retools.parallel(regs))
return r'(?P<{}>{})'.format(lemma, large_snippet)
def _unit_regex_in(lemma, snippets):
medium_regs = []
for snippet in snippets:
reg = _regex(snippet)
medium_regs.append(r'^(?:{0})\b'.format(reg))
medium_regs.append(r'\b(?:{0})$'.format(reg))
large_snippet = retools.nocatch(retools.parallel(medium_regs))
return r'(?P<{}>{})'.format(lemma, large_snippet)
class PatternManager(object):
def __init__(self):
self.lemma2snippets = defaultdict(set)
self.lemma2broad = defaultdict(set)
def _sorted_lemma_n_snippets(self):
lemma_n_snippets = self.lemma2snippets.items()
key = lambda item: len(item[0])
return sorted(lemma_n_snippets, key=key, reverse=True)
def add_snippet(self, snippet, lemma):
"""
Add a snippet to the lemma
"""
self.lemma2snippets[lemma].add(snippet)
def add_snippets(self, snippets, lemma):
for snippet in snippets:
self.add_snippet(snippet, lemma)
def regex_out(self, lemma2snippet_more=dict()):
"""
        Return a long regex for pattern extraction from journal papers
"""
lemma_n_snippets = self._sorted_lemma_n_snippets()
regex = retools.parallel([_unit_regex_out(lemma, sorted(list(snippets))) for lemma, snippets in lemma_n_snippets], sort=True)
wrapper = r'(?i)\b(?:{0})\b'
return wrapper.format(regex)
def regex_in(self):
"""
        Return a long regex for pattern extraction from GO definitions
"""
lemma_n_snippet = self._sorted_lemma_n_snippets()
regex = retools.parallel([_unit_regex_in(lemma, snippet) for lemma, snippet in lemma_n_snippet])
wrapper = r'(?:{0})'
return wrapper.format(regex)
def get_extractor(self):
"""
        Make a soft extractor based on the given regular expression. We use regex_out from the PatternManager
"""
regex = self.regex_out()
def extractor(sentence, start):
"""
            Extract soft concepts (patterns) from the given sentence
"""
results = []
for m in re.finditer(regex, sentence):
lemma = list(filter(lambda item: item[1] is not None, m.groupdict().items()))[0][0]
pattern_start = m.start()
pattern_end = m.end()
results.append((lemma, start+pattern_start, start+pattern_end))
return results
return extractor
@classmethod
def from_definition(cls, definition_fp):
"""
Return a PatternManager ojbect from give pattern difinition file
"""
pm = cls()
with open(definition_fp) as f:
pattern_lines = filter(lambda x: any([len(x) == 0, x[0] != '#']), f.read().strip().split('\n'))
for line in pattern_lines:
lemma, sep, tail = line.partition(':')
snippets = tail.split(';')
pm.add_snippets(snippets, lemma)
return pm
def write_code(self, filepath='ncgocr/pattern_regex.py'):
template = """#These big regular expressions were automatically
#generated by the gopattern.py, just don't edit it.
regex_in = {}
regex_out = {}"""
with open(filepath, 'w') as f:
code = template.format(repr(self.regex_in()), repr(self.regex_out()))
f.write(code)
if __name__ == '__main__':
pm = PatternManager.from_definition('../data/pattern_definition.txt')
pm.write_code('../pattern_regex.py')
```
#### File: mcgocr/ncgocr/learning.py
```python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from collections import OrderedDict, defaultdict, ChainMap
from intervaltree import Interval, IntervalTree
import numpy as np
from txttk.report import Report
from txttk.corpus import Annotation
"""
The measurements of features
"""
def concept_measurements(candidate, godata):
"""
    Measure the concept features: GOID, STATID, NAMESPACE
from the given candidate
"""
statement = candidate.statement
statid = statement.statid
goid, sep, sulfix = statid.partition('%')
namespace = godata[goid].namespace
measurements = OrderedDict([('GOID', goid),
('STATID', statid),
('NAMESPACE', namespace)])
return measurements
def evidence_measurements(candidate):
"""
    Measure the evidence features: LENGTH, TEXT, TEXT[:3],
    TEXT[-3:] and BOOST from the given candidate
"""
evidences = candidate.evidences
sentence_text = candidate.sentence.text
offset = candidate.sentence.offset
starts = [e.start for e in evidences]
ends = [e.end for e in evidences]
raw_start = min(starts) - offset
raw_end = max(ends) - offset
length = raw_end - raw_start
text = sentence_text[raw_start:raw_end].lower()
boostscore = {'boost':1, 'boost2': 100}
boostlevel = max([boostscore.get(term.ref, 0) for term in candidate.statement.terms()])
measurements = OrderedDict([('LENGTH', length),
('TEXT=' + text, True),
('TEXT[:3]=' + text[:3], True),
('TEXT[-3:]=' + text[-3:], True),
('BOOST', boostlevel)])
return measurements
def bias_measurements(candidate):
"""
Measure the bias features: OMIT, SATURATION from the
given candidate
"""
measurements = OrderedDict()
statement = candidate.statement
evidences = candidate.evidences
terms_in_evidences = set([e.term for e in evidences])
for term in statement.terms():
if term in terms_in_evidences:
continue
key = 'OMIT=' + term.lemma
measurements[key] = True
measurements['SATURATION'] = len(evidences) / len(statement.evidences)
return measurements
def all_measurements(candidate, godata):
"""
Return all the measurements from the given candidate
"""
measurements = OrderedDict()
measurements.update(concept_measurements(candidate, godata))
measurements.update(evidence_measurements(candidate))
measurements.update(bias_measurements(candidate))
return measurements
def bulk_measurements(candidates, godata):
result = []
for candidate in candidates:
result.append(all_measurements(candidate, godata))
return result
class LabelMarker(object):
"""
    Handles the labels from the given gold standard
"""
def __init__(self, goldstandard):
self.goldstandard = goldstandard
forest = defaultdict(IntervalTree)
for pmid, goid, start, end, text in goldstandard:
t = forest[pmid]
t[start:end] = (goid, text)
self.forest = dict(forest)
def mark(self, candidate):
pmid = candidate.sentence.docid
statid = candidate.statement.statid
evidences = candidate.evidences
goid = statid.partition('%')[0]
starts = [e.start for e in evidences]
ends = [e.end for e in evidences]
start = min(starts)
end = max(ends)
span = (start, end)
gold_goids = {iv.data[0] for iv in self.forest[pmid][slice(*span)]}
if goid in gold_goids:
return 1
return 0
def markall(self, candidates):
labels = []
for candidate in candidates:
labels.append(self.mark(candidate))
return labels
def process(self, candidates):
return np.array(self.markall(candidates))
def recover(candidates, y):
result = Annotation()
for candidate, label in zip(candidates, y):
if label == 0:
continue
pmid = candidate.sentence.docid
statid = candidate.statement.statid
goid = statid.partition('%')[0]
start = min([e.start for e in candidate.evidences])
end = max([e.end for e in candidate.evidences])
raw_start = start - candidate.sentence.offset
raw_end = end - candidate.sentence.offset
text = candidate.sentence.text[raw_start:raw_end]
result.add((pmid, goid, start, end, text))
return result
def evaluate(system, goldstandard, message):
slim_system = {i[:4] for i in system}
slim_goldstandard = {i[:4] for i in goldstandard}
slim2gold = ChainMap({i[:4]: i for i in goldstandard},
{i[:4]: i for i in system})
slim_tp = slim_system & slim_goldstandard
slim_fp = slim_system - slim_goldstandard
slim_fn = slim_goldstandard - slim_system
tp = {slim2gold[i] for i in slim_tp}
fp = {slim2gold[i] for i in slim_fp}
fn = {slim2gold[i] for i in slim_fn}
return Report(tp, fp, fn, message)
```
#### File: mcgocr/tests/test_corpus.py
```python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
"""
test_corpus
----------------------------------
Tests for `corpus` module.
"""
import unittest
from txttk import corpus as c
class TestFunctions(unittest.TestCase):
def test_is_title(self):
positive = ['Jesus Loves Me',
'Bring to the Front!',
'The Basic Idea of Games']
negative = ['Who is the first lady?',
'The function of BRCA1',
'Windows XP never die']
for case in positive:
self.assertTrue(c.is_title(case))
for case in negative:
self.assertFalse(c.is_title(case))
def test_is_abbr(self):
positive = ['BRCA1',
'EFR2',
'CA3',
'TXTs']
negative = ['home',
'Basic',
'Hello']
for case in positive:
self.assertTrue(c.is_abbr(case))
for case in negative:
self.assertFalse(c.is_abbr(case))
def test_is_word(self):
positive = ['Dream',
'best',
'talk',
'A']
negative = ['brca1',
'NBA',
'A*']
for case in positive:
self.assertTrue(c.is_word(case))
for case in negative:
self.assertFalse(c.is_word(case))
def test_normalize(self):
cases = ['Dream',
'Hello',
'NBA',
'BRCA1',
'best']
targets = ['dream',
'hello',
'NBA',
'BRCA1',
'best']
for case, target in zip(cases, targets):
self.assertEqual(c.normalize(case), target)
def test_normalize_sent(self):
cases = ['Jesus Loves Me',
'Bring to the Front!',
'The Basic Idea of Games',
'Who is the first lady?',
'The function of BRCA1',
'Windows XP never die']
targets = ['jesus loves me',
'bring to the front!',
'the basic idea of games',
'who is the first lady?',
'the function of BRCA1',
'windows XP never die']
for case, target in zip(cases, targets):
self.assertEqual(c.normalize_sent(case), target)
class TestSentence(unittest.TestCase):
def setUp(self):
self.sentence = c.Sentence('<NAME>', 15, 'diary')
def test_init(self):
text = self.sentence.text
wanted = 'jesus loves me'
self.assertEqual(text, wanted)
def test_repr(self):
result = self.sentence.__repr__()
wanted = "Sentence<'<NAME>' 15@diary>"
self.assertEqual(result, wanted)
class TestCorpus(unittest.TestCase):
def setUp(self):
self.corpus_a = c.Corpus(title='testing_a')
self.corpus_b = c.Corpus(title='testing_b')
self.sentences = []
for i in range(100):
sentence = c.Sentence(str(i), i, 'doc'+str(i%10))
self.sentences.append(sentence)
if i < 50:
self.corpus_a.append(sentence)
else:
self.corpus_b.append(sentence)
def test_init(self):
self.assertEqual(self.corpus_a.title, 'testing_a')
def test_add(self):
result = self.corpus_a + self.corpus_b
wanted = c.Corpus('testing_a|testing_b',
self.sentences)
self.assertEqual(result, wanted)
def test_doc_set(self):
wanted = {'doc'+str(i) for i in range(10)}
result = self.corpus_a.doc_set()
self.assertEqual(result, wanted)
def test_divide(self):
corpus_list = self.corpus_a.divide(5, 0)
self.assertEqual(len(corpus_list), 5)
for corpus in corpus_list:
self.assertEqual(len(corpus), 10)
result = sum(corpus_list, c.Corpus())
result.title = 'testing_a'
wanted = self.corpus_a
self.assertEqual(set(result), set(wanted))
```
|
{
"source": "jeroyang/newsletter",
"score": 3
}
|
#### File: newsletter/tests/test_newsletter.py
```python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
"""
test_newsletter
----------------------------------
Tests for `newsletter` module.
"""
import unittest
from newsletter import newsletter
from collections import OrderedDict, Counter
class TestNewsletter(unittest.TestCase):
def setUp(self):
pass
def test_count_args(self):
template = """something {{one}}
other things {{two}} {{two}}"""
results = newsletter.count_args(template)
wanted = Counter({'one': 1, 'two': 2})
self.assertEqual(results, wanted)
def test_split_items(self):
context = 'text1\n----\ntext2\n2nd line\n----\ntext3\n\n-----\ntext4'
result = newsletter.split_items(context)
wanted = ['text1', 'text2\n2nd line', 'text3', 'text4']
self.assertEqual(result, wanted)
def test_format_items(self):
items = ['text1', 'text2']
result = newsletter.format_items(items)
wanted = "text1\n----\ntext2"
self.assertEqual(result, wanted)
    def test_build_text(self):
template = '\nTest\n{{one}}\ntest\n{{two}}\n{{two}}\n'
arg2items = {'one': ['bob', 'alice'],
'two': ['kitty', 'bunny']}
result = newsletter.build_text(template, arg2items)
wanted = 'Test\nbob\ntest\nkitty\nbunny'
self.assertEqual(result, wanted)
def tearDown(self):
pass
```
|
{
"source": "jeroyang/txttk",
"score": 3
}
|
#### File: txttk/tests/test_retools.py
```python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
"""
test_retools
----------------------------------
Tests for `retools` module.
"""
import unittest
from txttk import retools
import re
class RetoolsTestCase(unittest.TestCase):
def test_condense(self):
words = ['hello', 'hellokitty', 'hellomonkey', 'goodbye', 'hell',
'he', 'his', 'hi', 'bye', 'history', 'story', 'condense',
'hematoma', 'lymphoma']
regex = retools.condense(words)
condensed = re.compile(regex)
for word in words:
self.assertTrue(condensed.match(word))
negative_words = ['akkk', 'adadkjkl ', 'avxnbcjn']
for word in negative_words:
self.assertFalse(condensed.match(word))
def test_is_solid(self):
regex = r'a'
self.assertTrue(retools.is_solid(regex))
regex = r'[ab]'
self.assertTrue(retools.is_solid(regex))
regex = r'(a|b|c)'
self.assertTrue(retools.is_solid(regex))
regex = r'(a|b|c)?'
self.assertTrue(retools.is_solid(regex))
regex = r'(ab)c'
self.assertFalse(retools.is_solid(regex))
regex = r'(ab)c?'
self.assertFalse(retools.is_solid(regex))
regex = r'(a)(b|c)'
self.assertFalse(retools.is_solid(regex))
regex = r'(a)(b|c)?'
self.assertFalse(retools.is_solid(regex))
def test_is_packed(self):
regex = r'a'
self.assertFalse(retools.is_packed(regex))
regex = r'[ab]'
self.assertFalse(retools.is_packed(regex))
regex = r'(a|b|c)'
self.assertTrue(retools.is_packed(regex))
regex = r'(ab)c'
self.assertFalse(retools.is_packed(regex))
def test_consolidate(self):
regex = r'a|b'
wanted = r'(a|b)'
result = retools.consolidate(regex)
self.assertEqual(result, wanted)
regex = r'[ab]'
wanted = r'[ab]'
result = retools.consolidate(regex)
self.assertEqual(result, wanted)
regex = r'(?:a|b)'
wanted = r'(?:a|b)'
result = retools.consolidate(regex)
self.assertEqual(result, wanted)
def test_danger_unpack(self):
regex = r'(abc)'
wanted = r'abc'
result = retools.danger_unpack(regex)
self.assertEqual(result, wanted)
regex = r'(?:abc)'
wanted = r'abc'
result = retools.danger_unpack(regex)
self.assertEqual(result, wanted)
regex = r'(?P<xyz>abc)'
wanted = r'abc'
result = retools.danger_unpack(regex)
self.assertEqual(result, wanted)
regex = r'[abc]'
wanted = r'[abc]'
result = retools.danger_unpack(regex)
self.assertEqual(result, wanted)
def test_unpack(self):
regex = r'(abc)'
wanted = r'abc'
result = retools.unpack(regex)
self.assertEqual(result, wanted)
regex = r'(?:abc)'
wanted = r'abc'
result = retools.unpack(regex)
self.assertEqual(result, wanted)
regex = r'(?P<xyz>abc)'
wanted = r'(?P<xyz>abc)'
result = retools.unpack(regex)
self.assertEqual(result, wanted)
regex = r'[abc]'
wanted = r'[abc]'
result = retools.unpack(regex)
self.assertEqual(result, wanted)
def test_parallel(self):
result = retools.parallel([r'abc', r'def'])
wanted = r'abc|def'
self.assertEqual(result, wanted)
result = retools.parallel([r'abc', r'd|ef'])
wanted = 'abc|d|ef'
self.assertEqual(result, wanted)
result = retools.parallel([r'abc', r'(d|ef)'])
wanted = 'abc|d|ef'
self.assertEqual(result, wanted)
result = retools.parallel([r'abc', r'defg'], sort=True)
wanted = 'defg|abc'
self.assertEqual(result, wanted)
def test_nocatch(self):
regex = r'a|b'
wanted = r'(?:a|b)'
result = retools.nocatch(regex)
self.assertEqual(result, wanted)
regex = r'(a|b)'
wanted = r'(?:a|b)'
result = retools.nocatch(regex)
self.assertEqual(result, wanted)
regex = r'(?P<x>ab)'
wanted = r'(?:ab)'
result = retools.nocatch(regex)
self.assertEqual(result, wanted)
regex = r'[ab]'
wanted = r'[ab]'
result = retools.nocatch(regex)
self.assertEqual(result, wanted)
def test_concat(self):
regex_1 = r'a|b'
regex_2 = r'(c|de)'
regex_3 = r'[fg]'
result_12 = retools.concat([regex_1, regex_2])
wanted_12 = r'(a|b)(c|de)'
self.assertEqual(result_12, wanted_12)
result_13 = retools.concat([regex_1, regex_3])
wanted_13 = r'(a|b)[fg]'
self.assertEqual(result_13, wanted_13)
result_123 = retools.concat([regex_1, regex_2, regex_3])
wanted_123 = r'(a|b)(c|de)[fg]'
self.assertEqual(result_123, wanted_123)
def test_nocatchall(self):
regex = r'abc'
wanted = r'abc'
result = retools.nocatchall(regex)
self.assertEqual(result, wanted)
regex = r'(abc)'
wanted = r'(?:abc)'
result = retools.nocatchall(regex)
self.assertEqual(result, wanted)
regex = r'(?:abc)'
wanted = r'(?:abc)'
result = retools.nocatchall(regex)
self.assertEqual(result, wanted)
regex = r'(?P<xyz>abc)'
wanted = r'(?:abc)'
result = retools.nocatchall(regex)
self.assertEqual(result, wanted)
regex = r'(abc(?P<xyz>def))'
wanted = r'(?:abc(?:def))'
result = retools.nocatchall(regex)
self.assertEqual(result, wanted)
regex = r'\(abc\)'
wanted = r'\(abc\)'
result = retools.nocatchall(regex)
self.assertEqual(result, wanted)
def test_option(self):
regex = r'abc'
wanted = r'(?:abc)?'
result = retools.option(regex)
self.assertEqual(result, wanted)
regex = r'(abc)'
wanted = r'(?:abc)?'
result = retools.option(regex)
self.assertEqual(result, wanted)
regex = r'[abc]'
wanted = r'[abc]?'
result = retools.option(regex)
self.assertEqual(result, wanted)
```
#### File: txttk/txttk/feature.py
```python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from collections import OrderedDict
import re
import string
def lexical(token):
"""
Extract lexical features from a given token
There are 3 kinds of lexical features, take 'Hello' as an example:
1. lowercase: 'hello'
2. first4: 'hell'
3. last4: 'ello'
"""
lowercase = token.lower()
first4 = lowercase[:4]
last4 = lowercase[-4:]
return OrderedDict([
('lowercase', lowercase),
('first4', first4),
('last4', last4)
])
def _char_shape(char):
if char in string.ascii_uppercase:
return 'A'
if char in string.ascii_lowercase:
return 'a'
if char in string.digits:
return '0'
else:
return char
def _shape(token):
return ''.join([_char_shape(char) for char in token])
def _contains_a_letter(token):
regex = r'[A-Za-z]'
if re.search(regex, token):
return True
else:
return False
def _contains_a_capital(token):
regex = r'[A-Z]'
if re.search(regex, token):
return True
else:
return False
def _begins_with_capital(token):
return _char_shape(token[0]) == 'A'
def _all_capital(token):
regex = r'^[A-Z]+$'
if re.match(regex, token):
return True
else:
return False
def _contains_a_digit(token):
regex = r'\d'
if re.search(regex, token):
return True
else:
return False
def _all_digit(token):
regex = r'^\d+$'
if re.match(regex, token):
return True
else:
return False
def _contains_a_punctuation(token):
return len(set(string.punctuation) & set(token)) > 0
def _consists_letters_n_digits(token):
shape = _shape(token)
return set(shape.lower()) == set('a0')
def _consists_digits_n_punctuations(token):
shape = _shape(token)
lower_shape = shape.lower()
return set(lower_shape) <= set(string.punctuation+'0') and len(lower_shape) >= 2
def orthographic(token):
"""
Extract orthographic features from a given token
There are 11 kinds of orthographic features, take 'Windows10' as an example:
1. shape: 'Aaaaaaa00'
2. length: 9
3. contains_a_letter: True
4. contains_a_capital: True
5. begins_with_capital: True
6. all_capital: False
7. contains_a_digit: True
8. all_digit: False
9. contains_a_punctuation: False
10. consists_letters_n_digits: True
11. consists_digits_n_punctuations: False
"""
return OrderedDict([
('shape', _shape(token)),
('length', len(token)),
('contains_a_letter', _contains_a_letter(token)),
('contains_a_capital', _contains_a_capital(token)),
('begins_with_capital', _begins_with_capital(token)),
('all_capital', _all_capital(token)),
('contains_a_digit', _contains_a_digit(token)),
('all_digit', _all_digit(token)),
('contains_a_punctuation', _contains_a_punctuation(token)),
('consists_letters_n_digits', _consists_letters_n_digits(token)),
('consists_digits_n_punctuations', _consists_digits_n_punctuations(token)),
])
```
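A minimal usage sketch (not part of the original source), assuming the package is importable as `txttk` and mirroring the docstring examples above:
```python
# Hedged example: exercise the two public feature extractors.
from txttk import feature  # assumed import path

print(feature.lexical('Hello'))
# OrderedDict([('lowercase', 'hello'), ('first4', 'hell'), ('last4', 'ello')])

ortho = feature.orthographic('Windows10')
print(ortho['shape'], ortho['length'])
# Aaaaaaa00 9
```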
#### File: txttk/txttk/retools.py
```python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from collections import defaultdict, OrderedDict
from itertools import combinations
import re
def condense(ss_unescaped):
"""
Given multiple strings, returns a compressed regular expression just
for these strings
>>> condense(['she', 'he', 'her', 'hemoglobin'])
'he(moglobin|r)?|she'
"""
def estimated_len(longg, short):
return (3
+ len(short)
+ sum(map(len, longg))
- len(longg)
* (len(short) - 1)
- 1 )
def stupid_len(longg):
return sum(map(len, longg)) + len(longg)
ss = [re.escape(s) for s in set(ss_unescaped)]
ss.sort(key=len)
short2long = defaultdict(lambda: {'p':[],'s':[]})
for short, longg in combinations(ss, 2):
if longg.startswith(short):
short2long[short]['p'].append(longg)
if longg.endswith(short):
short2long[short]['s'].append(longg)
short2long = sorted(list(short2long.items()),
key=lambda x: len(x[0]),
reverse=True)
output = []
objs = set(ss)
for s, pre_sur in short2long:
pp = set(pre_sur['p']) & objs
ss = set(pre_sur['s']) & objs
if ((stupid_len(pp) - estimated_len(pp, s))
< (stupid_len(ss) - estimated_len(ss, s))):
reg = (r'({heads})?{surfix}'
.format(surfix=s,
heads='|'.join(sorted([p[:-len(s)] for p in ss],
key=len,
reverse=True))))
assert len(reg) == estimated_len(ss, s)
output.append(reg)
objs -= (ss | set([s]))
elif ((stupid_len(pp) - estimated_len(pp, s))
> (stupid_len(ss) - estimated_len(ss, s))):
reg = (r'{prefix}({tails})?'
.format(prefix=s,
tails='|'.join(sorted([p[len(s):] for p in pp],
key=len,
reverse=True))))
assert len(reg) == estimated_len(pp, s)
output.append(reg)
objs -= (pp | set([s]))
for residual in objs:
output.append(residual)
return re.sub(r'\(([^)])\)\?', r'\1?', r'|'.join(output))
def is_solid(regex):
"""
Check whether the given regular expression is solid.
>>> is_solid(r'a')
True
>>> is_solid(r'[ab]')
True
>>> is_solid(r'(a|b|c)')
True
>>> is_solid(r'(a|b|c)?')
True
>>> is_solid(r'(a|b)(c)')
False
>>> is_solid(r'(a|b)(c)?')
False
"""
shape = re.sub(r'(\\.|[^\[\]\(\)\|\?\+\*])', '#', regex)
skeleton = shape.replace('#', '')
if len(shape) <= 1:
return True
if re.match(r'^\[[^\]]*\][\*\+\?]?$', shape):
return True
if re.match(r'^\([^\(]*\)[\*\+\?]?$', shape):
return True
if re.match(r'^\(\)#*?\)\)', skeleton):
return True
else:
return False
def is_packed(regex):
"""
Check if the regex is solid and packed into a pair of parens
"""
return is_solid(regex) and regex[0] == '('
def consolidate(regex):
"""
Wrap the regex in a pair of (capturing) parens,
if the regex is not yet solid
"""
if is_solid(regex):
return regex
else:
return '({})'.format(regex)
def danger_unpack(regex):
"""
Remove the outermost parens
>>> danger_unpack(r'(abc)')
'abc'
>>> danger_unpack(r'(?:abc)')
'abc'
>>> danger_unpack(r'(?P<xyz>abc)')
'abc'
>>> danger_unpack(r'[abc]')
'[abc]'
"""
if is_packed(regex):
return re.sub(r'^\((\?(:|P<.*?>))?(?P<content>.*?)\)$', r'\g<content>', regex)
else:
return regex
def unpack(regex):
"""
Remove the outermost parens, keep the (?P...) one
>>> unpack(r'(abc)')
'abc'
>>> unpack(r'(?:abc)')
'abc'
>>> unpack(r'(?P<xyz>abc)')
'(?P<xyz>abc)'
>>> unpack(r'[abc]')
'[abc]'
"""
if is_packed(regex) and not regex.startswith('(?P<'):
return re.sub(r'^\((\?:)?(?P<content>.*?)\)$', r'\g<content>', regex)
else:
return regex
def parallel(regex_list, sort=False):
"""
Join the given regexes using r'|'
If sort=True, the regexes will be sorted by length (longest first) before processing
>>> parallel([r'abc', r'def'])
'abc|def'
>>> parallel([r'abc', r'd|ef'])
'abc|d|ef'
>>> parallel([r'abc', r'(d|ef)'])
'abc|d|ef'
>>> parallel([r'abc', r'defg'], sort=True)
'defg|abc'
"""
if sort:
regex_list = sorted(regex_list, key=len, reverse=True)
return '|'.join([unpack(regex) for regex in regex_list])
def nocatch(regex):
"""
Wrap the regex in a pair of no-catch parens if it is not solid;
if it is already packed, rewrite its outermost parens with the no-catch tag;
a solid, unpacked regex is returned unchanged
"""
if is_solid(regex) and not is_packed(regex):
return regex
else:
return '(?:{})'.format(danger_unpack(regex))
def concat(regex_list):
"""
Concatenate multiple regular expressions into one; if a given regular expression
is not solid, a pair of parens will be added.
>>> reg_1 = r'a|b'
>>> reg_2 = r'(c|d|e)'
>>> concat([reg_1, reg_2])
'(a|b)(c|d|e)'
"""
output_list = []
for regex in regex_list:
output_list.append(consolidate(regex))
return r''.join(output_list)
def nocatchall(regex):
"""
Return a regex in which every paren carries a no-catch tag
"""
return re.sub(r'(?<!\\)(?P<leading>(\\\\)*)\((\?(:|P<.*?>))?', r'\g<leading>(?:', regex)
def option(regex):
"""
Return the regex with an option ('?') tag appended
>>> option(r'[ab]')
'[ab]?'
>>> option(r'(abc)')
'(?:abc)?'
>>> option('abc')
'(?:abc)?'
"""
return nocatch(regex) + '?'
```
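A minimal usage sketch (not part of the original source), assuming the module is importable as `txttk.retools`; the expected outputs follow the docstrings and unit tests above:
```python
# Hedged example: a few retools helpers side by side.
from txttk import retools  # assumed import path

print(retools.condense(['she', 'he', 'her', 'hemoglobin']))  # he(moglobin|r)?|she (per the doctest)
print(retools.parallel([r'abc', r'(d|ef)']))                 # abc|d|ef
print(retools.concat([r'a|b', r'(c|de)', r'[fg]']))          # (a|b)(c|de)[fg]
print(retools.option(r'abc'))                                # (?:abc)?
```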
|
{
"source": "jerpint/jag",
"score": 3
}
|
#### File: tensorflow/lm/mlflow_utils.py
```python
import mlflow
from tensorflow.keras.callbacks import Callback
class MLflowLogger(Callback):
"""
Keras callback for logging metrics and final model with MLflow.
Metrics are logged after every epoch. The logger keeps track of the best model
based on the validation metric. At the end of the training, the best model is
logged with MLflow.
"""
def __init__(self): super().__init__()
def on_epoch_end(self, epoch, logs=None):
"""
Log Keras metrics with MLflow. Update the best model if the model
improved on the validation data.
"""
if not logs:
return
for name, value in logs.items():
mlflow.log_metric(name, value, step=epoch)
def on_train_end(self, *args, **kwargs):
"""
Log the best model with MLflow and evaluate it on the train and
validation data so that the metrics stored with MLflow
reflect the logged model.
"""
pass
```
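A minimal usage sketch (not part of the original source); the toy data, model, and layer sizes are placeholders, and only `MLflowLogger` comes from the file above:
```python
# Hedged example: log per-epoch Keras metrics to an MLflow run.
import numpy as np
import mlflow
import tensorflow as tf

x = np.random.rand(64, 10).astype('float32')
y = np.random.randint(0, 2, size=(64, 1))

model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation='relu', input_shape=(10,)),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

with mlflow.start_run():
    # MLflowLogger.on_epoch_end forwards each metric in `logs` to mlflow.log_metric
    model.fit(x, y, validation_split=0.25, epochs=3, callbacks=[MLflowLogger()])
```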
#### File: jag/models/transformer.py
```python
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras import initializers as init
from tensorflow.keras import backend as K
import numpy as np
import math
class LayerNormalization(layers.Layer):
r"""Applies Layer Normalization over a mini-batch of inputs as described in
the paper `Layer Normalization`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated separately over the last
dimension.
:math:`\gamma` and :math:`\beta` are learnable affine transform parameters.
Args:
eps: a value added to the denominator for numerical stability. Default: 1e-5
Shape:
- Input: :math:`(N, *)`
- Output: :math:`(N, *)` (same shape as input)
.. _`Layer Normalization`: https://arxiv.org/abs/1607.06450
"""
def __init__(self, eps=1e-5, **kwargs):
self.eps = eps
super(LayerNormalization, self).__init__(**kwargs)
def build(self, input_shape):
self.gamma = self.add_weight(
name='gamma', shape=input_shape[-1:],
initializer=init.Ones(),
trainable=True
)
self.beta = self.add_weight(
name='beta',
shape=input_shape[-1:],
initializer=init.Zeros(),
trainable=True
)
super().build(input_shape)
def call(self, x):
u = K.mean(x, axis=-1, keepdims=True)
s = K.mean(K.square(x - u), axis=-1, keepdims=True)
z = (x - u) / K.sqrt(s + self.eps)
return self.gamma * z + self.beta
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'eps': self.eps,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
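# Illustrative note (not part of the original source): LayerNormalization preserves
# the input shape and normalizes each position over the last (feature) axis, e.g.
#   ln = LayerNormalization(eps=1e-5)
#   y = ln(tf.ones((2, 4, 8)))  # shape (2, 4, 8); a constant input maps to beta (zeros at init)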
class Embedding(layers.Layer):
r"""Implements the unified Embedding Layer of the Transformer architecture.
It performs positional embeddings, token embeddings and, eventually, segment
embeddings.
Args:
output_dim: dimension of the embeddings. Default: 768.
dropout: dropout rate to be applied on the embeddings. Default: 0.1.
vocab_size: size of the vocabulary. Default: 30000.
max_len: maximum length of the input sequence. Default: 512.
trainable_pos_embedding: whether or not to train the positional embeddings.
Default: ``True``.
num_segments: number of segments. if None or set to zero, then the segment
embeddings won't be performed. Default: None.
use_one_dropout: if ``True``, the different embeddings will be summed up
before applying dropout, otherwise dropout will be applied to each embedding type
independently before summing them. Default: ``False``.
use_embedding_layer_norm: if ``True``, layer normalization will be applied on
the resulting embeddings. Default: ``False``.
layer_norm_epsilon: parameter of the layer normalization operation. Default: 1e-5
Shape:
- Input: :math:`(N, L)`
- Output: :math:`(N, L, output_dim)`
"""
def __init__(
self, output_dim=768, dropout=0.1, vocab_size=30000,
max_len=512, trainable_pos_embedding=True, num_segments=None,
use_one_dropout=False, use_embedding_layer_norm=False,
layer_norm_epsilon=1e-5, **kwargs):
super(Embedding, self).__init__(**kwargs)
self.output_dim = output_dim
self.max_len = max_len
self.dropout = dropout
self.vocab_size = vocab_size
self.num_segments = num_segments
self.trainable_pos_embedding = trainable_pos_embedding
self.use_one_dropout = use_one_dropout
self.use_embedding_layer_norm = use_embedding_layer_norm
self.layer_norm_epsilon = layer_norm_epsilon
if self.num_segments is None or (self.num_segments == 0):
self.segment_emb = None
else:
self.segment_emb = layers.Embedding(
self.num_segments, self.output_dim,
input_length=self.max_len
)
if self.trainable_pos_embedding:
self.pos_emb = layers.Embedding(
max_len, output_dim, input_length=max_len
)
else:
self.pos_emb = layers.Embedding(
max_len, output_dim, input_length=max_len,
trainable=False,
weights=[Embedding._get_pos_encoding_matrix(
max_len, output_dim)]
# embeddings_initializer=Embedding._get_pos_encoding_matrix(
# max_len, output_dim
# )
)
self.token_emb = layers.Embedding(
self.vocab_size, output_dim, input_length=max_len
)
self.embedding_dropout = layers.Dropout(self.dropout)
self.add_embeddings = layers.Add()
if self.use_embedding_layer_norm:
self.embedding_layer_norm = LayerNormalization(
self.layer_norm_epsilon)
else:
self.embedding_layer_norm = None
def compute_output_shape(self, input_shape):
shape = tf.TensorShape(input_shape[0]).as_list()
shape.append(self.output_dim)
return tf.TensorShape(shape)
def get_config(self):
config = {
'max_len': self.max_len,
'use_one_dropout': self.use_one_dropout,
'output_dim': self.output_dim,
'dropout': self.dropout,
'num_segments': self.num_segments,
'vocab_size': self.vocab_size,
'trainable_pos_embedding': self.trainable_pos_embedding,
'use_embedding_layer_norm': self.use_embedding_layer_norm,
'layer_norm_epsilon': self.layer_norm_epsilon
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
if self.num_segments is None or (self.num_segments == 0):
tokens, pos_ids = inputs
segment_embedding = None
else:
tokens, segment_ids, pos_ids = inputs
segment_embedding = self.segment_emb(segment_ids)
pos_embedding = self.pos_emb(pos_ids)
token_embedding = self.token_emb(tokens)
if self.use_one_dropout:
embed_list = [] if segment_embedding is None else [segment_embedding]
embed_list.extend([pos_embedding, token_embedding])
sum_embed = self.add_embeddings(embed_list)
if self.embedding_layer_norm is not None:
sum_embed = self.embedding_layer_norm(sum_embed)
return self.embedding_dropout(sum_embed)
else:
embed_list = [] if segment_embedding is None else [
self.embedding_dropout(segment_embedding)
]
embed_list.extend([
self.embedding_dropout(pos_embedding),
self.embedding_dropout(token_embedding)
])
sum_embed = self.add_embeddings(embed_list)
if self.embedding_layer_norm is not None:
sum_embed = self.embedding_layer_norm(sum_embed)
return sum_embed
@classmethod
def from_config(cls, config):
return cls(**config)
@staticmethod
def _get_pos_encoding_matrix(max_len, d_emb):
pos_enc = np.array(
[
[pos / np.power(10000, 2 * (j // 2) / d_emb)
for j in range(d_emb)]
if pos != 0 else np.zeros(d_emb)
for pos in range(max_len)
],
dtype=np.float32
)
pos_enc[1:, 0::2] = np.sin(pos_enc[1:, 0::2]) # dim 2i
pos_enc[1:, 1::2] = np.cos(pos_enc[1:, 1::2]) # dim 2i+1
return pos_enc
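# Illustrative note (not part of the original source): with num_segments=2 the layer
# expects [tokens, segment_ids, pos_ids], each of shape (N, L); with num_segments=None/0
# it expects [tokens, pos_ids]. Either way the output has shape (N, L, output_dim), e.g.
#   emb = Embedding(output_dim=16, vocab_size=100, max_len=8, num_segments=0)
#   out = emb([token_ids, position_ids])  # (N, 8, 16); token_ids/position_ids are hypothetical tensors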
class ScaledDotProductAttention(layers.Layer):
r"""Implements the scaled dot product attention mechanism.
Args:
temperature: the normalizing constant.
attn_dropout: dropout rate to be applied on the result. Default: 0.1.
use_attn_mask: whether or not the layer expects to use mask in the computation.
Default: ``False``.
neg_inf: constant representing the negative infinite value. Default: ``-np.inf``.
Inputs:
``query``: the query of dimension :math:`(N, H, L, Dk)`
``keys``: the keys of dimension :math:`(N, H, Dk, L)`
``values``: the values of dimension :math:`(N, H, L, Dv)`
``mask`` (only if use_attn_mask is True): the mask of dimension
:math:`(N, 1, L, L)`.
Outputs:
``result``: the result of the operation :math:`(N, H, L, Dv)`
``attention_weight``: the attention values :math:`(N, H, L, L)`
"""
def __init__(
self, temperature, attn_dropout=0.1, use_attn_mask=False,
neg_inf=-np.inf, **kwargs):
super(ScaledDotProductAttention, self).__init__(**kwargs)
self.temperature = temperature
self.attn_dropout = attn_dropout
self.neg_inf = float(neg_inf)
self.use_attn_mask = use_attn_mask
self.dropout = layers.Dropout(self.attn_dropout)
self.softmax = layers.Softmax(axis=-1)
def compute_output_shape(self, input_shape):
shape1 = tf.TensorShape(input_shape[0]).as_list()
shape2 = tf.TensorShape(input_shape[1]).as_list()
shape3 = tf.TensorShape(input_shape[2]).as_list()
ret_shape1 = list(shape1)
ret_shape1[-1] = shape3[-1]
ret_shape2 = list(shape1)
ret_shape2[-1] = shape2[-1]
ret_shape1 = tf.TensorShape(ret_shape1)
ret_shape2 = tf.TensorShape(ret_shape2)
return [ret_shape1, ret_shape2]
def call(self, inputs):
# q and v are B, H, L, C//H ; k is B, H, C//H, L ; mask is B, 1, L, L
# q: B, H, lq, dk and v: B, H, lv, dv and k:B, H, dk, Lk and mask: B,
# 1, Lq, Lk
if self.use_attn_mask:
q, k, v, mask = inputs
else:
q, k, v = inputs
mask = None
attn = K.batch_dot(q, k) # attn is of shape B, H, Lq, Lk
attn = attn / self.temperature
if mask is not None:
mask = tf.cast(mask, attn.dtype)
attn = (mask * attn) + ((1.0 - mask) * self.neg_inf)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = K.batch_dot(attn, v) # output: B, H, L, C//H (B, H, Lq, dv)
return [output, attn]
def get_config(self):
config = {
'temperature': self.temperature,
'attn_dropout': self.attn_dropout,
'neg_inf': self.neg_inf,
'use_attn_mask': self.use_attn_mask,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
class MultiHeadAttention(layers.Layer):
r"""Implements the multi head attention mechanism.
Args:
n_head: number of heads.
d_model: dimension of the ouput results.
d_k: dimension of the keys and the queries.
d_v: dimension of the values.
attention_dropout: dropout rate to be applied on each single attention
head. Default: 0.1.
dropout: dropout rate to be applied on the projection of
the concatenation of all attention heads. Default: 0.1.
use_attn_mask: whether or not the layer expects to use mask in the computation.
Default: ``False``.
layer_norm_epsilon: parameter of the layer normalization operation. Default: 1e-5
neg_inf: constant representing the negative infinite value. Default: ``-np.inf``.
Inputs:
``seq``: the input sequence of dimension :math:`(N, L, d_model)`
``mask`` (only if use_attn_mask is True): the mask of dimension
:math:`(N, 1, L, L)`.
Outputs:
``result``: the result of the operation :math:`(N, L, d_model)`
``attention_weight``: the attention values :math:`(N, n_head, L, L)`
"""
def __init__(
self, n_head, d_model, d_k, d_v, attention_dropout=0.1,
dropout=0.1, use_attn_mask=False, layer_norm_epsilon=1e-5,
neg_inf=-np.inf, **kwargs):
super(MultiHeadAttention, self).__init__(**kwargs)
self.n_head = n_head
self.d_model = d_model
self.d_k = d_k
self.d_v = d_v
self.attention_dropout = attention_dropout
self.use_attn_mask = use_attn_mask
self.neg_inf = neg_inf
self.dropout = dropout
self.layer_norm_epsilon = layer_norm_epsilon
self.c_attn = layers.Conv1D(
n_head * (d_k * 2 + d_v), 1, input_shape=(None, self.d_model)
)
self.attention = ScaledDotProductAttention(
temperature=np.power(d_k, 0.5),
attn_dropout=self.attention_dropout,
neg_inf=self.neg_inf,
use_attn_mask=self.use_attn_mask
)
self.c_attn_proj = layers.Conv1D(
d_model, 1, input_shape=(None, n_head * d_v)
)
self.multihead_dropout = layers.Dropout(self.dropout)
self.multihead_add = layers.Add()
self.multihead_norm = LayerNormalization(self.layer_norm_epsilon)
@staticmethod
def _shape_list(x):
tmp = K.int_shape(x)
tmp = list(tmp)
tmp[0] = -1
return tmp
@staticmethod
def _split_heads(x, n, k=False):
x_shape = MultiHeadAttention._shape_list(x)
m = x_shape[-1]
new_x_shape = x_shape[:-1] + [n, m // n]
new_x = K.reshape(x, new_x_shape)
return K.permute_dimensions(new_x, [0, 2, 3, 1] if k else [0, 2, 1, 3])
@staticmethod
def _merge_heads(x):
new_x = K.permute_dimensions(x, [0, 2, 1, 3])
x_shape = MultiHeadAttention._shape_list(new_x)
new_x_shape = x_shape[:-2] + [np.prod(x_shape[-2:])]
return K.reshape(new_x, new_x_shape)
def compute_output_shape(self, input_shape):
x = input_shape[0] if self.use_attn_mask else input_shape
shape = tf.TensorShape(x).as_list()
shape1 = list(shape)
shape1[-1] = self.d_model
shape2 = [shape1[0], self.n_head, shape1[1], shape1[1]]
ret_shape1 = tf.TensorShape(shape1)
ret_shape2 = tf.TensorShape(shape2)
return [ret_shape1, ret_shape2]
def call(self, inputs):
if self.use_attn_mask:
x, mask = inputs
else:
x = inputs
mask = None
residual = x
x = self.c_attn(x)
q_l = self.n_head * self.d_k
k_l = 2 * self.n_head * self.d_k
q, k, v = x[:, :, :q_l], x[:, :, q_l:k_l], x[:, :, k_l:]
q = MultiHeadAttention._split_heads(q, self.n_head) # B, H, L, d_k
k = MultiHeadAttention._split_heads(
k, self.n_head, k=True) # B, H, d_k, L
v = MultiHeadAttention._split_heads(v, self.n_head) # B, H, L, d_v
args = [q, k, v]
if self.use_attn_mask:
args.append(mask)
output, attn = self.attention(args) # (B, H, Lq, dv), (B, H, Lq, Lk)
output = MultiHeadAttention._merge_heads(output) # (B, Lq, H x dv)
output = self.c_attn_proj(output)
output = self.multihead_dropout(output)
output = self.multihead_norm(self.multihead_add([output, residual]))
return [output, attn]
def get_config(self):
config = {
'n_head': self.n_head,
'd_model': self.d_model,
'd_k': self.d_k,
'd_v': self.d_v,
'attention_dropout': self.attention_dropout,
'use_attn_mask': self.use_attn_mask,
'neg_inf': self.neg_inf,
'dropout': self.dropout,
'layer_norm_epsilon': self.layer_norm_epsilon
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
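# Illustrative note (not part of the original source): MultiHeadAttention consumes a
# sequence x of shape (N, L, d_model) (plus an optional (N, 1, L, L) mask) and returns
# [output, attention] with shapes (N, L, d_model) and (N, n_head, L, L), e.g.
#   mha = MultiHeadAttention(n_head=4, d_model=16, d_k=4, d_v=4, use_attn_mask=False)
#   out, attn = mha(x)  # x is a hypothetical (N, L, 16) tensor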
class GeLU(layers.Layer):
r"""Implementation of the gelu activation function as described in
the paper `Gaussian Error Linear Units (GELUs)`_ .
.. math::
0.5 * x * (1 + \tanh(\sqrt{2 / \pi} * (x + 0.044715 * x^{3})))
Args:
accurate: if ``False``, an approximation of this function is computed.
Default: ``False``.
Shape:
- Input: :math:`(N, *)`
- Output: :math:`(N, *)` (same shape as input)
.. _`Gaussian Error Linear Units (GELUs)`: https://arxiv.org/abs/1606.08415
"""
def __init__(self, accurate=False, **kwargs):
super().__init__(**kwargs)
self.accurate = accurate
def call(self, x, **kwargs):
if not self.accurate:
output = 0.5 * x * (
1.0 + K.tanh(math.sqrt(2 / math.pi) *
(x + 0.044715 * K.pow(x, 3)))
)
return output
else:
return x * 0.5 * (1.0 + tf.erf(x / math.sqrt(2.0)))
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'accurate': self.accurate,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
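# Illustrative note (not part of the original source): GeLU() uses the tanh approximation
# 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))), while GeLU(accurate=True)
# evaluates the exact erf form; both return 0 for x == 0, e.g. GeLU()(tf.zeros((1,))) -> [0.].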
class PositionwiseFeedForward(layers.Layer):
r"""Implements the position wise feed forward network.
Args:
d_in: dimension of the input data.
d_hid: dimension of the intermediate dense layer.
dropout: dropout rate to be applied on the results. Default: 0.1.
d_out: dimension of the output data. if ``None``, it is set to d_in.
layer_norm_epsilon: parameter of the layer normalization operation. Default: 1e-5
use_gelu: if ``True``, use the ``GeLU`` activation layer instead of
the ``ReLU`` one. Default: ``False``
accurate_gelu: whether or not to use accurate (vs approximate)
computation of the ``GeLU`` operator. Default: ``False``.
Shape:
- Input: :math:`(N, L, d_in)`
- Output: :math:`(N, L, d_out)`
"""
def __init__(
self, d_in, d_hid, dropout=0.1, d_out=None,
layer_norm_epsilon=1e-5, use_gelu=False,
accurate_gelu=False, **kwargs):
super(PositionwiseFeedForward, self).__init__(**kwargs)
if d_out is None:
d_out = d_in
self.d_in = d_in
self.d_out = d_out
self.d_hid = d_hid
self.dropout = dropout
self.layer_norm_epsilon = layer_norm_epsilon
self.use_gelu = use_gelu
self.accurate_gelu = accurate_gelu
self.conv1 = layers.Conv1D(
self.d_hid, 1, input_shape=(None, self.d_in)
)
self.conv2 = layers.Conv1D(
self.d_out, 1, input_shape=(None, self.d_hid)
)
if not self.use_gelu:
self.activation = layers.ReLU()
else:
self.activation = GeLU(accurate=self.accurate_gelu)
self.pff_dropout = layers.Dropout(self.dropout)
self.pff_add = layers.Add()
self.pff_norm = LayerNormalization(self.layer_norm_epsilon)
def compute_output_shape(self, input_shape):
shape = tf.TensorShape(input_shape).as_list()
shape[-1] = self.d_out
return tf.TensorShape(shape)
def call(self, x):
residual = x
output = self.conv2(self.activation(self.conv1(x)))
output = self.pff_dropout(output)
if (self.d_out == self.d_in):
output = self.pff_norm(self.pff_add([output, residual]))
elif (self.d_out % self.d_in == 0):
tmp = K.int_shape(residual)
tmp1 = list(tmp)
tmp1.append(self.d_out // self.d_in)
new_o = K.reshape(output, tmp1)
tmp2 = list(tmp)
tmp2.append(1)
new_r = K.reshape(residual, tmp2)
output = self.pff_add([new_o, new_r])
tmp3 = list(tmp)
tmp3[-1] = self.d_out
output = K.reshape(output, tmp3)
output = self.pff_norm(output)
else:
output = self.pff_norm(output)
return output
def get_config(self):
config = {
'd_in': self.d_in,
'd_out': self.d_out,
'd_hid': self.d_hid,
'dropout': self.dropout,
'layer_norm_epsilon': self.layer_norm_epsilon,
'use_gelu': self.use_gelu,
'accurate_gelu': self.accurate_gelu,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
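# Illustrative note (not part of the original source): the position-wise feed-forward block
# maps (N, L, d_in) -> (N, L, d_out) via two 1x1 convolutions; when d_out == d_in the input
# is added back as a residual before layer normalization, e.g.
#   pff = PositionwiseFeedForward(d_in=16, d_hid=64)
#   y = pff(x)  # x is a hypothetical (N, L, 16) tensor; y has the same shape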
class Pooler(layers.Layer):
""" Implements the pooling operation of the transformer architecture.
This is done by simply taking the hidden state corresponding to the first token
on which some nonlinear transformations are performed.
Args:
d_hid: dimension of the input data.
Shape:
- Input: :math:`(N, L, d_hid)`
- Output: :math:`(N, d_hid)`
"""
def __init__(self, d_hid, **kwargs):
super(Pooler, self).__init__(**kwargs)
self.d_hid = d_hid
self.dense = layers.Dense(
self.d_hid, input_shape=(self.d_hid, )
)
def compute_output_shape(self, input_shape):
shape = tf.TensorShape(input_shape).as_list()
shape[-2] = self.d_hid
return tf.TensorShape(shape[:-1])
def call(self, x):
first_token = x[:, 0]
pooled_output = K.tanh(self.dense(first_token))
return pooled_output
def get_config(self):
config = {
'd_hid': self.d_hid,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
class LMPredictionHeadTransform(layers.Layer):
def __init__(
self, d_hid, layer_norm_epsilon=1e-5,
use_gelu=True, accurate_gelu=False, **kwargs):
super(LMPredictionHeadTransform, self).__init__(**kwargs)
self.d_hid = d_hid
self.use_gelu = use_gelu
self.accurate_gelu = accurate_gelu
self.layer_norm_epsilon = layer_norm_epsilon
self.layerNorm = LayerNormalization(self.layer_norm_epsilon)
self.dense = layers.Dense(
self.d_hid, input_shape=(self.d_hid, )
)
if not self.use_gelu:
self.activation = layers.ReLU()
else:
self.activation = GeLU(accurate=self.accurate_gelu)
def compute_output_shape(self, input_shape):
shape = tf.TensorShape(input_shape).as_list()
shape[-1] = self.d_hid
return tf.TensorShape(shape)
def call(self, x):
return self.layerNorm(self.activation(self.dense(x)))
def get_config(self):
config = {
'd_hid': self.d_hid,
'use_gelu': self.use_gelu,
'accurate_gelu': self.accurate_gelu,
'layer_norm_epsilon': self.layer_norm_epsilon
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
class LMPredictionHead(layers.Layer):
"""Implements a module for handling Masked Language Modeling task.
"""
def __init__(
self, d_hid, vocab_size, embedding_weights,
layer_norm_epsilon=1e-5, use_gelu=True,
accurate_gelu=False, **kwargs):
super(LMPredictionHead, self).__init__(**kwargs)
self.d_hid = d_hid
self.use_gelu = use_gelu
self.accurate_gelu = accurate_gelu
self.layer_norm_epsilon = layer_norm_epsilon
self.vocab_size = vocab_size
output_shape = [vocab_size]
output_shape = tf.TensorShape(output_shape)
self.output_bias = self.add_weight(
name='output_bias',
shape=output_shape,
initializer=init.Zeros(),
trainable=True
)
self.transform = LMPredictionHeadTransform(
d_hid, layer_norm_epsilon, use_gelu, accurate_gelu
)
# self.decoder = layers.Dense(
# self.vocab_size, use_bias=False
# )
# self.decoder.build([None, d_hid])
# # self.decoder.set_weights([embedding_weights])
# self.decoder.kernel = embedding_weights
self.decoder = embedding_weights
def compute_output_shape(self, input_shape):
shape = tf.TensorShape(input_shape).as_list()
shape[-1] = self.vocab_size
return tf.TensorShape(shape)
def call(self, x):
x = self.transform(x)
# x = self.decoder(x)
x = K.dot(x, K.transpose(self.decoder.embeddings))
x = x + self.output_bias
return x
def get_config(self):
config = {
'd_hid': self.d_hid,
'use_gelu': self.use_gelu,
'accurate_gelu': self.accurate_gelu,
'layer_norm_epsilon': self.layer_norm_epsilon,
'vocab_size': self.vocab_size,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class MaskedLM_NextSentenceHead(layers.Layer):
"""Implements a module for handling both Masked Language Modeling task
and Next sentence prediction task.
"""
def __init__(
self, d_hid, vocab_size, embedding_weights=None,
layer_norm_epsilon=1e-5, use_gelu=True,
accurate_gelu=False, use_masked_lm=True,
use_next_sp=True, **kwargs):
super(MaskedLM_NextSentenceHead, self).__init__(**kwargs)
assert (use_next_sp or use_masked_lm)
self.d_hid = d_hid
self.use_gelu = use_gelu
self.accurate_gelu = accurate_gelu
self.layer_norm_epsilon = layer_norm_epsilon
self.vocab_size = vocab_size
self.use_masked_lm = use_masked_lm
self.use_next_sp = use_next_sp
if self.use_masked_lm:
assert (embedding_weights is not None)
self.predictions = LMPredictionHead(
d_hid, vocab_size, embedding_weights,
layer_norm_epsilon, use_gelu, accurate_gelu
)
if self.use_next_sp:
self.seq_relationship = layers.Dense(2, input_shape=(self.d_hid, ))
def compute_output_shape(self, input_shape):
if self.use_masked_lm and self.use_next_sp:
shape1 = tf.TensorShape(input_shape[0]).as_list()
shape2 = tf.TensorShape(input_shape[1]).as_list()
shape1[-1] = self.vocab_size
shape2[-1] = 2
return [tf.TensorShape(shape1), tf.TensorShape(shape2)]
elif self.use_masked_lm:
shape1 = tf.TensorShape(input_shape).as_list()
shape1[-1] = self.vocab_size
return tf.TensorShape(shape1)
elif self.use_next_sp:
shape1 = tf.TensorShape(input_shape).as_list()
shape1[-1] = 2
return tf.TensorShape(shape1)
raise ValueError('incompatible mode')
def call(self, inputs):
if self.use_masked_lm and self.use_next_sp:
sequence_output, pooled_output = inputs
elif self.use_masked_lm:
sequence_output = inputs
elif self.use_next_sp:
pooled_output = inputs
output = []
if self.use_masked_lm:
a = self.predictions(sequence_output)
output.append(a)
if self.use_next_sp:
b = self.seq_relationship(pooled_output)
output.append(b)
if len(output) == 1:
output = output[0]
return output
def get_config(self):
config = {
'd_hid': self.d_hid,
'use_gelu': self.use_gelu,
'accurate_gelu': self.accurate_gelu,
'layer_norm_epsilon': self.layer_norm_epsilon,
'vocab_size': self.vocab_size,
'use_next_sp': self.use_next_sp,
'use_masked_lm': self.use_masked_lm,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class SequenceClassificationTask(layers.Layer):
"""Implements a module for handling sequence level classification task.
"""
def __init__(
self, d_hid, num_labels, dropout=0.1, **kwargs):
super(SequenceClassificationTask, self).__init__(**kwargs)
self.d_hid = d_hid
self.num_labels = num_labels
self.dropout = dropout
self.seq_class_dropout = layers.Dropout(self.dropout)
self.classifier = layers.Dense(
self.num_labels, input_shape=(self.d_hid, )
)
def compute_output_shape(self, input_shape):
shape = tf.TensorShape(input_shape).as_list()
shape[-1] = self.num_labels
return tf.TensorShape(shape)
def call(self, pooled):
x = self.seq_class_dropout(pooled)
x = self.classifier(x)
return x
def get_config(self):
config = {
'd_hid': self.d_hid,
'num_labels': self.num_labels,
'dropout': self.dropout,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
class MultipleChoiceTask(layers.Layer):
"""Implements a module for handling multiple choice task.
"""
def __init__(
self, d_hid, num_choices, dropout=0.1, **kwargs):
super(MultipleChoiceTask, self).__init__(**kwargs)
self.d_hid = d_hid
self.dropout = dropout
self.num_choices = num_choices
self.mod_dropout = layers.Dropout(self.dropout)
self.classifier = layers.Dense(
1, input_shape=(self.d_hid, )
)
def compute_output_shape(self, input_shape):
# pooled inputs of shape (N, d_hid) are reshaped to (N // num_choices, num_choices) in call()
return tf.TensorShape([None, self.num_choices])
def call(self, pooled):
x = self.mod_dropout(pooled)
x = self.classifier(x)
x = K.reshape(x, [-1, self.num_choices])
return x
def get_config(self):
config = {
'd_hid': self.d_hid,
'dropout': self.dropout,
'num_choices': self.num_choices
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
class TokenClassificationTask(layers.Layer):
"""Implements a module for handling token level classification task.
"""
def __init__(
self, d_hid, num_labels, dropout=0.1, **kwargs):
super(TokenClassificationTask, self).__init__(**kwargs)
self.d_hid = d_hid
self.num_labels = num_labels
self.dropout = dropout
self.mod_dropout = layers.Dropout(self.dropout)
self.classifier = layers.Dense(
self.num_labels, input_shape=(self.d_hid, )
)
def compute_output_shape(self, input_shape):
shape = tf.TensorShape(input_shape).as_list()
shape[-1] = self.num_labels
return tf.TensorShape(shape)
def call(self, x):
x = self.mod_dropout(x)
x = self.classifier(x)
return x
def get_config(self):
config = {
'd_hid': self.d_hid,
'num_labels': self.num_labels,
'dropout': self.dropout,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
class QuestionAnsweringTask(layers.Layer):
"""Implements a module for handling token level classification task.
"""
def __init__(
self, d_hid, dropout=0.1, **kwargs):
super(QuestionAnsweringTask, self).__init__(**kwargs)
self.d_hid = d_hid
self.dropout = dropout
self.mod_dropout = layers.Dropout(self.dropout)
self.qa_outputs = layers.Dense(
2, input_shape=(self.d_hid, )
)
def compute_output_shape(self, input_shape):
shape = tf.TensorShape(input_shape).as_list()
return [tf.TensorShape(shape[:-1]), tf.TensorShape(shape[:-1])]
def call(self, x):
x = self.mod_dropout(x)
x = self.qa_outputs(x)
start_logits = x[:, :, 0]
end_logits = x[:, :, 1]
return [start_logits, end_logits]
def get_config(self):
config = {
'd_hid': self.d_hid,
'dropout': self.dropout,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
class EncoderLayer(layers.Layer):
r"""Implements an encoder layer of the transformer architecture.
Args:
d_model: dimension of the input data.
d_inner: dimension of the intermediate hidden layer.
n_head: number of heads.
d_k: dimension of the keys and the queries.
d_v: dimension of the values.
d_out: dimension of the output data. if ``None``, it is set to d_model.
residual_dropout: dropout rate to be applied on each residual operation
results. Default: 0.1.
attention_dropout: dropout rate to be applied on each attention
mechanism results. Default: 0.1.
use_pad_mask: whether or not the layer expects to use pad mask in the
computation. Default: ``False``.
use_attn_mask: whether or not the layer expects to use attention mask
in the computation. Default: ``True``.
neg_inf: constant representing the negative infinite value. Default: ``-np.inf``.
ln_epsilon: parameter of the layer normalization operation. Default: 1e-5
use_gelu: if ``True``, use the ``GeLU`` activation layer instead of
the ``ReLU`` one. Default: ``True``
accurate_gelu: whether or not to use accurate (vs approximate)
computation of the ``GeLU`` operator. Default: ``False``.
Inputs:
``seq``: the input sequence of dimension :math:`(N, L, d_model)`
``attn_mask`` (only if use_attn_mask is True): the attn_mask of dimension
:math:`(N, 1, L, L)`.
``pad_mask`` (only if use_pad_mask is True): the pad_mask of dimension
:math:`(N, L, 1)`.
Outputs:
``result``: the result of the operation :math:`(N, L, d_out)`
``attention_weight``: the attention values :math:`(N, n_head, L, L)`
"""
def __init__(
self, d_model, d_inner, n_head, d_k, d_v, d_out=None,
residual_dropout=0.1, attention_dropout=0.1, use_pad_mask=False,
use_attn_mask=True, neg_inf=-np.inf, ln_epsilon=1e-5,
use_gelu=True, accurate_gelu=False, **kwargs):
super(EncoderLayer, self).__init__(**kwargs)
if d_out is None:
d_out = d_model
self.d_model = d_model
self.d_inner = d_inner
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.d_out = d_out
self.residual_dropout = residual_dropout
self.attention_dropout = attention_dropout
self.use_attn_mask = use_attn_mask
self.use_pad_mask = use_pad_mask
self.neg_inf = neg_inf
self.ln_epsilon = ln_epsilon
self.use_gelu = use_gelu
self.accurate_gelu = accurate_gelu
self.slf_attn = MultiHeadAttention(
n_head, d_model, d_k, d_v, attention_dropout=attention_dropout,
dropout=residual_dropout, use_attn_mask=use_attn_mask,
layer_norm_epsilon=ln_epsilon, neg_inf=neg_inf
)
self.pos_ffn = PositionwiseFeedForward(
d_model, d_inner, dropout=residual_dropout, d_out=d_out,
layer_norm_epsilon=ln_epsilon, use_gelu=use_gelu,
accurate_gelu=accurate_gelu
)
def compute_output_shape(self, input_shape):
if self.use_attn_mask or self.use_pad_mask:
shape = tf.TensorShape(input_shape[0]).as_list()
else:
shape = tf.TensorShape(input_shape).as_list()
shape[-1] = self.d_out
shape2 = [shape[0], self.n_head, shape[1], shape[1]]
return [tf.TensorShape(shape), tf.TensorShape(shape2)]
def call(self, inputs):
if self.use_attn_mask and self.use_pad_mask:
x, attn_mask, pad_mask = inputs
elif self.use_attn_mask:
x, attn_mask = inputs
pad_mask = None
elif self.use_pad_mask:
x, pad_mask = inputs
attn_mask = None
else:
x = inputs
attn_mask = None
pad_mask = None
args = [x]
if self.use_attn_mask:
args.append(attn_mask)
if len(args) == 1:
args = args[0]
output, attn = self.slf_attn(args)
if self.use_pad_mask:
output = pad_mask * output
output = self.pos_ffn(output)
if self.use_pad_mask:
output = pad_mask * output
return [output, attn]
def get_config(self):
config = {
'd_model': self.d_model,
'd_inner': self.d_inner,
'n_head': self.n_head,
'd_k': self.d_k,
'd_v': self.d_v,
'd_out': self.d_out,
'residual_dropout': self.residual_dropout,
'attention_dropout': self.attention_dropout,
'use_attn_mask': self.use_attn_mask,
'use_pad_mask': self.use_pad_mask,
'neg_inf': self.neg_inf,
'ln_epsilon': self.ln_epsilon,
'use_gelu': self.use_gelu,
'accurate_gelu': self.accurate_gelu,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
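# Illustrative note (not part of the original source): a single encoder block chains
# self-attention and the position-wise feed-forward network, e.g.
#   enc = EncoderLayer(d_model=16, d_inner=64, n_head=4, d_k=4, d_v=4, use_attn_mask=False)
#   out, attn = enc(x)  # x: hypothetical (N, L, 16) tensor -> out: (N, L, 16), attn: (N, 4, L, L)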
class DecoderLayer(layers.Layer):
r"""Implements a decoder layer of the transformer architecture.
Args:
d_model: dimension of the input data.
d_inner: dimension of the intermediate hidden layer.
n_head: number of heads.
d_k: dimension of the keys and the queries.
d_v: dimension of the values.
d_out: dimension of the output data. if ``None``, it is set to d_model.
residual_dropout: dropout rate to be applied on each residual operation
results. Default: 0.1.
attention_dropout: dropout rate to be applied on each attention
mechanism results. Default: 0.1.
use_pad_mask: whether or not the layer expects to use pad mask in
the computation. Default: ``False``.
use_attn_mask: whether or not the layer expects to use attention
mask in the computation. Default: ``True``.
use_enc_output: whether or not the layer expects to use outputs from
the encoder in the computation. Default: ``True``.
use_enc_mask: whether or not the layer expects to use the masks from the
encoder in the computation. Default: ``False``.
neg_inf: constant representing the negative infinite value. Default: ``-np.inf``.
ln_epsilon: parameter of the layer normalization operation. Default: 1e-5
use_gelu: if ``True``, use the ``GeLU`` activation layer instead of
the ``ReLU`` one. Default: ``True``
accurate_gelu: whether or not to use accurate (vs approximate)
computation of the ``GeLU`` operator. Default: ``False``.
Inputs:
``seq``: the input sequence of dimension :math:`(N, L, d_model)`
``enc_outputs`` (only if use_enc_output is True): the output of
the encoder :math:`(N, Le, d_model)`
``attn_mask`` (only if use_attn_mask is True): the attn_mask of dimension
:math:`(N, 1, L, L)`.
``pad_mask`` (only if use_pad_mask is True): the pad_mask of dimension
:math:`(N, L, 1)`.
``enc_mask`` (only if use_enc_mask is True): the enc_mask of dimension
:math:`(N, 1, Le, Le)`.
Outputs:
``result``: the result of the operation :math:`(N, L, d_out)`
``attention_weight``: the attention values :math:`(N, n_head, L, L)`
``enc_attention_weight`` (only if use_enc_output is True): the attention
values on encoder outputs :math:`(N, n_head, L, Le)`
"""
def __init__(
self, d_model, d_inner, n_head, d_k, d_v, d_out=None,
residual_dropout=0.1, attention_dropout=0.1, use_pad_mask=False,
use_attn_mask=True, use_enc_output=True, use_enc_mask=False, neg_inf=-np.inf,
ln_epsilon=1e-5, use_gelu=True, accurate_gelu=False, **kwargs):
super(DecoderLayer, self).__init__(**kwargs)
if d_out is None:
d_out = d_model
if not use_enc_output:
use_enc_mask = False
self.d_model = d_model
self.d_inner = d_inner
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.d_out = d_out
self.residual_dropout = residual_dropout
self.attention_dropout = attention_dropout
self.use_attn_mask = use_attn_mask
self.use_pad_mask = use_pad_mask
self.use_enc_mask = use_enc_mask
self.use_enc_output = use_enc_output
self.neg_inf = neg_inf
self.ln_epsilon = ln_epsilon
self.use_gelu = use_gelu
self.accurate_gelu = accurate_gelu
self.slf_attn = MultiHeadAttention(
n_head, d_model, d_k, d_v, attention_dropout=attention_dropout,
dropout=residual_dropout, use_attn_mask=use_attn_mask,
layer_norm_epsilon=ln_epsilon, neg_inf=neg_inf
)
if self.use_enc_output:
self.enc_attn = MultiHeadAttention(
n_head, d_model, d_k, d_v, attention_dropout=attention_dropout,
dropout=residual_dropout, use_attn_mask=use_enc_mask,
layer_norm_epsilon=ln_epsilon, neg_inf=neg_inf
)
else:
self.enc_attn = None
self.pos_ffn = PositionwiseFeedForward(
d_model, d_inner, dropout=residual_dropout, d_out=d_out,
layer_norm_epsilon=ln_epsilon, use_gelu=use_gelu,
accurate_gelu=accurate_gelu
)
def compute_output_shape(self, input_shape):
# inputs arrive as a list whenever encoder outputs or masks are passed alongside x
if self.use_enc_output or self.use_attn_mask or self.use_pad_mask:
shape = tf.TensorShape(input_shape[0]).as_list()
else:
shape = tf.TensorShape(input_shape).as_list()
shape[-1] = self.d_out
shape2 = [shape[0], self.n_head, shape[1], shape[1]]
output_shape = [tf.TensorShape(shape), tf.TensorShape(shape2)]
if self.use_enc_output:
output_shape.append(tf.TensorShape(shape2))
return output_shape
def call(self, inputs):
if self.use_attn_mask and self.use_pad_mask and self.use_enc_mask:
x, enc, attn_mask, pad_mask, enc_mask = inputs
elif self.use_attn_mask and self.use_pad_mask:
if self.use_enc_output:
x, enc, attn_mask, pad_mask = inputs
enc_mask = None
else:
x, attn_mask, pad_mask = inputs
enc = None
enc_mask = None
elif self.use_attn_mask and self.use_enc_mask:
x, enc, attn_mask, enc_mask = inputs
pad_mask = None
elif self.use_pad_mask and self.use_enc_mask:
x, enc, pad_mask, enc_mask = inputs
attn_mask = None
elif self.use_attn_mask:
if self.use_enc_output:
x, enc, attn_mask = inputs
pad_mask = None
enc_mask = None
else:
x, attn_mask = inputs
pad_mask = None
enc_mask = None
enc = None
elif self.use_pad_mask:
if self.use_enc_output:
x, enc, pad_mask = inputs
attn_mask = None
enc_mask = None
else:
x, pad_mask = inputs
attn_mask = None
enc_mask = None
enc = None
elif self.use_enc_mask:
x, enc, enc_mask = inputs
attn_mask = None
pad_mask = None
else:
if self.use_enc_output:
x, enc = inputs
else:
x = inputs
enc_mask = None
attn_mask = None
pad_mask = None
args = [x]
if self.use_attn_mask:
args.append(attn_mask)
if len(args) == 1:
args = args[0]
output, attn = self.slf_attn(args)
if self.use_pad_mask:
output = pad_mask * output
if self.use_enc_output:
args = [output]
if self.use_enc_mask:
args.append(enc_mask)
if len(args) == 1:
args = args[0]
output, dec_attn = self.enc_attn(args)
if self.use_pad_mask:
output = pad_mask * output
else:
dec_attn = None
output = self.pos_ffn(output)
if self.use_pad_mask:
output = pad_mask * output
return [output, attn] if dec_attn is None else [output, attn, dec_attn]
def get_config(self):
config = {
'd_model': self.d_model,
'd_inner': self.d_inner,
'n_head': self.n_head,
'd_k': self.d_k,
'd_v': self.d_v,
'd_out': self.d_out,
'residual_dropout': self.residual_dropout,
'attention_dropout': self.attention_dropout,
'use_attn_mask': self.use_attn_mask,
'use_pad_mask': self.use_pad_mask,
'use_enc_mask': self.use_enc_mask,
'use_enc_output': self.use_enc_output,
'neg_inf': self.neg_inf,
'ln_epsilon': self.ln_epsilon,
'use_gelu': self.use_gelu,
'accurate_gelu': self.accurate_gelu,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
class TransformerEncoder(models.Model):
r"""Implements an encoder layer of the transformer architecture.
Args:
vocab_size: size of the vocabulary. Default: 30000.
n_layers: number of layers. Default: 12.
d_model: dimension of the embeddings. Default: 768.
d_inner: dimension of the intermediate hidden layer. Default: 3072.
n_head: number of heads. Default: 12.
d_k: dimension of the keys and the queries. Default: 64.
d_v: dimension of the values. Default: 64.
d_out: dimension of the output data. if ``None``, it is set to d_model.
Default: 768.
max_len: maximum length of the input sequence. Default: 512.
num_segments: number of segments. if None or set to zero, then the segment
embeddings won't be performed. Default: 2.
embedding_dropout: dropout rate to be applied on embedding results. Default: 0.1.
attention_dropout: dropout rate to be applied on each attention
mechanism results. Default: 0.1.
residual_dropout: dropout rate to be applied on each residual operation
results. Default: 0.1.
embedding_layer_norm: if ``True``, layer normalization will be applied on
the resulting embeddings. Default: ``False``.
layer_norm_epsilon: parameter of the layer normalization operation. Default: 1e-5
neg_inf: constant representing the negative infinite value. Default: ``-1e9``.
trainable_pos_embedding: whether or not to train the positional embeddings.
Default: ``True``.
use_one_embedding_dropout: if ``True``, the different embeddings will be
summed up before applying dropout, otherwise dropout will be applied to each
embedding type independently before summing them. Default: ``False``.
use_attn_mask: whether or not the layer expects to use attention mask in the
computation. Default: ``True``.
use_pad_mask: whether or not the layer expects to use pad mask in the computation
Default: ``False``.
use_gelu: if ``True``, use the ``GeLU`` activation layer instead of
the ``ReLU`` one. Default: ``True``
accurate_gelu: whether or not to use accurate (vs approximate)
computation of the ``GeLU`` operator. Default: ``False``.
use_pooler: whether or not to compute the pooled representation of
the input sequences. Default: ``False``.
use_masked_lm: whether or not to compute the masked language modeling outputs.
Default: ``False``.
use_next_sp: whether or not to compute the outputs of the next
sentence prediction task. Default: ``False``.
do_seq_class_task: whether or not to perform the sequence level
classification task. Default: ``False``.
do_mult_choice_task: whether or not to perform the multiple choice
classification task. Default: ``False``.
do_tok_class_task: whether or not to perform the token level
classification task. Default: ``False``.
do_qa_task: whether or not to perform the Question Answering
prediction task. Default: ``False``.
seq_class_num_labels: number of labels for the sequence level
classification task. Default: 2.
task_num_choices: number of choices for the multiple choice
classification task. Default: 2.
tok_class_num_labels: number of labels for the token level
classification task. Default: 2.
task_dropout: dropout rate to be applied on various tasks. Default: 0.1.
Inputs:
``seq``: the input sequence of dimension :math:`(N, L)`
``token_type_ids`` (only if num_segments > 0): segment types of the tokens,
of dimension :math:`(N, L)`, with values in the range [0, num_segments). E.g., for
num_segments = 2, type 0 corresponds to a `sentence A` token and type 1
corresponds to a `sentence B` token (see the BERT paper for more details).
``pos_tokens``: the position tokens over the input sequence of
dimension :math:`(N, L)`
``attn_mask`` (only if use_attn_mask is True): the attn_mask of dimension
:math:`(N, 1, L, L)`.
``pad_mask`` (only if use_pad_mask is True): the pad_mask of dimension
:math:`(N, L, 1)`.
Outputs:
``result``: the result of the operation of dimension :math:`(N, L, d_out)`
``pooled`` (only if use_pooler is True): the result of the pooler
operation of dimension :math:`(N, d_out)`
``lm_seq`` (only if use_masked_lm is True): the result of the masked LM task
of dimension :math:`(N, L, vocab_size)`
``next_sp_tgt`` (only if use_next_sp is True): the result of the next sentence
prediction task of dimension :math:`(N, 2)`
``seq_out`` (only if do_seq_class_task is True): the result of the sentence
classification task of dimension :math:`(N, seq_class_num_labels)`
``mult_out`` (only if do_mult_choice_task is True): the result of the multiple
choice task of dimension :math:`(N//task_num_choices, task_num_choices)`
``tok_out`` (only if do_tok_class_task is True): the result of the token
classification task of dimension :math:`(N, L, tok_class_num_labels)`
``qa_out_start`` (only if do_qa_task is True): the start logits of the QA prediction
task, of dimension :math:`(N, L)`
``qa_out_end`` (only if do_qa_task is True): the end logits of the QA prediction
task, of dimension :math:`(N, L)`
"""
def __init__(
self, vocab_size=30000, n_layers=12, d_model=768, d_inner=768 * 4,
n_head=12, d_k=64, d_v=64, d_out=768, max_len=512, num_segments=2,
embedding_dropout=0.1, attention_dropout=0.1, residual_dropout=0.1,
embedding_layer_norm=False, layer_norm_epsilon=1e-5, neg_inf=-1e9,
trainable_pos_embedding=True, use_one_embedding_dropout=False,
use_attn_mask=True, use_pad_mask=False, use_gelu=True,
accurate_gelu=False, use_pooler=False, use_masked_lm=False,
use_next_sp=False, do_seq_class_task=False, do_mult_choice_task=False,
do_tok_class_task=False, do_qa_task=False,
seq_class_num_labels=2, task_num_choices=2, tok_class_num_labels=2,
task_dropout=0.1, **kwargs):
super(TransformerEncoder, self).__init__()
if d_out is None:
d_out = d_model
if not use_pooler:
use_next_sp = False
if do_seq_class_task or do_mult_choice_task:
assert use_pooler
self.vocab_size = vocab_size
self.n_layers = n_layers
self.d_model = d_model
self.d_inner = d_inner
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.d_out = d_out
self.max_len = max_len
self.num_segments = num_segments
self.embedding_dropout = embedding_dropout
self.attention_dropout = attention_dropout
self.residual_dropout = residual_dropout
self.embedding_layer_norm = embedding_layer_norm
self.layer_norm_epsilon = layer_norm_epsilon
self.neg_inf = neg_inf
self.trainable_pos_embedding = trainable_pos_embedding
self.use_one_embedding_dropout = use_one_embedding_dropout
self.use_attn_mask = use_attn_mask
self.use_pad_mask = use_pad_mask
self.use_gelu = use_gelu
self.accurate_gelu = accurate_gelu
self.use_pooler = use_pooler
self.use_masked_lm = use_masked_lm
self.use_next_sp = use_next_sp
self.do_seq_class_task = do_seq_class_task
self.do_mult_choice_task = do_mult_choice_task
self.do_tok_class_task = do_tok_class_task
self.do_qa_task = do_qa_task
self.seq_class_num_labels = seq_class_num_labels
self.task_num_choices = task_num_choices
self.tok_class_num_labels = tok_class_num_labels
self.task_dropout = task_dropout
self.embed_layer = Embedding(
output_dim=d_model, dropout=embedding_dropout, vocab_size=vocab_size,
max_len=max_len, trainable_pos_embedding=trainable_pos_embedding,
num_segments=num_segments,
use_one_dropout=use_one_embedding_dropout,
use_embedding_layer_norm=embedding_layer_norm,
layer_norm_epsilon=layer_norm_epsilon
)
self.enc_layers = []
for i in range(n_layers):
self.enc_layers.append(
EncoderLayer(
d_model=d_model, d_inner=d_inner, n_head=n_head, d_k=d_k,
d_v=d_v, d_out=d_out, residual_dropout=residual_dropout,
attention_dropout=attention_dropout,
use_pad_mask=use_pad_mask, use_attn_mask=use_attn_mask,
neg_inf=neg_inf, ln_epsilon=layer_norm_epsilon,
use_gelu=use_gelu, accurate_gelu=accurate_gelu,
name='enc_layer_{}'.format(i)
)
)
if self.use_pooler:
self.pooler = Pooler(self.d_out)
if self.use_masked_lm or self.use_next_sp:
emb_p = None
if self.use_masked_lm:
# self.embed_layer.token_emb.build((None, None))
# emb_p = K.transpose(self.embed_layer.token_emb.embeddings)
emb_p = self.embed_layer.token_emb
self.task_cls = MaskedLM_NextSentenceHead(
self.d_out, self.vocab_size,
embedding_weights=emb_p,
layer_norm_epsilon=layer_norm_epsilon,
use_gelu=use_gelu, accurate_gelu=accurate_gelu,
use_masked_lm=self.use_masked_lm, use_next_sp=self.use_next_sp
)
if do_seq_class_task:
self.seq_class_task = SequenceClassificationTask(
self.d_out, seq_class_num_labels, task_dropout
)
if do_mult_choice_task:
self.mult_choice_task = MultipleChoiceTask(
self.d_out, task_num_choices, task_dropout
)
if do_tok_class_task:
self.tok_class_task = TokenClassificationTask(
self.d_out, tok_class_num_labels, task_dropout
)
if do_qa_task:
self.qa_task = QuestionAnsweringTask(
self.d_out, task_dropout
)
def compute_output_shape(self, input_shape):
shape = tf.TensorShape(input_shape[0]).as_list()
shape.append(self.d_out)
ret = []
if not self.use_pooler:
if not self.use_masked_lm:
ret = [tf.TensorShape(shape)]
else:
shape3 = [shape[0], shape[1], self.vocab_size]
ret = [tf.TensorShape(shape), tf.TensorShape(shape3)]
else:
shape2 = [shape[0], shape[2]]
if not self.use_masked_lm and not self.use_next_sp:
ret = [tf.TensorShape(shape), tf.TensorShape(shape2)]
elif not self.use_masked_lm:
shape3 = [shape[0], 2]
ret = [
tf.TensorShape(shape), tf.TensorShape(shape2),
tf.TensorShape(shape3)
]
elif not self.use_next_sp:
shape3 = [shape[0], shape[1], self.vocab_size]
ret = [
tf.TensorShape(shape), tf.TensorShape(shape2),
tf.TensorShape(shape3)
]
else:
shape3 = [shape[0], shape[1], self.vocab_size]
shape4 = [shape[0], 2]
ret = [
tf.TensorShape(shape), tf.TensorShape(shape2),
tf.TensorShape(shape3), tf.TensorShape(shape4)
]
if self.do_seq_class_task:
shape_seq = [shape[0], self.seq_class_num_labels]
ret.append(tf.TensorShape(shape_seq))
if self.do_mult_choice_task:
shape_mult = [-1, self.task_num_choices]
ret.append(tf.TensorShape(shape_mult))
if self.do_tok_class_task:
shape_tok = [shape[0], shape[1], self.tok_class_num_labels]
ret.append(tf.TensorShape(shape_tok))
if self.do_qa_task:
shape_qa = [shape[0], shape[1]]
ret.append(tf.TensorShape(shape_qa))
if len(ret) == 1:
ret = ret[0]
return ret
def call(self, inputs):
if self.num_segments is None or (self.num_segments == 0):
segment_ids = None
if not self.use_attn_mask and not self.use_pad_mask:
tokens, pos_ids = inputs
attn_mask = None
pad_mask = None
elif not self.use_pad_mask:
tokens, pos_ids, attn_mask = inputs
pad_mask = None
elif not self.use_attn_mask:
tokens, pos_ids, pad_mask = inputs
attn_mask = None
else:
tokens, pos_ids, attn_mask, pad_mask = inputs
else:
if not self.use_attn_mask and not self.use_pad_mask:
tokens, segment_ids, pos_ids = inputs
attn_mask = None
pad_mask = None
elif not self.use_pad_mask:
tokens, segment_ids, pos_ids, attn_mask = inputs
pad_mask = None
elif not self.use_attn_mask:
tokens, segment_ids, pos_ids, pad_mask = inputs
attn_mask = None
else:
tokens, segment_ids, pos_ids, attn_mask, pad_mask = inputs
args = [tokens, pos_ids] if segment_ids is None else [
tokens, segment_ids, pos_ids
]
if len(args) == 1:
args = args[0]
x = self.embed_layer(args)
for i in range(len(self.enc_layers)):
args = [x]
if self.use_attn_mask:
args.append(attn_mask)
if self.use_pad_mask:
args.append(pad_mask)
if len(args) == 1:
args = args[0]
x, _ = self.enc_layers[i](args)
ret = []
if not self.use_pooler:
if not self.use_masked_lm:
ret = [x]
else:
lm_seq = self.task_cls(x)
ret = [x, lm_seq]
else:
pooled = self.pooler(x)
if not self.use_masked_lm and not self.use_next_sp:
ret = [x, pooled]
elif not self.use_masked_lm:
next_sp_tgt = self.task_cls(pooled)
ret = [x, pooled, next_sp_tgt]
elif not self.use_next_sp:
lm_seq = self.task_cls(x)
ret = [x, pooled, lm_seq]
else:
lm_seq, next_sp_tgt = self.task_cls([x, pooled])
ret = [x, pooled, lm_seq, next_sp_tgt]
if self.do_seq_class_task:
seq_out = self.seq_class_task(pooled)
ret.append(seq_out)
if self.do_mult_choice_task:
mult_out = self.mult_choice_task(pooled)
ret.append(mult_out)
if self.do_tok_class_task:
tok_out = self.tok_class_task(x)
ret.append(tok_out)
if self.do_qa_task:
qa_out = self.qa_task(x)
ret.extend(qa_out)
if len(ret) == 1:
ret = ret[0]
return ret
def get_config(self):
config = {
'vocab_size': self.vocab_size,
'n_layers': self.n_layers,
'd_model': self.d_model,
'd_inner': self.d_inner,
'n_head': self.n_head,
'd_k': self.d_k,
'd_v': self.d_v,
'd_out': self.d_out,
'max_len': self.max_len,
'num_segments': self.num_segments,
'embedding_dropout': self.embedding_dropout,
'attention_dropout': self.attention_dropout,
'residual_dropout': self.residual_dropout,
'embedding_layer_norm': self.embedding_layer_norm,
'layer_norm_epsilon': self.layer_norm_epsilon,
'neg_inf': self.neg_inf,
'trainable_pos_embedding': self.trainable_pos_embedding,
'use_one_embedding_dropout': self.use_one_embedding_dropout,
'use_attn_mask': self.use_attn_mask,
'use_pad_mask': self.use_pad_mask,
'use_gelu': self.use_gelu,
'accurate_gelu': self.accurate_gelu,
'use_pooler': self.use_pooler,
'use_masked_lm': self.use_masked_lm,
'use_next_sp': self.use_next_sp,
'do_seq_class_task': self.do_seq_class_task,
'do_mult_choice_task': self.do_mult_choice_task,
'do_tok_class_task': self.do_tok_class_task,
'do_qa_task': self.do_qa_task,
'seq_class_num_labels': self.seq_class_num_labels,
'task_num_choices': self.task_num_choices,
'tok_class_num_labels': self.tok_class_num_labels,
'task_dropout': self.task_dropout,
}
base_config = {} # super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
if __name__ == '__main__':
tf.enable_eager_execution()
print("TensorFlow version: {}".format(tf.__version__))
print("Eager execution: {}".format(tf.executing_eagerly()))
params = {
'vocab_size': 20,
'n_layers': 3,
'd_model': 15,
'd_inner': 50,
'n_head': 5,
'd_k': 15,
'd_v': 15,
'd_out': 15,
'max_len': 10,
'num_segments': 0,
'embedding_dropout': 0.1,
'attention_dropout': 0.1,
'residual_dropout': 0.1,
'embedding_layer_norm': False,
'layer_norm_epsilon': 1e-5,
'neg_inf': -1e9,
'trainable_pos_embedding': True,
'use_one_embedding_dropout': True,
'use_attn_mask': False,
'use_pad_mask': False,
'use_gelu': False,
'accurate_gelu': False,
'use_pooler': True,
'use_masked_lm': True,
'use_next_sp': True,
'do_seq_class_task': True,
'do_mult_choice_task': True,
'do_tok_class_task': True,
'do_qa_task': True,
'seq_class_num_labels': 3,
        'task_num_choices': 2,  # batch_size must be a multiple of this parameter
'tok_class_num_labels': 5,
'task_dropout': 0.1,
}
model = TransformerEncoder(**params)
tokens = tf.Variable([[1, 5, 4, 3, 2, 10, 15], [1, 5, 8, 3, 9, 10, 15]])
pos_ids = tf.Variable([[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]])
outputs = model([tokens, pos_ids])
if isinstance(outputs, (list, tuple)):
out = ['Output_{}: {}'.format(i, v.shape)
for i, v in enumerate(outputs)]
print(' '.join(out))
else:
print('Output: ', outputs.shape)
tokens = tf.Variable([[1, 5, 4, 3, 2, 10, 15, 18],
[1, 5, 16, 3, 2, 14, 15, 18]])
pos_ids = tf.Variable([[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]])
outputs = model([tokens, pos_ids])
if isinstance(outputs, (list, tuple)):
out = ['Output2_{}: {}'.format(i, v.shape)
for i, v in enumerate(outputs)]
print(' '.join(out))
else:
print('Output2: ', outputs.shape)
print('done!')
```
#### File: jag/unit_tests/data_fetcher_test.py
```python
from jag.utils import data_fetcher
def test_mrqa_text_fetcher():
data_fetcher.get_file(data_src='./jag/data/mrqa_urls_sample.txt',
cache_dir='./unit_tests/data')
```
|
{
"source": "jerpint/voicemd",
"score": 2
}
|
#### File: voicemd/voicemd/eval.py
```python
import logging
import os
import mlflow
import orion
import yaml
import time
import torch
import tqdm
import numpy as np
from sklearn.metrics import confusion_matrix
from mlflow import log_metric
from orion.client import report_results
from yaml import dump
from yaml import load
def get_batch_performance_metrics(outputs, model_target):
probs = torch.softmax(outputs, 1).detach().numpy() > 0.5
preds = np.argmax(probs, 1)
targs = model_target.detach().numpy()
acc = np.sum(np.equal(preds, targs)) / len(preds)
conf_mat = confusion_matrix(targs, preds, labels=[0, 1])
return acc, conf_mat
def performance_metrics_per_patient(patient_predictions):
patient_targs = []
patient_preds = []
for patient_pred in patient_predictions:
patient_targs.append(patient_pred["gender"])
patient_preds.append(patient_pred["gender_prediction"])
conf_mat = confusion_matrix(patient_targs, patient_preds, labels=[0, 1])
return conf_mat
def evaluate_loaders(loaders, model, loss_fun, device, pb):
model.eval()
cumulative_loss = 0.0
cumulative_acc = 0.0
cumulative_conf_mat = np.zeros((2, 2))
patient_predictions = []
for loader in pb(loaders, total=len(loaders)):
loader_results = evaluate_loader(loader, model, device, loss_fun)
cumulative_acc += loader_results["avg_acc"]
cumulative_loss += loader_results["avg_loss"]
cumulative_conf_mat += loader_results["conf_mat"]
patient_predictions.append(loader_results)
avg_loss = cumulative_loss / len(loaders)
avg_acc = cumulative_acc / len(loaders)
per_patient_conf_mat = performance_metrics_per_patient(patient_predictions)
loaders_results = {
"avg_loss": avg_loss,
"avg_acc": avg_acc,
"conf_mat_patients": per_patient_conf_mat,
"patient_predictions": patient_predictions,
"conf_mat_spectrums": cumulative_conf_mat,
}
return loaders_results
def evaluate_loader(loader, model, device, loss_fun):
steps = len(loader)
cumulative_loss = 0.0
cumulative_acc = 0.0
cumulative_conf_mat = np.zeros((2, 2))
examples = 0
all_probs = []
for data in loader:
model_input, model_target = data
with torch.no_grad():
outputs = model(model_input.to(device))
model_target = model_target.type(torch.long)
model_target = model_target.to(device)
loss = loss_fun(outputs, model_target)
cumulative_loss += loss.item()
probs = torch.nn.functional.softmax(outputs, dim=1)
all_probs.extend(probs.detach().numpy())
acc, conf_mat = get_batch_performance_metrics(outputs, model_target)
cumulative_acc += acc
cumulative_conf_mat += conf_mat
examples += model_target.shape[0]
all_probs = np.array(all_probs)
avg_prob = np.sum(all_probs, 0) / len(all_probs)
avg_loss = cumulative_loss / examples
avg_acc = cumulative_acc / steps
gender = int(model_target[0])
final_gender_prediction = np.argmax(avg_prob)
gender_confidence = avg_prob[final_gender_prediction]
patient_uid = loader.dataset.metadata.index[0]
loader_results = {
"uid": patient_uid,
"gender": gender,
"gender_prediction": final_gender_prediction,
"gender_confidence": gender_confidence,
"avg_loss": avg_loss,
"avg_acc": avg_acc,
"conf_mat": cumulative_conf_mat,
}
return loader_results
```
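The helpers above expect one DataLoader per patient plus a progress-bar wrapper such as tqdm. A minimal wiring sketch follows; the import path and the pre-built per-patient loaders are assumptions, not part of this file.
```python
# Minimal sketch; `voicemd.eval` is an assumed import path and the
# per-patient DataLoaders are built elsewhere.
import torch
from tqdm import tqdm
from voicemd.eval import evaluate_loaders

def run_patient_eval(per_patient_loaders, model):
    device = torch.device("cpu")
    loss_fun = torch.nn.CrossEntropyLoss()
    results = evaluate_loaders(per_patient_loaders, model, loss_fun, device, pb=tqdm)
    # conf_mat_patients aggregates one prediction per patient,
    # conf_mat_spectrums counts every spectrogram window.
    print(results["avg_acc"])
    print(results["conf_mat_patients"])
```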
#### File: voicemd/models/long_filter_cnn.py
```python
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
# from voicemd.utils.hp_utils import check_and_log_hp
logger = logging.getLogger(__name__)
class LongFilterCNN(nn.Module):
def __init__(self, hyper_params):
super(LongFilterCNN, self).__init__()
self.hyper_params = hyper_params
self.conv2d = nn.Sequential(
nn.Conv2d(1, 64, (80, 3), 1),
nn.ReLU(),
)
self.conv1d = nn.Sequential(
nn.Conv1d(64, 64, 3, 1),
nn.Conv1d(64, 32, 3, 1),
nn.ReLU(),
nn.MaxPool1d(2),
nn.Conv1d(32, 32, 3, 1),
nn.Conv1d(32, 32, 3, 1),
nn.ReLU(),
nn.MaxPool1d(2),
)
self.classifier = nn.Sequential(
nn.Linear(1920, 256),
nn.ReLU(),
nn.Linear(256, 128),
nn.ReLU(),
nn.Linear(128, 2),
)
def forward(self, x):
x = self.conv2d(x)
x = torch.squeeze(x, dim=2)
x = self.conv1d(x)
x = torch.flatten(x, 1)
output = self.classifier(x)
return output
```
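As a sanity check on the layer arithmetic above: the (80, 3) filter collapses the 80 mel bins in a single step, and with 256 time frames the flattened feature size works out to exactly the 1920 inputs the classifier expects. A hedged shape-check sketch (the input size is inferred from the layers, not documented in the repo):
```python
# Shape sketch; the (80 mel bins x 256 frames) input is an inference from the
# layer sizes above, and the import path is assumed.
import torch
from voicemd.models.long_filter_cnn import LongFilterCNN

model = LongFilterCNN(hyper_params={})  # hyper_params is stored but not read here
dummy = torch.randn(4, 1, 80, 256)      # (batch, channel, mel_bins, frames)
logits = model(dummy)
print(logits.shape)                     # torch.Size([4, 2])
```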
#### File: voicemd/models/model_loader.py
```python
import logging
import torch
from torch import optim
from voicemd.models.my_model import MyModel
from voicemd.models.densenet import densenet121, densenet_small
from voicemd.models.simple_cnn import SimpleCNN
from voicemd.models.long_filter_cnn import LongFilterCNN
logger = logging.getLogger(__name__)
def load_model(hyper_params):
architecture = hyper_params['architecture']
# __TODO__ fix architecture list
if architecture == 'my_model':
model_class = MyModel
elif architecture == 'densenet121':
model_class = densenet121
elif architecture == 'densenet_small':
model_class = densenet_small
elif architecture == 'simplecnn':
model_class = SimpleCNN
elif architecture == 'longfilter':
model_class = LongFilterCNN
else:
raise ValueError('architecture {} not supported'.format(architecture))
logger.info('selected architecture: {}'.format(architecture))
model = model_class(hyper_params)
logger.info(model)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
logger.info('using device {}'.format(device))
if torch.cuda.is_available():
logger.info(torch.cuda.get_device_name(0))
return model
def load_optimizer(hyper_params, model):
optimizer_name = hyper_params['optimizer']
lr = hyper_params['learning_rate']
# __TODO__ fix optimizer list
if optimizer_name == 'adam':
optimizer = optim.Adam(model.parameters(), lr=lr)
elif optimizer_name == 'sgd':
optimizer = optim.SGD(model.parameters(), lr=lr)
else:
raise ValueError('optimizer {} not supported'.format(optimizer_name))
return optimizer
def load_loss(hyper_params, train_loader=None):
# Use the proportion from train_loader to weigh the loss since it can be unbalanced classes
if train_loader:
n_male = sum(train_loader.dataset.metadata['gender'] == 'M')
n_female = sum(train_loader.dataset.metadata['gender'] == 'F')
n_total = n_male + n_female
# Male is label 1, female is label 0, use the proportion of the other to weigh the loss
weight = torch.tensor([n_male/n_total, n_female/n_total])
else:
weight = None
return torch.nn.CrossEntropyLoss(weight=weight)
```
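A minimal sketch of the three loaders working together; the hyper_params values are illustrative (only the keys read above matter) and the import path is an assumption.
```python
# Illustrative wiring; other architectures may need additional hyper_params keys.
from voicemd.models.model_loader import load_model, load_optimizer, load_loss

hyper_params = {
    "architecture": "longfilter",
    "optimizer": "adam",
    "learning_rate": 1e-3,
}
model = load_model(hyper_params)
optimizer = load_optimizer(hyper_params, model)
loss_fun = load_loss(hyper_params)  # unweighted without a train_loader
```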
#### File: voicemd/voicemd/predict.py
```python
import torch
import yaml
import numpy as np
from tqdm import tqdm
from yaml import load
from voicemd.data.prepare_dataloaders import make_predict_dataloader
from voicemd.models.model_loader import load_model
def make_a_prediction(sound_filepath, config_filepath='/voicemd/config.yaml',
best_model_path='voicemd/output/best_model.pt'):
sound_filename = sound_filepath.split('/')[-1]
print(f'Analyzing {sound_filename}...')
with open(config_filepath, 'r') as stream:
hyper_params = load(stream, Loader=yaml.FullLoader)
model = load_model(hyper_params)
model.load_state_dict(torch.load(best_model_path))
predict_dataloader = make_predict_dataloader(sound_filepath, hyper_params)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
all_probs = []
model.to(device)
for data in tqdm(predict_dataloader):
pred = model(data.to(device))
probs = torch.nn.functional.softmax(pred, dim=1)
all_probs.extend(probs.detach().cpu().numpy())
all_probs = np.array(all_probs)
avg_prob = np.sum(all_probs, 0) / len(all_probs)
print(f"{sound_filename} probability to be a male's voice: {round((avg_prob[1]*100),2)}%")
print(f"{sound_filename} probability to be a female's voice: {round((avg_prob[0]*100),2)}%\n")
```
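A usage sketch for make_a_prediction; the paths are placeholders that assume a config file and checkpoint produced by the training pipeline, and the import path is an assumption.
```python
# Placeholder paths; adjust to wherever the config and checkpoint live.
from voicemd.predict import make_a_prediction

make_a_prediction(
    sound_filepath="samples/recording.wav",
    config_filepath="voicemd/config.yaml",
    best_model_path="voicemd/output/best_model.pt",
)
```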
#### File: voicemd/utils/balance_commonvoice_metadata.py
```python
import argparse
import shutil
import os
import pandas as pd
def copy_subset(metadata, cv_path, subset_path):
'''
cv_path: path where the original dataset is stored
subset_path: path where the new subset dataset will be saved
metadata: pd.DataFrame, metadata to copy from cv_path to subset_path
'''
for row, sample in metadata.iterrows():
src = os.path.join(cv_path, 'clips', sample['path'])
dest = os.path.join(subset_path, 'clips')
shutil.copy(src, dest)
metadata.to_csv(os.path.join(subset_path, 'cv_metadata_balanced.csv'))
def adapt_metadata(subset_path):
'''Adapt metadata file to match previous dataset'''
df = pd.read_csv(os.path.join(subset_path, 'cv_metadata_balanced.csv'))
df["gender"] = df["gender"].replace("female", "F")
df["gender"] = df["gender"].replace("male", "M")
df = df.rename(columns={"path": "filename"})
df["uid"] = df["filename"]
df.to_csv(os.path.join(subset_path, 'cv_metadata_balanced_clean.csv'))
def balance_and_filter_commonvoice_tsv(tsv_fname, split, seed=42):
"""Balance and filter commonvoice_tsv.
We remove entries with no associated gender for all splits.
We sample and rebalance the train split such that there are
    equal samples across males and females for all age categories
in valid_age_categories.
Args:
tsv_fname: (str) full path to the tsv file
split: (str) can be e.g 'train', 'dev', 'test'
seed: (int) for reproducibility
Returns:
metadata: (pd.Dataframe) containing the processed metadata
"""
tsv = pd.read_csv(tsv_fname, sep="\t")
metadata = tsv.copy()
# show stats before rebalancing
print("Breakdown before rebalance: \n")
print_metadata_stats(metadata)
# do not rebalance dev and test
if split != "train":
print(split + " not rebalanced")
return metadata
# Keep at most 3 samples per unique person
metadata = metadata.groupby(["client_id"]).apply(lambda grp: grp.sample(n=min(3, len(grp)), random_state=42))
# remove samples where gender is unidentified
metadata = metadata[metadata["gender"].isin(["male", "female"])]
# resulting samples will be stored here
male_metadata = pd.DataFrame(columns=metadata.columns)
female_metadata = pd.DataFrame(columns=metadata.columns)
# keep only valid age categories
valid_age_categories = ["twenties", "thirties", "fourties", "fifties", "sixties"]
metadata = metadata[metadata["age"].isin(valid_age_categories)]
# Take the minimum number in females as the number to take
n_samples = min(metadata.loc[metadata["gender"] == "female"]["age"].value_counts())
# for each gender, for each age, sample n_samples at random
for age in valid_age_categories:
# separate by age
tmp_metadata = metadata[metadata["age"] == age]
# separate by gender
tmp_male_metadata = tmp_metadata[tmp_metadata["gender"] == "male"]
tmp_female_metadata = tmp_metadata[tmp_metadata["gender"] == "female"]
# sample and add to all results
male_metadata = male_metadata.append(
tmp_male_metadata.sample(n=n_samples, random_state=seed),
ignore_index=True,
)
female_metadata = female_metadata.append(
tmp_female_metadata.sample(n=n_samples, random_state=seed),
ignore_index=True,
)
metadata = male_metadata.append(female_metadata)
print("Breakdown after rebalance: \n")
print_metadata_stats(metadata)
return metadata
def print_metadata_stats(metadata):
print("Gender breakdown: \n", metadata["gender"].value_counts())
print(
"Age breakdown by gender (male): \n",
metadata[metadata["gender"] == "male"]["age"].value_counts(),
)
print(
"Age breakdown by gender (female): \n",
metadata[metadata["gender"] == "female"]["age"].value_counts(),
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--commonvoice_path", help="path to commonvoice dataset", required=True
)
parser.add_argument(
"--subset_dir", help="path to folder to store balanced dataset", required=True
)
args = parser.parse_args()
# We will only use and balance the train split
splits = ["train"]
for split in splits:
tsv_fname = os.path.join(args.commonvoice_path, split + ".tsv")
print("reading ", tsv_fname)
metadata = balance_and_filter_commonvoice_tsv(tsv_fname, split)
copy_subset(metadata, args.commonvoice_path, args.subset_dir)
        adapt_metadata(args.subset_dir)
```
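The same flow the CLI block above runs, sketched programmatically; the paths are placeholders and the import path is an assumption.
```python
# Placeholder paths; the destination needs a clips/ folder for copy_subset.
import os
from voicemd.utils.balance_commonvoice_metadata import (
    adapt_metadata, balance_and_filter_commonvoice_tsv, copy_subset)

cv_path = "/data/commonvoice"     # extracted Common Voice dump with clips/
subset_dir = "/data/cv_balanced"  # destination for the balanced subset
os.makedirs(os.path.join(subset_dir, "clips"), exist_ok=True)
balanced = balance_and_filter_commonvoice_tsv(os.path.join(cv_path, "train.tsv"), "train")
copy_subset(balanced, cv_path, subset_dir)
adapt_metadata(subset_dir)
```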
|
{
"source": "jerr0328/co2-mini",
"score": 3
}
|
#### File: co2-mini/co2mini/meter.py
```python
import fcntl
import logging
import threading
CO2METER_CO2 = 0x50
CO2METER_TEMP = 0x42
CO2METER_HUM = 0x41
HIDIOCSFEATURE_9 = 0xC0094806
logger = logging.getLogger(__name__)
def _convert_value(sensor, value):
"""Apply Conversion of value dending on sensor type"""
if sensor == CO2METER_TEMP:
return round(value / 16.0 - 273.1, 1)
if sensor == CO2METER_HUM:
return round(value / 100.0, 1)
return value
def _hd(data):
"""Helper function for printing the raw data"""
return " ".join("%02X" % e for e in data)
class CO2Meter(threading.Thread):
_key = [0xC4, 0xC6, 0xC0, 0x92, 0x40, 0x23, 0xDC, 0x96]
_device = ""
_values = {}
_file = ""
running = True
_callback = None
def __init__(self, device="/dev/co2mini0", callback=None):
super().__init__(daemon=True)
self._device = device
self._callback = callback
self._file = open(device, "a+b", 0)
set_report = [0] + self._key
fcntl.ioctl(self._file, HIDIOCSFEATURE_9, bytearray(set_report))
def run(self):
while self.running:
self._read_data()
def _read_data(self):
"""
Function that reads from the device, decodes it, validates the checksum
and adds the data to the dict _values.
Additionally calls the _callback if set
"""
try:
data = list(self._file.read(8))
decrypted = self._decrypt(data)
if decrypted[4] != 0x0D or (sum(decrypted[:3]) & 0xFF) != decrypted[3]:
logger.error("Checksum error: %s => %s", _hd(data), _hd(decrypted))
else:
operation = decrypted[0]
val = decrypted[1] << 8 | decrypted[2]
self._values[operation] = _convert_value(operation, val)
if self._callback is not None:
if operation in {CO2METER_CO2, CO2METER_TEMP} or (
operation == CO2METER_HUM and val != 0
):
self._callback(sensor=operation, value=self._values[operation])
except Exception:
logger.exception("Exception reading data")
self.running = False
def _decrypt(self, data):
"""
The received data has some weak crypto that needs to be decoded first
"""
cstate = [0x48, 0x74, 0x65, 0x6D, 0x70, 0x39, 0x39, 0x65]
shuffle = [2, 4, 0, 7, 1, 6, 5, 3]
phase1 = [0] * 8
for i, j in enumerate(shuffle):
phase1[j] = data[i]
phase2 = [0] * 8
for i in range(8):
phase2[i] = phase1[i] ^ self._key[i]
phase3 = [0] * 8
for i in range(8):
phase3[i] = ((phase2[i] >> 3) | (phase2[(i - 1 + 8) % 8] << 5)) & 0xFF
ctmp = [0] * 8
for i in range(8):
ctmp[i] = ((cstate[i] >> 4) | (cstate[i] << 4)) & 0xFF
out = [0] * 8
for i in range(8):
out[i] = (0x100 + phase3[i] - ctmp[i]) & 0xFF
return out
def get_co2(self):
"""
read the co2 value from _values
:returns dict with value or empty
"""
if not self.running:
raise IOError("worker thread couldn't read data")
result = {}
if CO2METER_CO2 in self._values:
result = {"co2": self._values[CO2METER_CO2]}
return result
def get_temperature(self):
"""
reads the temperature from _values
:returns dict with value or empty
"""
if not self.running:
raise IOError("worker thread couldn't read data")
result = {}
if CO2METER_TEMP in self._values:
result = {"temperature": self._values[CO2METER_TEMP]}
return result
def get_humidity(self): # not implemented by all devices
"""
        Read the humidity from _values.
        Not all devices support this, but some still report a value of 0,
        so values of 0 are discarded.
:returns dict with value or empty
"""
if not self.running:
raise IOError("worker thread couldn't read data")
result = {}
if CO2METER_HUM in self._values and self._values[CO2METER_HUM] != 0:
result = {"humidity": self._values[CO2METER_HUM]}
return result
def get_data(self):
"""
get all currently available values
:returns dict with value or empty
"""
result = {}
result.update(self.get_co2())
result.update(self.get_temperature())
result.update(self.get_humidity())
return result
```
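A usage sketch for the reader thread above; it assumes a device node at /dev/co2mini0 (typically created by a udev rule) and that this module is importable as co2mini.meter.
```python
# Sketch only: requires an attached CO2 meter exposed at /dev/co2mini0.
import time
from co2mini.meter import CO2Meter, CO2METER_CO2

def on_reading(sensor, value):
    if sensor == CO2METER_CO2:
        print("CO2:", value, "ppm")

meter = CO2Meter(device="/dev/co2mini0", callback=on_reading)
meter.start()            # daemon thread; _read_data() loops in the background
time.sleep(15)           # let a few readings arrive
print(meter.get_data())  # e.g. {'co2': 640, 'temperature': 22.5}
```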
|
{
"source": "jerr0328/HAP-python",
"score": 3
}
|
#### File: HAP-python/accessories/MotionSensor.py
```python
import random
import RPi.GPIO as GPIO
from pyhap.accessory import Accessory
from pyhap.const import CATEGORY_SENSOR
class MotionSensor(Accessory):
category = CATEGORY_SENSOR
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
serv_motion = self.add_preload_service('MotionSensor')
self.char_detected = serv_motion.configure_char('MotionDetected')
GPIO.setmode(GPIO.BCM)
GPIO.setup(7, GPIO.IN)
GPIO.add_event_detect(7, GPIO.RISING, callback=self._detected)
def _detected(self, _pin):
self.char_detected.set_value(True)
def stop(self):
super().stop()
GPIO.cleanup()
```
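The accessory above ships without driver wiring; a hedged sketch of serving it on a Raspberry Pi (PIR sensor on BCM pin 7), mirroring the repository's other example scripts. The state file name and display name are arbitrary.
```python
# Sketch only; MotionSensor is the class defined above and RPi.GPIO requires
# running on a Raspberry Pi.
import signal
from pyhap.accessory_driver import AccessoryDriver

driver = AccessoryDriver(port=51826, persist_file='motion_sensor.state')
driver.add_accessory(accessory=MotionSensor(driver, 'PIR Sensor'))
signal.signal(signal.SIGTERM, driver.signal_handler)
driver.start()
```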
#### File: jerr0328/HAP-python/busy_home.py
```python
import logging
import signal
import random
from pyhap.accessory import Accessory, Bridge
from pyhap.accessory_driver import AccessoryDriver
from pyhap.const import (CATEGORY_FAN,
CATEGORY_LIGHTBULB,
CATEGORY_GARAGE_DOOR_OPENER,
CATEGORY_SENSOR)
logging.basicConfig(level=logging.INFO, format="[%(module)s] %(message)s")
class TemperatureSensor(Accessory):
"""Fake Temperature sensor, measuring every 3 seconds."""
category = CATEGORY_SENSOR
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
serv_temp = self.add_preload_service('TemperatureSensor')
self.char_temp = serv_temp.configure_char('CurrentTemperature')
@Accessory.run_at_interval(3)
async def run(self):
self.char_temp.set_value(random.randint(18, 26))
class FakeFan(Accessory):
"""Fake Fan, only logs whatever the client set."""
category = CATEGORY_FAN
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Add the fan service. Also add optional characteristics to it.
serv_fan = self.add_preload_service(
'Fan', chars=['RotationSpeed', 'RotationDirection'])
self.char_rotation_speed = serv_fan.configure_char(
'RotationSpeed', setter_callback=self.set_rotation_speed)
self.char_rotation_direction = serv_fan.configure_char(
'RotationDirection', setter_callback=self.set_rotation_direction)
def set_rotation_speed(self, value):
logging.debug("Rotation speed changed: %s", value)
def set_rotation_direction(self, value):
logging.debug("Rotation direction changed: %s", value)
class LightBulb(Accessory):
"""Fake lightbulb, logs what the client sets."""
category = CATEGORY_LIGHTBULB
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
serv_light = self.add_preload_service('Lightbulb')
self.char_on = serv_light.configure_char(
'On', setter_callback=self.set_bulb)
def set_bulb(self, value):
logging.info("Bulb value: %s", value)
class GarageDoor(Accessory):
"""Fake garage door."""
category = CATEGORY_GARAGE_DOOR_OPENER
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_preload_service('GarageDoorOpener')\
.configure_char(
'TargetDoorState', setter_callback=self.change_state)
def change_state(self, value):
logging.info("Bulb value: %s", value)
self.get_service('GarageDoorOpener')\
.get_characteristic('CurrentDoorState')\
.set_value(value)
def get_bridge(driver):
bridge = Bridge(driver, 'Bridge')
bridge.add_accessory(LightBulb(driver, 'Lightbulb'))
bridge.add_accessory(FakeFan(driver, 'Big Fan'))
bridge.add_accessory(GarageDoor(driver, 'Garage'))
bridge.add_accessory(TemperatureSensor(driver, 'Sensor'))
return bridge
driver = AccessoryDriver(port=51826, persist_file='busy_home.state')
driver.add_accessory(accessory=get_bridge(driver))
signal.signal(signal.SIGTERM, driver.signal_handler)
driver.start()
```
#### File: HAP-python/pyhap/camera.py
```python
import asyncio
import functools
import os
import ipaddress
import logging
import struct
from uuid import UUID
from pyhap import RESOURCE_DIR
from pyhap.accessory import Accessory
from pyhap.const import CATEGORY_CAMERA
from pyhap.util import to_base64_str, byte_bool
from pyhap import tlv
SETUP_TYPES = {
'SESSION_ID': b'\x01',
'STATUS': b'\x02',
'ADDRESS': b'\x03',
'VIDEO_SRTP_PARAM': b'\x04',
'AUDIO_SRTP_PARAM': b'\x05',
'VIDEO_SSRC': b'\x06',
'AUDIO_SSRC': b'\x07'
}
SETUP_STATUS = {
'SUCCESS': b'\x00',
'BUSY': b'\x01',
'ERROR': b'\x02'
}
SETUP_IPV = {
'IPV4': b'\x00',
'IPV6': b'\x01'
}
SETUP_ADDR_INFO = {
'ADDRESS_VER': b'\x01',
'ADDRESS': b'\x02',
'VIDEO_RTP_PORT': b'\x03',
'AUDIO_RTP_PORT': b'\x04'
}
SETUP_SRTP_PARAM = {
'CRYPTO': b'\x01',
'MASTER_KEY': b'\x02',
'MASTER_SALT': b'\x03'
}
STREAMING_STATUS = {
'AVAILABLE': b'\x00',
'STREAMING': b'\x01',
'BUSY': b'\x02'
}
RTP_CONFIG_TYPES = {
'CRYPTO': b'\x02'
}
SRTP_CRYPTO_SUITES = {
'AES_CM_128_HMAC_SHA1_80': b'\x00',
'AES_CM_256_HMAC_SHA1_80': b'\x01',
'NONE': b'\x02'
}
VIDEO_TYPES = {
'CODEC': b'\x01',
'CODEC_PARAM': b'\x02',
'ATTRIBUTES': b'\x03',
'RTP_PARAM': b'\x04'
}
VIDEO_CODEC_TYPES = {
'H264': b'\x00'
}
VIDEO_CODEC_PARAM_TYPES = {
'PROFILE_ID': b'\x01',
'LEVEL': b'\x02',
'PACKETIZATION_MODE': b'\x03',
'CVO_ENABLED': b'\x04',
'CVO_ID': b'\x05'
}
VIDEO_CODEC_PARAM_CVO_TYPES = {
'UNSUPPORTED': b'\x01',
'SUPPORTED': b'\x02'
}
VIDEO_CODEC_PARAM_PROFILE_ID_TYPES = {
'BASELINE': b'\x00',
'MAIN': b'\x01',
'HIGH': b'\x02'
}
VIDEO_CODEC_PARAM_LEVEL_TYPES = {
'TYPE3_1': b'\x00',
'TYPE3_2': b'\x01',
'TYPE4_0': b'\x02'
}
VIDEO_CODEC_PARAM_PACKETIZATION_MODE_TYPES = {
'NON_INTERLEAVED': b'\x00'
}
VIDEO_ATTRIBUTES_TYPES = {
'IMAGE_WIDTH': b'\x01',
'IMAGE_HEIGHT': b'\x02',
'FRAME_RATE': b'\x03'
}
SUPPORTED_VIDEO_CONFIG_TAG = b'\x01'
SELECTED_STREAM_CONFIGURATION_TYPES = {
'SESSION': b'\x01',
'VIDEO': b'\x02',
'AUDIO': b'\x03'
}
RTP_PARAM_TYPES = {
'PAYLOAD_TYPE': b'\x01',
'SYNCHRONIZATION_SOURCE': b'\x02',
'MAX_BIT_RATE': b'\x03',
'RTCP_SEND_INTERVAL': b'\x04',
'MAX_MTU': b'\x05',
'COMFORT_NOISE_PAYLOAD_TYPE': b'\x06'
}
AUDIO_TYPES = {
'CODEC': b'\x01',
'CODEC_PARAM': b'\x02',
'RTP_PARAM': b'\x03',
'COMFORT_NOISE': b'\x04'
}
AUDIO_CODEC_TYPES = {
'PCMU': b'\x00',
'PCMA': b'\x01',
'AACELD': b'\x02',
'OPUS': b'\x03'
}
AUDIO_CODEC_PARAM_TYPES = {
'CHANNEL': b'\x01',
'BIT_RATE': b'\x02',
'SAMPLE_RATE': b'\x03',
'PACKET_TIME': b'\x04'
}
AUDIO_CODEC_PARAM_BIT_RATE_TYPES = {
'VARIABLE': b'\x00',
'CONSTANT': b'\x01'
}
AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES = {
'KHZ_8': b'\x00',
'KHZ_16': b'\x01',
'KHZ_24': b'\x02'
}
SUPPORTED_AUDIO_CODECS_TAG = b'\x01'
SUPPORTED_COMFORT_NOISE_TAG = b'\x02'
SUPPORTED_AUDIO_CONFIG_TAG = b'\x02'
SET_CONFIG_REQUEST_TAG = b'\x02'
SESSION_ID = b'\x01'
NO_SRTP = b'\x01\x01\x02\x02\x00\x03\x00'
'''Configuration value for no SRTP.'''
FFMPEG_CMD = (
# pylint: disable=bad-continuation
'ffmpeg -re -f avfoundation -framerate {fps} -i 0:0 -threads 0 '
'-vcodec libx264 -an -pix_fmt yuv420p -r {fps} -f rawvideo -tune zerolatency '
'-vf scale={width}:{height} -b:v {v_max_bitrate}k -bufsize {v_max_bitrate}k '
'-payload_type 99 -ssrc {v_ssrc} -f rtp '
'-srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params {v_srtp_key} '
'srtp://{address}:{v_port}?rtcpport={v_port}&'
'localrtcpport={v_port}&pkt_size=1378'
)
'''Template for the ffmpeg command.'''
logger = logging.getLogger(__name__)
class Camera(Accessory):
"""An Accessory that can negotiated camera stream settings with iOS and start a
stream.
"""
category = CATEGORY_CAMERA
@staticmethod
def get_supported_rtp_config(support_srtp):
"""Return a tlv representation of the RTP configuration we support.
SRTP support allows only the AES_CM_128_HMAC_SHA1_80 cipher for now.
:param support_srtp: True if SRTP is supported, False otherwise.
:type support_srtp: bool
"""
if support_srtp:
crypto = SRTP_CRYPTO_SUITES['AES_CM_128_HMAC_SHA1_80']
else:
crypto = SRTP_CRYPTO_SUITES['NONE']
return tlv.encode(RTP_CONFIG_TYPES['CRYPTO'], crypto, to_base64=True)
@staticmethod
def get_supported_video_stream_config(video_params):
"""Return a tlv representation of the supported video stream configuration.
Expected video parameters:
- codec
- resolutions
:param video_params: Supported video configurations
:type video_params: dict
"""
codec_params_tlv = tlv.encode(
VIDEO_CODEC_PARAM_TYPES['PACKETIZATION_MODE'],
VIDEO_CODEC_PARAM_PACKETIZATION_MODE_TYPES['NON_INTERLEAVED'])
codec_params = video_params['codec']
for profile in codec_params['profiles']:
codec_params_tlv += \
tlv.encode(VIDEO_CODEC_PARAM_TYPES['PROFILE_ID'], profile)
for level in codec_params['levels']:
codec_params_tlv += \
tlv.encode(VIDEO_CODEC_PARAM_TYPES['LEVEL'], level)
attr_tlv = b''
for resolution in video_params['resolutions']:
res_tlv = tlv.encode(
VIDEO_ATTRIBUTES_TYPES['IMAGE_WIDTH'], struct.pack('<H', resolution[0]),
VIDEO_ATTRIBUTES_TYPES['IMAGE_HEIGHT'], struct.pack('<H', resolution[1]),
VIDEO_ATTRIBUTES_TYPES['FRAME_RATE'], struct.pack('<H', resolution[2]))
attr_tlv += tlv.encode(VIDEO_TYPES['ATTRIBUTES'], res_tlv)
config_tlv = tlv.encode(VIDEO_TYPES['CODEC'], VIDEO_CODEC_TYPES['H264'],
VIDEO_TYPES['CODEC_PARAM'], codec_params_tlv)
return tlv.encode(SUPPORTED_VIDEO_CONFIG_TAG, config_tlv + attr_tlv,
to_base64=True)
@staticmethod
def get_supported_audio_stream_config(audio_params):
"""Return a tlv representation of the supported audio stream configuration.
iOS supports only AACELD and OPUS
Expected audio parameters:
- codecs
- comfort_noise
:param audio_params: Supported audio configurations
:type audio_params: dict
"""
has_supported_codec = False
configs = b''
for codec_param in audio_params['codecs']:
param_type = codec_param['type']
if param_type == 'OPUS':
has_supported_codec = True
codec = AUDIO_CODEC_TYPES['OPUS']
bitrate = AUDIO_CODEC_PARAM_BIT_RATE_TYPES['VARIABLE']
elif param_type == 'AAC-eld':
has_supported_codec = True
codec = AUDIO_CODEC_TYPES['AACELD']
bitrate = AUDIO_CODEC_PARAM_BIT_RATE_TYPES['VARIABLE']
else:
logger.warning('Unsupported codec %s', param_type)
continue
param_samplerate = codec_param['samplerate']
if param_samplerate == 8:
samplerate = AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES['KHZ_8']
elif param_samplerate == 16:
samplerate = AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES['KHZ_16']
elif param_samplerate == 24:
samplerate = AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES['KHZ_24']
else:
logger.warning('Unsupported sample rate %s', param_samplerate)
continue
param_tlv = tlv.encode(AUDIO_CODEC_PARAM_TYPES['CHANNEL'], b'\x01',
AUDIO_CODEC_PARAM_TYPES['BIT_RATE'], bitrate,
AUDIO_CODEC_PARAM_TYPES['SAMPLE_RATE'], samplerate)
config_tlv = tlv.encode(AUDIO_TYPES['CODEC'], codec,
AUDIO_TYPES['CODEC_PARAM'], param_tlv)
configs += tlv.encode(SUPPORTED_AUDIO_CODECS_TAG, config_tlv)
if not has_supported_codec:
logger.warning('Client does not support any audio codec that iOS supports.')
codec = AUDIO_CODEC_TYPES['OPUS']
bitrate = AUDIO_CODEC_PARAM_BIT_RATE_TYPES['VARIABLE']
samplerate = AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES['KHZ_24']
param_tlv = tlv.encode(
AUDIO_CODEC_PARAM_TYPES['CHANNEL'], b'\x01',
AUDIO_CODEC_PARAM_TYPES['BIT_RATE'], bitrate,
AUDIO_CODEC_PARAM_TYPES['SAMPLE_RATE'], samplerate)
config_tlv = tlv.encode(AUDIO_TYPES['CODEC'], codec,
AUDIO_TYPES['CODEC_PARAM'], param_tlv)
configs = tlv.encode(SUPPORTED_AUDIO_CODECS_TAG, config_tlv)
comfort_noise = byte_bool(
audio_params.get('comfort_noise', False))
audio_config = to_base64_str(
configs + tlv.encode(SUPPORTED_COMFORT_NOISE_TAG, comfort_noise))
return audio_config
def __init__(self, options, *args, **kwargs):
"""Initialize a camera accessory with the given options.
:param options: Describes the supported video and audio configuration
of this camera. Expected values are video, audio, srtp and address.
Example configuration:
.. code-block:: python
{
"video": {
"codec": {
"profiles": [
camera.VIDEO_CODEC_PARAM_PROFILE_ID_TYPES["BASELINE"],
],
"levels": [
camera.VIDEO_CODEC_PARAM_LEVEL_TYPES['TYPE3_1'],
],
},
"resolutions": [
[320, 240, 15], # Width, Height, framerate
[1024, 768, 30],
[640, 480, 30],
[640, 360, 30],
[480, 360, 30],
[480, 270, 30],
[320, 240, 30],
[320, 180, 30],
],
},
"audio": {
"codecs": [
{
'type': 'OPUS',
'samplerate': 24,
},
{
'type': 'AAC-eld',
'samplerate': 16
}
],
},
"address": "192.168.1.226", # Address from which the camera will stream
}
Additional optional values are:
- srtp - boolean, defaults to False. Whether the camera supports SRTP.
- start_stream_cmd - string specifying the command to be executed to start
the stream. The string can contain the keywords, corresponding to the
video and audio configuration that was negotiated between the camera
and the client. See the ``start`` method for a full list of parameters.
:type options: ``dict``
"""
self.streaming_status = STREAMING_STATUS['AVAILABLE']
self.has_srtp = options.get('srtp', False)
self.start_stream_cmd = options.get('start_stream_cmd', FFMPEG_CMD)
self.stream_address = options['address']
try:
ipaddress.IPv4Address(self.stream_address)
self.stream_address_isv6 = b'\x00'
except ValueError:
self.stream_address_isv6 = b'\x01'
self.sessions = {}
super().__init__(*args, **kwargs)
self.add_preload_service('Microphone')
management = self.add_preload_service('CameraRTPStreamManagement')
management.configure_char('StreamingStatus',
getter_callback=self._get_streaming_status)
management.configure_char('SupportedRTPConfiguration',
value=self.get_supported_rtp_config(
options.get('srtp', False)))
management.configure_char('SupportedVideoStreamConfiguration',
value=self.get_supported_video_stream_config(
options['video']))
management.configure_char('SupportedAudioStreamConfiguration',
value=self.get_supported_audio_stream_config(
options['audio']))
management.configure_char('SelectedRTPStreamConfiguration',
setter_callback=self.set_selected_stream_configuration)
management.configure_char('SetupEndpoints',
setter_callback=self.set_endpoints)
async def _start_stream(self, objs, reconfigure): # pylint: disable=unused-argument
"""Start or reconfigure video streaming for the given session.
Schedules ``self.start_stream`` or ``self.reconfigure``.
No support for reconfigure currently.
:param objs: TLV-decoded SelectedRTPStreamConfiguration
:type objs: ``dict``
:param reconfigure: Whether the stream should be reconfigured instead of
started.
:type reconfigure: bool
"""
video_tlv = objs.get(SELECTED_STREAM_CONFIGURATION_TYPES['VIDEO'])
audio_tlv = objs.get(SELECTED_STREAM_CONFIGURATION_TYPES['AUDIO'])
opts = {}
if video_tlv:
video_objs = tlv.decode(video_tlv)
video_codec_params = video_objs.get(VIDEO_TYPES['CODEC_PARAM'])
if video_codec_params:
video_codec_param_objs = tlv.decode(video_codec_params)
opts['v_profile_id'] = \
video_codec_param_objs[VIDEO_CODEC_PARAM_TYPES['PROFILE_ID']]
opts['v_level'] = \
video_codec_param_objs[VIDEO_CODEC_PARAM_TYPES['LEVEL']]
video_attrs = video_objs.get(VIDEO_TYPES['ATTRIBUTES'])
if video_attrs:
video_attr_objs = tlv.decode(video_attrs)
opts['width'] = struct.unpack('<H',
video_attr_objs[VIDEO_ATTRIBUTES_TYPES['IMAGE_WIDTH']])[0]
opts['height'] = struct.unpack('<H',
video_attr_objs[VIDEO_ATTRIBUTES_TYPES['IMAGE_HEIGHT']])[0]
opts['fps'] = struct.unpack('<B',
video_attr_objs[VIDEO_ATTRIBUTES_TYPES['FRAME_RATE']])[0]
video_rtp_param = video_objs.get(VIDEO_TYPES['RTP_PARAM'])
if video_rtp_param:
video_rtp_param_objs = tlv.decode(video_rtp_param)
if RTP_PARAM_TYPES['SYNCHRONIZATION_SOURCE'] in video_rtp_param_objs:
opts['v_ssrc'] = struct.unpack('<I',
video_rtp_param_objs.get(
RTP_PARAM_TYPES['SYNCHRONIZATION_SOURCE']))[0]
if RTP_PARAM_TYPES['PAYLOAD_TYPE'] in video_rtp_param_objs:
opts['v_payload_type'] = \
video_rtp_param_objs.get(RTP_PARAM_TYPES['PAYLOAD_TYPE'])
if RTP_PARAM_TYPES['MAX_BIT_RATE'] in video_rtp_param_objs:
opts['v_max_bitrate'] = struct.unpack('<H',
video_rtp_param_objs.get(RTP_PARAM_TYPES['MAX_BIT_RATE']))[0]
if RTP_PARAM_TYPES['RTCP_SEND_INTERVAL'] in video_rtp_param_objs:
opts['v_rtcp_interval'] = struct.unpack('<f',
video_rtp_param_objs.get(RTP_PARAM_TYPES['RTCP_SEND_INTERVAL']))[0]
if RTP_PARAM_TYPES['MAX_MTU'] in video_rtp_param_objs:
opts['v_max_mtu'] = video_rtp_param_objs.get(RTP_PARAM_TYPES['MAX_MTU'])
if audio_tlv:
audio_objs = tlv.decode(audio_tlv)
opts['a_codec'] = audio_objs[AUDIO_TYPES['CODEC']]
audio_codec_param_objs = tlv.decode(
audio_objs[AUDIO_TYPES['CODEC_PARAM']])
audio_rtp_param_objs = tlv.decode(
audio_objs[AUDIO_TYPES['RTP_PARAM']])
opts['a_comfort_noise'] = audio_objs[AUDIO_TYPES['COMFORT_NOISE']]
opts['a_channel'] = \
audio_codec_param_objs[AUDIO_CODEC_PARAM_TYPES['CHANNEL']][0]
opts['a_bitrate'] = struct.unpack('?',
audio_codec_param_objs[AUDIO_CODEC_PARAM_TYPES['BIT_RATE']])[0]
opts['a_sample_rate'] = 8 * (
1 + audio_codec_param_objs[AUDIO_CODEC_PARAM_TYPES['SAMPLE_RATE']][0])
opts['a_packet_time'] = struct.unpack('<B',
audio_codec_param_objs[AUDIO_CODEC_PARAM_TYPES['PACKET_TIME']])[0]
opts['a_ssrc'] = struct.unpack('<I',
audio_rtp_param_objs[RTP_PARAM_TYPES['SYNCHRONIZATION_SOURCE']])[0]
opts['a_payload_type'] = audio_rtp_param_objs[RTP_PARAM_TYPES['PAYLOAD_TYPE']]
opts['a_max_bitrate'] = struct.unpack('<H',
audio_rtp_param_objs[RTP_PARAM_TYPES['MAX_BIT_RATE']])[0]
opts['a_rtcp_interval'] = struct.unpack('<f',
audio_rtp_param_objs[RTP_PARAM_TYPES['RTCP_SEND_INTERVAL']])[0]
opts['a_comfort_payload_type'] = \
audio_rtp_param_objs[RTP_PARAM_TYPES['COMFORT_NOISE_PAYLOAD_TYPE']]
session_objs = tlv.decode(objs[SELECTED_STREAM_CONFIGURATION_TYPES['SESSION']])
session_id = UUID(bytes=session_objs[SETUP_TYPES['SESSION_ID']])
session_info = self.sessions[session_id]
opts.update(session_info)
success = await self.reconfigure_stream(session_info, opts) if reconfigure \
else await self.start_stream(session_info, opts)
if success:
self.streaming_status = STREAMING_STATUS['STREAMING']
else:
logger.error(
'[%s] Failed to start/reconfigure stream, deleting session.',
session_id
)
del self.sessions[session_id]
self.streaming_status = STREAMING_STATUS['AVAILABLE']
def _get_streaming_status(self):
"""Get the streaming status in TLV format.
        Called when iOS reads the StreamingStatus ``Characteristic``.
"""
return tlv.encode(b'\x01', self.streaming_status, to_base64=True)
async def _stop_stream(self, objs):
"""Stop the stream for the specified session.
Schedules ``self.stop_stream``.
:param objs: TLV-decoded SelectedRTPStreamConfiguration value.
        :type objs: ``dict``
"""
session_objs = tlv.decode(objs[SELECTED_STREAM_CONFIGURATION_TYPES['SESSION']])
session_id = UUID(bytes=session_objs[SETUP_TYPES['SESSION_ID']])
session_info = self.sessions.get(session_id)
if not session_info:
logger.error(
'Requested to stop stream for session %s, but no '
'such session was found',
session_id
)
return
await self.stop_stream(session_info)
del self.sessions[session_id]
self.streaming_status = STREAMING_STATUS['AVAILABLE']
def set_selected_stream_configuration(self, value):
"""Set the selected stream configuration.
Called from iOS to set the SelectedRTPStreamConfiguration ``Characteristic``.
        This method schedules a stream for the session in ``value`` to be started, stopped
or reconfigured, depending on the request.
:param value: base64-encoded selected configuration in TLV format
:type value: ``str``
"""
logger.debug('set_selected_stream_config - value - %s', value)
objs = tlv.decode(value, from_base64=True)
if SELECTED_STREAM_CONFIGURATION_TYPES['SESSION'] not in objs:
logger.error('Bad request to set selected stream configuration.')
return
session = tlv.decode(objs[SELECTED_STREAM_CONFIGURATION_TYPES['SESSION']])
request_type = session[b'\x02'][0]
logger.debug('Set stream config request: %d', request_type)
if request_type == 1:
job = functools.partial(self._start_stream, reconfigure=False)
elif request_type == 0:
job = self._stop_stream
elif request_type == 4:
job = functools.partial(self._start_stream, reconfigure=True)
else:
logger.error('Unknown request type %d', request_type)
return
self.driver.add_job(job, objs)
def set_endpoints(self, value):
"""Configure streaming endpoints.
Called when iOS sets the SetupEndpoints ``Characteristic``. The endpoint
information for the camera should be set as the current value of SetupEndpoints.
:param value: The base64-encoded stream session details in TLV format.
        :type value: ``str``
"""
objs = tlv.decode(value, from_base64=True)
session_id = UUID(bytes=objs[SETUP_TYPES['SESSION_ID']])
# Extract address info
address_tlv = objs[SETUP_TYPES['ADDRESS']]
address_info_objs = tlv.decode(address_tlv)
is_ipv6 = struct.unpack('?',
address_info_objs[SETUP_ADDR_INFO['ADDRESS_VER']])[0]
address = address_info_objs[SETUP_ADDR_INFO['ADDRESS']].decode('utf8')
target_video_port = struct.unpack(
'<H', address_info_objs[SETUP_ADDR_INFO['VIDEO_RTP_PORT']])[0]
target_audio_port = struct.unpack(
'<H', address_info_objs[SETUP_ADDR_INFO['AUDIO_RTP_PORT']])[0]
# Video SRTP Params
video_srtp_tlv = objs[SETUP_TYPES['VIDEO_SRTP_PARAM']]
video_info_objs = tlv.decode(video_srtp_tlv)
video_crypto_suite = video_info_objs[SETUP_SRTP_PARAM['CRYPTO']][0]
video_master_key = video_info_objs[SETUP_SRTP_PARAM['MASTER_KEY']]
video_master_salt = video_info_objs[SETUP_SRTP_PARAM['MASTER_SALT']]
# Audio SRTP Params
audio_srtp_tlv = objs[SETUP_TYPES['AUDIO_SRTP_PARAM']]
audio_info_objs = tlv.decode(audio_srtp_tlv)
audio_crypto_suite = audio_info_objs[SETUP_SRTP_PARAM['CRYPTO']][0]
audio_master_key = audio_info_objs[SETUP_SRTP_PARAM['MASTER_KEY']]
audio_master_salt = audio_info_objs[SETUP_SRTP_PARAM['MASTER_SALT']]
logger.debug(
'Received endpoint configuration:'
'\nsession_id: %s\naddress: %s\nis_ipv6: %s'
'\ntarget_video_port: %s\ntarget_audio_port: %s'
'\nvideo_crypto_suite: %s\nvideo_srtp: %s'
'\naudio_crypto_suite: %s\naudio_srtp: %s',
session_id, address, is_ipv6, target_video_port, target_audio_port,
video_crypto_suite,
to_base64_str(video_master_key + video_master_salt),
audio_crypto_suite,
to_base64_str(audio_master_key + audio_master_salt)
)
# Configure the SetupEndpoints response
if self.has_srtp:
video_srtp_tlv = tlv.encode(
SETUP_SRTP_PARAM['CRYPTO'], SRTP_CRYPTO_SUITES['AES_CM_128_HMAC_SHA1_80'],
SETUP_SRTP_PARAM['MASTER_KEY'], video_master_key,
SETUP_SRTP_PARAM['MASTER_SALT'], video_master_salt)
audio_srtp_tlv = tlv.encode(
SETUP_SRTP_PARAM['CRYPTO'], SRTP_CRYPTO_SUITES['AES_CM_128_HMAC_SHA1_80'],
SETUP_SRTP_PARAM['MASTER_KEY'], audio_master_key,
SETUP_SRTP_PARAM['MASTER_SALT'], audio_master_salt)
else:
video_srtp_tlv = NO_SRTP
audio_srtp_tlv = NO_SRTP
video_ssrc = int.from_bytes(os.urandom(3), byteorder="big")
audio_ssrc = int.from_bytes(os.urandom(3), byteorder="big")
res_address_tlv = tlv.encode(
SETUP_ADDR_INFO['ADDRESS_VER'], self.stream_address_isv6,
SETUP_ADDR_INFO['ADDRESS'], self.stream_address.encode('utf-8'),
SETUP_ADDR_INFO['VIDEO_RTP_PORT'], struct.pack('<H', target_video_port),
SETUP_ADDR_INFO['AUDIO_RTP_PORT'], struct.pack('<H', target_audio_port))
response_tlv = tlv.encode(
SETUP_TYPES['SESSION_ID'], session_id.bytes,
SETUP_TYPES['STATUS'], SETUP_STATUS['SUCCESS'],
SETUP_TYPES['ADDRESS'], res_address_tlv,
SETUP_TYPES['VIDEO_SRTP_PARAM'], video_srtp_tlv,
SETUP_TYPES['AUDIO_SRTP_PARAM'], audio_srtp_tlv,
SETUP_TYPES['VIDEO_SSRC'], struct.pack('<I', video_ssrc),
SETUP_TYPES['AUDIO_SSRC'], struct.pack('<I', audio_ssrc),
to_base64=True)
self.sessions[session_id] = {
'id': session_id,
'address': address,
'v_port': target_video_port,
'v_srtp_key': to_base64_str(video_master_key + video_master_salt),
'v_ssrc': video_ssrc,
'a_port': target_audio_port,
'a_srtp_key': to_base64_str(audio_master_key + audio_master_salt),
'a_ssrc': audio_ssrc
}
self.get_service('CameraRTPStreamManagement')\
.get_characteristic('SetupEndpoints')\
.set_value(response_tlv)
async def stop(self):
"""Stop all streaming sessions."""
await asyncio.gather(*(
self.stop_stream(session_info) for session_info in self.sessions.values()))
# ### For client extensions ###
async def start_stream(self, session_info, stream_config):
"""Start a new stream with the given configuration.
This method can be implemented to start a new stream. Any specific information
about the started stream can be persisted in the ``session_info`` argument.
The same will be passed to ``stop_stream`` when the stream for this session
needs to be stopped.
The default implementation starts a new process with the command in
``self.start_stream_cmd``, formatted with the ``stream_config``.
:param session_info: Contains information about the current session. Can be used
for session storage. Available keys:
- id - The session ID.
:type session_info: ``dict``
:param stream_config: Stream configuration, as negotiated with the HAP client.
Implementations can only use part of these. Available keys:
General configuration:
- address - The IP address from which the camera will stream
- v_port - Remote port to which to stream video
- v_srtp_key - Base64-encoded key and salt value for the
AES_CM_128_HMAC_SHA1_80 cipher to use when streaming video.
The key and the salt are concatenated before encoding
- a_port - Remote audio port to which to stream audio
                - a_srtp_key - As v_srtp_key, but for the audio stream.
Video configuration:
- v_profile_id - The profile ID for the H.264 codec, e.g. baseline.
Refer to ``VIDEO_CODEC_PARAM_PROFILE_ID_TYPES``.
- v_level - The level in the profile ID, e.g. 3:1.
Refer to ``VIDEO_CODEC_PARAM_LEVEL_TYPES``.
- width - Video width
- height - Video height
- fps - Video frame rate
- v_ssrc - Video synchronisation source
- v_payload_type - Type of the video codec
- v_max_bitrate - Maximum bit rate generated by the codec in kbps
and averaged over 1 second
- v_rtcp_interval - Minimum RTCP interval in seconds
- v_max_mtu - MTU that the IP camera must use to transmit
Video RTP packets.
Audio configuration:
- a_bitrate - Whether the bitrate is variable or constant
- a_codec - Audio codec
                - a_comfort_noise - Whether to use a comfort noise codec
- a_channel - Number of audio channels
- a_sample_rate - Audio sample rate in KHz
- a_packet_time - Length of time represented by the media in a packet
- a_ssrc - Audio synchronisation source
- a_payload_type - Type of the audio codec
- a_max_bitrate - Maximum bit rate generated by the codec in kbps
and averaged over 1 second
- a_rtcp_interval - Minimum RTCP interval in seconds
- a_comfort_payload_type - The type of codec for comfort noise
:return: True if and only if starting the stream command was successful.
:rtype: ``bool``
"""
logger.debug(
'[%s] Starting stream with the following parameters: %s',
session_info['id'],
stream_config
)
cmd = self.start_stream_cmd.format(**stream_config).split()
logger.debug('Executing start stream command: "%s"', ' '.join(cmd))
try:
process = await asyncio.create_subprocess_exec(*cmd,
stdout=asyncio.subprocess.DEVNULL,
stderr=asyncio.subprocess.PIPE,
limit=1024)
except Exception as e: # pylint: disable=broad-except
logger.error('Failed to start streaming process because of error: %s', e)
return False
session_info['process'] = process
logger.info(
'[%s] Started stream process - PID %d',
session_info['id'],
process.pid
)
return True
async def stop_stream(self, session_info): # pylint: disable=no-self-use
"""Stop the stream for the given ``session_id``.
This method can be implemented if custom stop stream commands are needed. The
default implementation gets the ``process`` value from the ``session_info``
object and terminates it (assumes it is a ``subprocess.Popen`` object).
:param session_info: The session info object. Available keys:
- id - The session ID.
:type session_info: ``dict``
"""
session_id = session_info['id']
ffmpeg_process = session_info.get('process')
if ffmpeg_process:
logger.info('[%s] Stopping stream.', session_id)
try:
ffmpeg_process.terminate()
_, stderr = await asyncio.wait_for(
ffmpeg_process.communicate(), timeout=2.0)
logger.debug('Stream command stderr: %s', stderr)
except asyncio.TimeoutError:
logger.error(
'Timeout while waiting for the stream process '
'to terminate. Trying with kill.'
)
ffmpeg_process.kill()
await ffmpeg_process.wait()
logger.debug('Stream process stopped.')
else:
logger.warning('No process for session ID %s', session_id)
async def reconfigure_stream(self, session_info, stream_config):
"""Reconfigure the stream so that it uses the given ``stream_config``.
:param session_info: The session object for the session that needs to
be reconfigured. Available keys:
- id - The session id.
        :type session_info: ``dict``
:return: True if and only if the reconfiguration is successful.
:rtype: ``bool``
"""
        return await self.start_stream(session_info, stream_config)
def get_snapshot(self, image_size): # pylint: disable=unused-argument, no-self-use
"""Return a jpeg of a snapshot from the camera.
Overwrite to implement getting snapshots from your camera.
:param image_size: ``dict`` describing the requested image size. Contains the
keys "image-width" and "image-height"
"""
with open(os.path.join(RESOURCE_DIR, 'snapshot.jpg'), 'rb') as fp:
return fp.read()
```
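A hedged sketch of serving the Camera accessory with an AccessoryDriver; the options dict mirrors the example embedded in the __init__ docstring, the address and resolutions are placeholders, and the driver wiring follows the repository's other demo scripts.
```python
# Sketch only; the address, resolutions and state file name are placeholders.
import signal
from pyhap.accessory_driver import AccessoryDriver
from pyhap import camera

options = {
    "video": {
        "codec": {
            "profiles": [camera.VIDEO_CODEC_PARAM_PROFILE_ID_TYPES["BASELINE"]],
            "levels": [camera.VIDEO_CODEC_PARAM_LEVEL_TYPES["TYPE3_1"]],
        },
        "resolutions": [[320, 240, 15], [640, 480, 30]],
    },
    "audio": {
        "codecs": [{"type": "OPUS", "samplerate": 24}],
    },
    "srtp": True,
    "address": "192.168.1.226",
}

driver = AccessoryDriver(port=51826, persist_file="camera.state")
driver.add_accessory(accessory=camera.Camera(options, driver, "Camera"))
signal.signal(signal.SIGTERM, driver.signal_handler)
driver.start()
```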
#### File: HAP-python/tests/test_state.py
```python
from unittest.mock import patch
import pytest
from pyhap.state import State
def test_setup():
"""Test if State class is setup correctly."""
with pytest.raises(TypeError):
State('invalid_argument')
addr = '172.0.0.1'
mac = '00:00:00:00:00:00'
pin = b'123-45-678'
port = 11111
with patch('pyhap.util.get_local_address') as mock_local_addr, \
patch('pyhap.util.generate_mac') as mock_gen_mac, \
patch('pyhap.util.generate_pincode') as mock_gen_pincode, \
patch('pyhap.util.generate_setup_id') as mock_gen_setup_id, \
patch('ed25519.create_keypair', return_value=(1, 2)) \
as mock_create_keypair:
state = State(address=addr, mac=mac, pincode=pin, port=port)
assert not mock_local_addr.called
assert not mock_gen_mac.called
assert not mock_gen_pincode.called
assert mock_gen_setup_id.called
assert mock_create_keypair.called
assert state.address == addr
assert state.mac == mac
assert state.pincode == pin
assert state.port == port
state = State()
assert mock_local_addr.called
assert mock_gen_mac.called
assert mock_gen_pincode.called
assert state.port == 51827
assert state.config_version == 2
def test_pairing():
"""Test if pairing methods work."""
with patch('pyhap.util.get_local_address'), \
patch('pyhap.util.generate_mac'), \
patch('pyhap.util.generate_pincode'), \
patch('pyhap.util.generate_setup_id'), \
patch('ed25519.create_keypair', return_value=(1, 2)):
state = State()
assert not state.paired
assert not state.paired_clients
state.add_paired_client('uuid', 'public')
assert state.paired
assert state.paired_clients == {'uuid': 'public'}
state.remove_paired_client('uuid')
assert not state.paired
assert not state.paired_clients
```
|
{
"source": "jerr0328/isitsnowinginberlin",
"score": 3
}
|
#### File: isitsnowinginberlin/tests/snowing_test.py
```python
import unittest
# import os
# import flask
# from isitsnowinginberlin import isitsnowinginberlin
class SnowingTests(unittest.TestCase):
def test_truthiness(self):
five = 5
self.assertEqual(five, 5)
if __name__ == "__main__":
unittest.main()
```
|
{
"source": "jerr0328/sample-python-app",
"score": 2
}
|
#### File: sample-python-app/infrabox/generator.py
```python
import json
import os
from typing import Dict
REGISTRY = os.getenv("DOCKER_REGISTRY", "docker.io/jerr0328")
INFRABOX_GIT_TAG = os.getenv("INFRABOX_GIT_TAG")
INFRABOX_GIT_BRANCH = os.getenv("INFRABOX_GIT_BRANCH")
INFRABOX_GITHUB_PULL_REQUEST = os.getenv("INFRABOX_GITHUB_PULL_REQUEST")
INFRABOX_BUILD_NUMBER = os.getenv("INFRABOX_BUILD_NUMBER")
def generate_deployment(image_name: str, tag: str) -> Dict:
return {
"type": "docker-registry",
"host": REGISTRY,
"repository": image_name,
"tag": tag,
}
def add_deployments(job: Dict):
deployments = job.get("deployments")
if not deployments:
return
image_name = deployments[0]["repository"]
if INFRABOX_GIT_BRANCH == "master":
job["deployments"].append(generate_deployment(image_name, "latest"))
if INFRABOX_GIT_TAG:
job["deployments"].append(generate_deployment(image_name, INFRABOX_GIT_TAG))
def main():
with open("infrabox/infrabox.json") as f:
infrabox_json = json.load(f)
for job in infrabox_json["jobs"]:
add_deployments(job)
with open("/infrabox/output/infrabox.json", "w") as f:
json.dump(infrabox_json, f, indent=2)
if __name__ == "__main__":
main()
```
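A quick illustration of what add_deployments does to a single job entry; the job dict values are made up and the import assumes the module above is importable as generator.
```python
# Illustrative only; on the master branch a "latest" deployment is appended,
# plus a tag deployment when INFRABOX_GIT_TAG is set.
import json
from generator import add_deployments

job = {
    "name": "docker-build",
    "deployments": [{
        "type": "docker-registry",
        "host": "docker.io/jerr0328",
        "repository": "sample-python-app",
        "tag": "build-1",
    }],
}
add_deployments(job)
print(json.dumps(job["deployments"], indent=2))
```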
|
{
"source": "jerradf/Crypto-Blockchain-Project",
"score": 4
}
|
#### File: jerradf/Crypto-Blockchain-Project/blockchain.py
```python
import block
import hashlib
import time
class Blockchain:
def __init__(self, name):
self.name = name
self.chain = []
self.transactions = []
self.nodes = set()
self.construct_genesis_block()
self.bought = {}
self.sold = {}
self.in_circulation = {}
self.last_block = self.chain[-1]
def construct_genesis_block(self):
self.construct_block("Genesis Coin", nonce = 0, prev_hash = 0)
def construct_block(self, name, nonce, prev_hash):
new_block = block.Block(name,
len(self.chain),
nonce,
prev_hash,
self.transactions)
        self.chain.append(new_block)
        self.last_block = new_block
        return new_block
def add_transaction(self, name, sender, recipient, quantity):
self.transactions.append({
'name': name,
'sender': sender,
'recipient': recipient,
'quantity': quantity,
})
def buy(self, sender, receiver, quantity, traded_coin_quantity, coin_name):
self.add_transaction(coin_name, sender, receiver, quantity)
        self.bought[coin_name] = self.bought.get(coin_name, 0) + (quantity / 100)
time.sleep(2)
def sell(self, sender, receiver, quantity, traded_coin_quantity, coin_name):
        # Record sells as a negative quantity of the coin being sold
        self.add_transaction(coin_name, sender, receiver, -quantity)
        self.sold[coin_name] = self.sold.get(coin_name, 0) + (quantity / 100)
time.sleep(2)
def value(self, name):
relation = self.bought[name]/self.sold[name]
val = (pow(1.01, relation)) - 1
return val
def check_validity(self, curr_block: block.Block, prev_block: block.Block):
if curr_block.timestamp <= prev_block.timestamp:
return False
return True
    @staticmethod
    def verifying_proof(proof, last_proof):
guess = f'{last_proof}{proof}'.encode()
guess_hash = hashlib.sha256(guess).hexdigest()
return guess_hash[:4] == "0000"
    @staticmethod
    def proof_of_work(last_proof):
nonce = 0
while Blockchain.verifying_proof(nonce, last_proof) == False:
nonce += 1
return nonce
def mine(self, quantity, name, miner_receiver):
self.add_transaction( name,
"0",
miner_receiver,
quantity,
)
last_block = self.last_block
last_nonce = last_block.nonce
nonce = Blockchain.proof_of_work(last_nonce)
block = None
time.sleep(quantity)
if nonce != 0:
last_hash = last_block.calculate_hash
block = self.construct_block(self.name, nonce, last_hash)
if (self.check_validity(block, last_block) == False):
block = None
self.chain.pop()
else:
                self.in_circulation[name] = self.in_circulation.get(name, 0) + quantity
return block
def create_node(self, address):
self.nodes.add(address)
```
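A short usage sketch for the chain above; the coin name and quantities are arbitrary, the import assumes the file is importable as blockchain, and the companion block module must be on the path. Note that mine(), buy() and sell() each sleep for a few seconds by design.
```python
# Sketch only; expect it to take several seconds because of the built-in sleeps.
from blockchain import Blockchain

chain = Blockchain("DemoCoin")
chain.mine(quantity=1, name="DemoCoin", miner_receiver="alice")
chain.buy(sender="alice", receiver="bob", quantity=10,
          traded_coin_quantity=1, coin_name="DemoCoin")
chain.sell(sender="bob", receiver="alice", quantity=5,
           traded_coin_quantity=1, coin_name="DemoCoin")
print(len(chain.chain), chain.value("DemoCoin"))
```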
|
{
"source": "jerradf/Crypto-Blockchain-Project-template",
"score": 2
}
|
#### File: jerradf/Crypto-Blockchain-Project-template/colored_text.py
```python
import colorama
def print_green(s):
print(colorama.Fore.GREEN + s)
def print_white(s):
print(colorama.Fore.WHITE + s)
```
|
{
"source": "jerradgenson/rapid-learn",
"score": 2
}
|
#### File: rapid-learn/tools/preprocessing.py
```python
def normalize(data):
"""
Normalize data set to have zero mean and unit variance.
Args
data: A numpy array of arrays containing input or target data.
Returns
A normalized numpy array of arrays.
"""
    return (data - data.mean(axis=0)) / data.std(axis=0)
```
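A quick numerical check of normalize on random data; the array shape and the import path are illustrative assumptions.
```python
# Columns of the normalized array should come out with mean ~0 and std ~1.
import numpy as np
from tools.preprocessing import normalize

data = np.random.default_rng(0).normal(loc=5.0, scale=2.0, size=(100, 3))
scaled = normalize(data)
print(scaled.mean(axis=0))  # approximately 0 per column
print(scaled.std(axis=0))   # approximately 1 per column
```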
|
{
"source": "jerr/bcc",
"score": 2
}
|
#### File: bcc/tools/runqslower.py
```python
from __future__ import print_function
from bcc import BPF
import argparse
from time import strftime
import ctypes as ct
# arguments
examples = """examples:
./runqslower # trace run queue latency higher than 10000 us (default)
./runqslower 1000 # trace run queue latency higher than 1000 us
./runqslower -p 123 # trace pid 123 only
"""
parser = argparse.ArgumentParser(
description="Trace high run queue latency",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-p", "--pid", type=int, metavar="PID", dest="pid",
help="trace this PID only")
parser.add_argument("min_us", nargs="?", default='10000',
help="minimum run queue latecy to trace, in ms (default 10000)")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
min_us = int(args.min_us)
debug = 0
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
BPF_HASH(start, u32);
struct rq;
struct data_t {
u32 pid;
char task[TASK_COMM_LEN];
u64 delta_us;
};
BPF_PERF_OUTPUT(events);
// record enqueue timestamp
static int trace_enqueue(u32 tgid, u32 pid)
{
if (FILTER_PID || pid == 0)
return 0;
u64 ts = bpf_ktime_get_ns();
start.update(&pid, &ts);
return 0;
}
"""
bpf_text_kprobe = """
int trace_wake_up_new_task(struct pt_regs *ctx, struct task_struct *p)
{
return trace_enqueue(p->tgid, p->pid);
}
int trace_ttwu_do_wakeup(struct pt_regs *ctx, struct rq *rq, struct task_struct *p,
int wake_flags)
{
return trace_enqueue(p->tgid, p->pid);
}
// calculate latency
int trace_run(struct pt_regs *ctx, struct task_struct *prev)
{
u32 pid, tgid;
// ivcsw: treat like an enqueue event and store timestamp
if (prev->state == TASK_RUNNING) {
tgid = prev->tgid;
pid = prev->pid;
if (!(FILTER_PID || pid == 0)) {
u64 ts = bpf_ktime_get_ns();
start.update(&pid, &ts);
}
}
tgid = bpf_get_current_pid_tgid() >> 32;
pid = bpf_get_current_pid_tgid();
u64 *tsp, delta_us;
// fetch timestamp and calculate delta
tsp = start.lookup(&pid);
if (tsp == 0) {
return 0; // missed enqueue
}
delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
if (FILTER_US)
return 0;
struct data_t data = {};
data.pid = pid;
data.delta_us = delta_us;
bpf_get_current_comm(&data.task, sizeof(data.task));
// output
events.perf_submit(ctx, &data, sizeof(data));
start.delete(&pid);
return 0;
}
"""
bpf_text_raw_tp = """
RAW_TRACEPOINT_PROBE(sched_wakeup)
{
// TP_PROTO(struct task_struct *p)
struct task_struct *p = (struct task_struct *)ctx->args[0];
return trace_enqueue(p->tgid, p->pid);
}
RAW_TRACEPOINT_PROBE(sched_wakeup_new)
{
// TP_PROTO(struct task_struct *p)
struct task_struct *p = (struct task_struct *)ctx->args[0];
u32 tgid, pid;
bpf_probe_read(&tgid, sizeof(tgid), &p->tgid);
bpf_probe_read(&pid, sizeof(pid), &p->pid);
return trace_enqueue(tgid, pid);
}
RAW_TRACEPOINT_PROBE(sched_switch)
{
// TP_PROTO(bool preempt, struct task_struct *prev, struct task_struct *next)
struct task_struct *prev = (struct task_struct *)ctx->args[1];
struct task_struct *next= (struct task_struct *)ctx->args[2];
u32 pid, tgid;
long state;
// ivcsw: treat like an enqueue event and store timestamp
bpf_probe_read(&state, sizeof(long), &prev->state);
if (state == TASK_RUNNING) {
bpf_probe_read(&tgid, sizeof(prev->tgid), &prev->tgid);
bpf_probe_read(&pid, sizeof(prev->pid), &prev->pid);
if (!(FILTER_PID || pid == 0)) {
u64 ts = bpf_ktime_get_ns();
start.update(&pid, &ts);
}
}
bpf_probe_read(&tgid, sizeof(next->tgid), &next->tgid);
bpf_probe_read(&pid, sizeof(next->pid), &next->pid);
u64 *tsp, delta_us;
// fetch timestamp and calculate delta
tsp = start.lookup(&pid);
if (tsp == 0) {
return 0; // missed enqueue
}
delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
if (FILTER_US)
return 0;
struct data_t data = {};
data.pid = pid;
data.delta_us = delta_us;
bpf_get_current_comm(&data.task, sizeof(data.task));
// output
events.perf_submit(ctx, &data, sizeof(data));
start.delete(&pid);
return 0;
}
"""
is_support_raw_tp = BPF.support_raw_tracepoint()
if is_support_raw_tp:
bpf_text += bpf_text_raw_tp
else:
bpf_text += bpf_text_kprobe
# code substitutions
if min_us == 0:
bpf_text = bpf_text.replace('FILTER_US', '0')
else:
bpf_text = bpf_text.replace('FILTER_US', 'delta_us <= %s' % str(min_us))
if args.pid:
bpf_text = bpf_text.replace('FILTER_PID', 'pid != %s' % args.pid)
else:
bpf_text = bpf_text.replace('FILTER_PID', '0')
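# For example, running with "-p 123" turns FILTER_PID into "pid != 123", so
# trace_enqueue() returns early for every other PID; with min_us at its default,
# FILTER_US becomes "delta_us <= 10000" and shorter latencies are skipped.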
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
exit()
# kernel->user event data: struct data_t
DNAME_INLINE_LEN = 32 # linux/dcache.h
TASK_COMM_LEN = 16 # linux/sched.h
class Data(ct.Structure):
_fields_ = [
("pid", ct.c_uint),
("task", ct.c_char * TASK_COMM_LEN),
("delta_us", ct.c_ulonglong),
]
# process event
def print_event(cpu, data, size):
event = ct.cast(data, ct.POINTER(Data)).contents
print("%-8s %-16s %-6s %14s" % (strftime("%H:%M:%S"), event.task, event.pid, event.delta_us))
# load BPF program
b = BPF(text=bpf_text)
if not is_support_raw_tp:
b.attach_kprobe(event="ttwu_do_wakeup", fn_name="trace_ttwu_do_wakeup")
b.attach_kprobe(event="wake_up_new_task", fn_name="trace_wake_up_new_task")
b.attach_kprobe(event="finish_task_switch", fn_name="trace_run")
print("Tracing run queue latency higher than %d us" % min_us)
print("%-8s %-16s %-6s %14s" % ("TIME", "COMM", "PID", "LAT(us)"))
# read events
b["events"].open_perf_buffer(print_event, page_cnt=64)
while 1:
b.perf_buffer_poll()
```
#### File: bcc/tools/tplist.py
```python
import argparse
import fnmatch
import os
import re
import sys
from bcc import USDT
trace_root = "/sys/kernel/debug/tracing"
event_root = os.path.join(trace_root, "events")
parser = argparse.ArgumentParser(
description="Display kernel tracepoints or USDT probes " +
"and their formats.",
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-p", "--pid", type=int, default=None,
help="List USDT probes in the specified process")
parser.add_argument("-l", "--lib", default="",
help="List USDT probes in the specified library or executable")
parser.add_argument("-v", dest="verbosity", action="count", default=0,
help="Increase verbosity level (print variables, arguments, etc.)")
parser.add_argument(dest="filter", nargs="?",
help="A filter that specifies which probes/tracepoints to print")
args = parser.parse_args()
def print_tpoint_format(category, event):
fmt = open(os.path.join(event_root, category, event, "format")).readlines()
for line in fmt:
match = re.search(r'field:([^;]*);', line)
if match is None:
continue
parts = match.group(1).split()
field_name = parts[-1:][0]
field_type = " ".join(parts[:-1])
if field_name.startswith("common_"):
continue
print(" %s %s;" % (field_type, field_name))
def print_tpoint(category, event):
tpoint = "%s:%s" % (category, event)
if not args.filter or fnmatch.fnmatch(tpoint, args.filter):
print(tpoint)
if args.verbosity > 0:
print_tpoint_format(category, event)
def print_tracepoints():
for category in os.listdir(event_root):
cat_dir = os.path.join(event_root, category)
if not os.path.isdir(cat_dir):
continue
for event in os.listdir(cat_dir):
evt_dir = os.path.join(cat_dir, event)
if os.path.isdir(evt_dir):
print_tpoint(category, event)
def print_usdt_argument_details(location):
for idx in range(0, location.num_arguments):
arg = location.get_argument(idx)
print(" argument #%d %s" % (idx + 1, arg))
def print_usdt_details(probe):
if args.verbosity > 0:
print(probe)
if args.verbosity > 1:
for idx in range(0, probe.num_locations):
loc = probe.get_location(idx)
print(" location #%d %s" % (idx + 1, loc))
print_usdt_argument_details(loc)
else:
print(" %d location(s)" % probe.num_locations)
print(" %d argument(s)" % probe.num_arguments)
else:
print("%s %s:%s" %
(probe.bin_path, probe.provider, probe.name))
def print_usdt(pid, lib):
reader = USDT(path=lib, pid=pid)
probes_seen = []
for probe in reader.enumerate_probes():
probe_name = probe.short_name()
if not args.filter or fnmatch.fnmatch(probe_name, args.filter):
if probe_name in probes_seen:
continue
probes_seen.append(probe_name)
print_usdt_details(probe)
if __name__ == "__main__":
try:
if args.pid or args.lib != "":
print_usdt(args.pid, args.lib)
else:
print_tracepoints()
except:
if sys.exc_info()[0] is not SystemExit:
print(sys.exc_info()[1])
```
|
{
"source": "jerriais/SecureGateway",
"score": 2
}
|
#### File: examples/climateapp/climateapp.py
```python
import argparse
import logging
import os.path
import sys
try:
import tkinter
except ImportError:
tkinter = None
if sys.version_info < (3, 2, 0):
raise EnvironmentError("Python version 3.2 or later required!")
MODULES_DIRECTORY = os.path.abspath('../../modules')
sys.path.append(MODULES_DIRECTORY)
import sgframework
DESCRIPTIVE_TEXT_TEMPLATE = """
A vehicle app example for the Secure Gateway concept architecture.
This is an "App" according to the Secure Gateway nomenclature. It registers on
the Secure Gateway network, and receives vehicle data. It can also send commands
to the CAN-adapter to turn on the air condition.
It can be used in two different modes. The command line mode should always be
available. The graphical mode requires Tk installed on the machine.
This is typically installed with:
sudo apt-get install python3-tk
This app can connect to the broker in a secure or insecure way. The settings
of the broker determines what is allowed. To connect in the secure way,
the directory of the certificate files must be specified.
The certificate files should be named:
CA file: {}
Certificate file: {}
Key file: {}
"""
APPNAME = "climateapp"
CLIMATERESOURCE_NAME = "climateservice"
MQTT_SIGNALNAME_AIRCONDITION = "aircondition"
MQTT_SIGNALNAME_VEHICLESPEED = "vehiclespeed"
MQTT_SIGNALNAME_ENGINESPEED = "enginespeed"
MQTT_SIGNALNAME_INDOORTEMPERATURE = "actualindoortemperature"
CAN_PAYLOAD_TRUE = 1
CAN_PAYLOAD_FALSE = 0
TIMEOUT = 0.1 # seconds
def init_climateapp():
## Parse command line and set output verbosity ##
epilog = DESCRIPTIVE_TEXT_TEMPLATE.format(sgframework.Resource.CA_CERTS,
sgframework.Resource.CERTFILE,
sgframework.Resource.KEYFILE)
commandlineparser = argparse.ArgumentParser(epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter)
commandlineparser.add_argument('-v', action='count', default=0, help="Increase verbosity level. Can be repeated.")
commandlineparser.add_argument('-host', default='localhost', help="Broker host name. Defaults to %(default)s.")
commandlineparser.add_argument('-port', default=1883, help="Broker port number. Defaults to %(default)s.")
commandlineparser.add_argument('-cert', help="Directory for certificate files. Defaults to not using certificates.")
commandlineparser.add_argument('-mode', choices=['commandline', 'graphical'], default='commandline',
help="Type of use interface. Depends on graphical display. " +
"Defaults to '%(default)s'.")
commandline = commandlineparser.parse_args()
if commandline.v == 1:
loglevel = logging.INFO
elif commandline.v >= 2:
loglevel = logging.DEBUG
else:
loglevel = logging.WARNING
logging.basicConfig(level=loglevel)
## Initialize Secure Gateway app framework ##
app = sgframework.App(APPNAME, commandline.host, commandline.port, commandline.cert)
app.timeout = TIMEOUT
app.register_incoming_availability(app.PREFIX_RESOURCEAVAILABLE, CLIMATERESOURCE_NAME, "", on_resource_presence)
for mqtt_signalname in [MQTT_SIGNALNAME_AIRCONDITION, MQTT_SIGNALNAME_VEHICLESPEED,
MQTT_SIGNALNAME_ENGINESPEED, MQTT_SIGNALNAME_INDOORTEMPERATURE]:
app.register_incoming_data(CLIMATERESOURCE_NAME, mqtt_signalname, on_incoming_data)
app.on_broker_connectionstatus_info = on_broker_connectionstatus_info
## Select display mode ##
if commandline.mode == 'graphical':
displ = GraphicalAppDisplay(app)
else:
displ = CommandlineAppDisplay(app)
app.userdata = displ
app.start()
return app
def loop_climateapp(app):
# Handle MQTT communication
app.loop()
# Update GUI if any
displ = app.userdata
try:
displ.loop()
except tkinter.TclError:
app.logger.warning("The graphical app window was closed")
app.stop()
raise KeyboardInterrupt
###############
## Callbacks ##
###############
def on_broker_connectionstatus_info(app, broker_connected):
"""Callback for use when the broker connection status info is available."""
displ = app.userdata
displ.broker_connectionstatus = broker_connected
def on_resource_presence(app, messagetype, servicename, signalname, payload):
"""Callback for use when receiving a MQTT message.
Sets the presence information (on the display) about the resource in use.
"""
displ = app.userdata
if payload == app.PAYLOAD_TRUE:
presence = True
else:
presence = False
displ.resource_online = presence
logging.info("Resource online: {}".format(presence))
def on_incoming_data(app, messagetype, servicename, signalname, payload):
"""Callback for use when receiving a MQTT message.
Sets display fields.
"""
displ = app.userdata
if not displ.resource_online:
app.logger.warning("Received signal from an offline resource. Servicename: {}, signalname: {}, payload: {}".format(
servicename, signalname, payload))
return
if signalname == MQTT_SIGNALNAME_VEHICLESPEED:
displ.vehiclespeed = float(payload)
elif signalname == MQTT_SIGNALNAME_ENGINESPEED:
displ.enginespeed = float(payload)
elif signalname == MQTT_SIGNALNAME_INDOORTEMPERATURE:
displ.indoortemperature = float(payload)
elif signalname == MQTT_SIGNALNAME_AIRCONDITION:
displ.aircondition_state = bool(int(payload))
######################
## Main application ##
######################
def main():
app = init_climateapp()
## Main loop ##
while True:
try:
loop_climateapp(app)
except KeyboardInterrupt:
sys.exit()
##########################
## Displays for the app ##
##########################
STATE_UNKNOWN = -1
class CommandlineAppDisplay:
def __init__(self, app):
self.app = app
self._broker_connectionstatus = False
self._resource_online = False
self._initialize_values()
def _initialize_values(self):
self._aircondition_state = STATE_UNKNOWN
self._enginespeed = 0.0
self._vehiclespeed = 0.0
self._indoortemperature = 0.0
@property
def broker_connectionstatus(self):
return self._broker_connectionstatus
@broker_connectionstatus.setter
def broker_connectionstatus(self, value):
self._broker_connectionstatus = bool(value)
if not value:
self._resource_online = False
self.redraw()
@property
def resource_online(self):
return self._resource_online
@resource_online.setter
def resource_online(self, value):
self._resource_online = bool(value)
self.redraw()
@property
def aircondition_state(self):
return self._aircondition_state
@aircondition_state.setter
def aircondition_state(self, value):
self._aircondition_state = bool(value)
self.redraw()
@property
def vehiclespeed(self):
return self._vehiclespeed
@vehiclespeed.setter
def vehiclespeed(self, value):
self._vehiclespeed = float(value)
self.redraw()
@property
def enginespeed(self):
return self._enginespeed
@enginespeed.setter
def enginespeed(self, value):
self._enginespeed = float(value)
self.redraw()
@property
def indoortemperature(self):
return self._indoortemperature
@indoortemperature.setter
def indoortemperature(self, value):
self._indoortemperature = float(value)
self.redraw()
def loop(self):
self.redraw()
answer = input(" AC input: 'on'/'off' or Enter to redraw: ")
if not answer:
return
if not self.resource_online:
return
if answer == 'on':
logging.info("Turning on air condition")
self.app.send_command(CLIMATERESOURCE_NAME, MQTT_SIGNALNAME_AIRCONDITION, CAN_PAYLOAD_TRUE)
elif answer == 'off':
logging.info("Turning off air condition")
self.app.send_command(CLIMATERESOURCE_NAME, MQTT_SIGNALNAME_AIRCONDITION, CAN_PAYLOAD_FALSE)
def redraw(self):
if not self.broker_connectionstatus:
statustext = "Not connected to broker "
elif not self.resource_online:
statustext = "Climateservice offline. "
else:
if self.aircondition_state == STATE_UNKNOWN:
acstatus = "unknown"
elif self.aircondition_state:
acstatus = "on"
else:
acstatus = "off"
TEMPLATE = "{:5.1f} km/h {:4.0f} RPM {:5.1f} degC, AC {:<8s}"
statustext = TEMPLATE.format(self.vehiclespeed, self.enginespeed, self.indoortemperature, acstatus)
sys.stdout.write("\r" + statustext)
sys.stdout.flush()
def close(self):
pass
class GraphicalAppDisplay(CommandlineAppDisplay):
DISPLAY_TITLE = "Climate app"
TEMPLATE_AIRCONDITION = "Air condition: {}"
TEMPLATE_VEHICLESPEED = "Vehicle speed: {:.1f} km/h"
TEMPLATE_ENGINESPEED = "Engine speed: {:.0f} RPM"
TEMPLATE_TEMPERATURE = "In-car temperature: {:.1f} deg C"
TEMPLATE_CONNECTION = "Connection status: {}"
def __init__(self, app):
if tkinter is None:
raise ImportError("TK or tkinter is not installed")
self._rootframe = tkinter.Tk()
self._rootframe.title(self.DISPLAY_TITLE)
self._label_temperature = tkinter.Label(self._rootframe, text="")
self._label_temperature.pack()
self._label_aircondition = tkinter.Label(self._rootframe, text="")
self._label_aircondition.pack()
self._button_on = tkinter.Button(self._rootframe, text="Air condition: Turn ON", width=25)
self._button_on.pack()
self._button_on.bind('<Button-1>', self._button_on_handler)
self._button_off = tkinter.Button(self._rootframe, text="Air condition: Turn OFF", width=25)
self._button_off.pack()
self._button_off.bind('<Button-1>', self._button_off_handler)
dummylabel = tkinter.Label(self._rootframe, text=" ")
dummylabel.pack()
self._label_vehiclespeed = tkinter.Label(self._rootframe, text="")
self._label_vehiclespeed.pack()
self._label_enginespeed = tkinter.Label(self._rootframe, text="")
self._label_enginespeed.pack()
dummylabel = tkinter.Label(self._rootframe, text=" ")
dummylabel.pack()
self._label_connectionstatus = tkinter.Label(self._rootframe, text="")
self._label_connectionstatus.pack()
super().__init__(app)
self.loop()
def _button_on_handler(self, event):
if self.resource_online:
logging.info("Turning on air condition")
self.app.send_command(CLIMATERESOURCE_NAME, MQTT_SIGNALNAME_AIRCONDITION, CAN_PAYLOAD_TRUE)
def _button_off_handler(self, event):
if self.resource_online:
logging.info("Turning off air condition")
self.app.send_command(CLIMATERESOURCE_NAME, MQTT_SIGNALNAME_AIRCONDITION, CAN_PAYLOAD_FALSE)
def loop(self):
"""Update the GUI"""
self._rootframe.update_idletasks()
self._rootframe.update()
def close(self):
"""Close the GUI"""
self._rootframe.destroy()
def redraw(self):
if not self.broker_connectionstatus:
self._initialize_values()
connectionstatus = "No broker"
widget_state = tkinter.DISABLED
ac_statustext = "unknown"
ac_color = 'black'
elif not self.resource_online:
self._initialize_values()
connectionstatus = "Climateservice offline"
widget_state = tkinter.DISABLED
ac_statustext = "unknown"
ac_color = 'black'
else:
connectionstatus = "Climateservice online"
widget_state = tkinter.NORMAL
if self.aircondition_state == STATE_UNKNOWN:
ac_statustext = "unknown"
ac_color = 'black'
elif self.aircondition_state:
ac_statustext = "on"
ac_color = 'green4'
else:
ac_statustext = "off"
ac_color = 'red'
self._label_connectionstatus.config(text=self.TEMPLATE_CONNECTION.format(connectionstatus))
self._button_on.config(state=widget_state)
self._button_off.config(state=widget_state)
self._label_vehiclespeed.config(text=self.TEMPLATE_VEHICLESPEED.format(self.vehiclespeed), state=widget_state)
self._label_enginespeed.config(text=self.TEMPLATE_ENGINESPEED.format(self.enginespeed), state=widget_state)
self._label_aircondition.config(text=self.TEMPLATE_AIRCONDITION.format(ac_statustext),
state=widget_state, foreground=ac_color)
self._label_temperature.config(text=self.TEMPLATE_TEMPERATURE.format(self.indoortemperature), state=widget_state)
if __name__ == '__main__':
main()
```
#### File: examples/minimal/minimaltaxisign.py
```python
import time
import sgframework
def on_taxisign_state_command(resource, messagetype, servicename,
commandname, commandpayload):
if commandpayload.strip() == 'True':
print("Turning on my taxi sign.", flush=True)
return 'True'
else:
print("Turning off my taxi sign.", flush=True)
return 'False'
resource = sgframework.Resource('taxisignservice', 'localhost')
resource.register_incoming_command('state',
on_taxisign_state_command,
defaultvalue='False',
send_echo_as_retained=True)
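# The callback's return value ('True'/'False') is echoed back by the framework; with
# send_echo_as_retained=True the echo is presumably published as a retained MQTT
# message, so late subscribers immediately see the current sign state.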
resource.start(use_threaded_networking=True)
while True:
time.sleep(1)
```
#### File: taxisignservice/drivers/output_pin_driver.py
```python
FILE_FOR_GPIO_EXPORT = '/sys/class/gpio/export'
TEMPLATE_FOR_GPIO_PIN_DIRECTION_FILE = '/sys/class/gpio/gpio{}/direction'
TEMPLATE_FOR_GPIO_PIN_VALUE_FILE = '/sys/class/gpio/gpio{}/value'
DIRECTION_OUT = 'out'
GPIO_STATE_ON = '1'
GPIO_STATE_OFF = '0'
MODE_FILE_WRITE = 'w'
import errno
class Outputpin(object):
"""GPIO output pin representation.
For controlling a GPIO output pin on a Beaglebone.
Note that root permissions are required.
Attributes:
* state (bool): Turn on the GPIO output pin if the value is True.
"""
def __init__(self, GPIO_number):
self._state = False
self._GPIO_number = GPIO_number
# Export the GPIO pin to Linux userspace
try:
with open(FILE_FOR_GPIO_EXPORT, MODE_FILE_WRITE) as f:
f.write(str(self._GPIO_number))
except IOError as e:
if e.errno != errno.EBUSY: # EBUSY: Pin is already exported.
raise
# Set pin in digital output mode
file_for_gpio_pin_direction = TEMPLATE_FOR_GPIO_PIN_DIRECTION_FILE.format(self._GPIO_number)
with open(file_for_gpio_pin_direction, MODE_FILE_WRITE) as f:
f.write(DIRECTION_OUT)
# Set initial state
self.state = False
@property
def state(self):
return self._state
@state.setter
def state(self, value):
if value:
filetext = GPIO_STATE_ON
self._state = True
else:
filetext = GPIO_STATE_OFF
self._state = False
# Write pin value to the file-like driver interface
file_for_gpio_pin_value = TEMPLATE_FOR_GPIO_PIN_VALUE_FILE.format(self._GPIO_number)
with open(file_for_gpio_pin_value, MODE_FILE_WRITE) as f:
f.write(filetext)
########################################################
# Testing the module #
# #
# Connect a LED from port8 pin3 to ground via 470 Ohm. #
# The voltage is 3.3 V on the pin. #
# The output is gpmc_ad6 = GPIO1_6 #
# This is GPIO 38 (1*32 + 6) #
# #
########################################################
if __name__ == '__main__':
import time
pin = Outputpin(38)
pin.state = True
time.sleep(1)
pin.state = False
time.sleep(1)
pin.state = True
time.sleep(1)
pin.state = False
```
#### File: examples/vehiclesimulator/vehiclesimulator.py
```python
import argparse
import logging
import time
import sys
import can4python as can
import vehiclesimulationutilities
# Settings #
CYCLE_TIME = 0.1 # seconds
def init_vehiclesimulator():
"""Initialize the vehicle simulator.
Returns the tuple (temperature_simulator, speed_simulator, canbus)
"""
# Define CAN messages #
# Example on how to define CAN signals in source code
CAN_EGO_NODE_ID = "2"
FRAMEDEF1 = can.CanFrameDefinition(8, name='vehiclesimulationdata')
FRAMEDEF1.producer_ids = [CAN_EGO_NODE_ID]
FRAMEDEF1.signaldefinitions.append(can.CanSignalDefinition('vehiclespeed', startbit=8, numberofbits=16,
scalingfactor=0.01, endianness='big'))
FRAMEDEF1.signaldefinitions.append(can.CanSignalDefinition('enginespeed', startbit=26, numberofbits=14,
endianness='big'))
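# Note on scaling (assuming can4python's usual physical = raw * scalingfactor + valueoffset
# convention): the 16-bit vehiclespeed signal with factor 0.01 spans roughly 0-655.35 km/h
# and the 14-bit enginespeed signal spans 0-16383 RPM.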
FRAMEDEF2 = can.CanFrameDefinition(9, name='climatesimulationdata')
FRAMEDEF2.producer_ids = [CAN_EGO_NODE_ID]
FRAMEDEF2.signaldefinitions.append(can.CanSignalDefinition('indoortemperature', startbit=8, numberofbits=11,
valueoffset=-50, scalingfactor=0.1,
endianness='big'))
FRAMEDEF3 = can.CanFrameDefinition(7, name='climatecontrolsignals')
FRAMEDEF3.signaldefinitions.append(can.CanSignalDefinition('acstatus', startbit=7, numberofbits=1,
endianness='big'))
CONFIG = can.Configuration(ego_node_ids=[CAN_EGO_NODE_ID])
CONFIG.add_framedefinition(FRAMEDEF1)
CONFIG.add_framedefinition(FRAMEDEF2)
CONFIG.add_framedefinition(FRAMEDEF3)
# Parse command line and set output verbosity #
commandlineparser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
commandlineparser.add_argument('-v', action='count', default=0, help="Increase verbosity level. Can be repeated.")
commandlineparser.add_argument('-i', dest="interface", default="vcan0",
help="CAN interface name. Defaults to %(default)s.")
commandline = commandlineparser.parse_args()
if commandline.v == 1:
loglevel = logging.INFO
elif commandline.v >= 2:
loglevel = logging.DEBUG
else:
loglevel = logging.WARNING
logging.basicConfig(level=loglevel)
# Set up CAN bus #
logging.info(" ")
logging.info(" ")
logging.info("Starting vehicle simulator, using CAN interface {!r} with timeout {} s.".format(
commandline.interface, CYCLE_TIME))
canbus = can.CanBus(CONFIG, commandline.interface, timeout=CYCLE_TIME)
logging.debug(canbus.get_descriptive_ascii_art())
# Set up simulators #
speed_simulator = vehiclesimulationutilities.VehicleSpeedSimulator()
temperature_simulator = vehiclesimulationutilities.CabinTemperatureSimulator()
return temperature_simulator, speed_simulator, canbus
def loop_vehiclesimulator(temperature_simulator, speed_simulator, canbus):
# Run simulators #
temperature = temperature_simulator.get_new_temperature()
vehiclespeed = speed_simulator.get_new_randomized_speed()
enginespeed = vehiclesimulationutilities.calculate_engine_speed(vehiclespeed)
logging.info(" {0:5.1f} km/h, {1:4.0f} RPM, {2:4.1f} deg C. Air condition state: {3}".format(
vehiclespeed, enginespeed, temperature, temperature_simulator.aircondition_state))
# Send CAN data #
signals_to_send = {'indoortemperature': temperature,
'vehiclespeed': vehiclespeed,
'enginespeed': enginespeed}
canbus.send_signals(signals_to_send)
# Receive CAN data, if available #
readstart = time.time()
try:
received = canbus.recv_next_signals()
except KeyboardInterrupt:
logging.warning("Keyboard interrupt. Quitting.")
raise
except can.CanTimeoutException:
received = {}
except can.CanException as err:
logging.warning('Failed to receive CAN frame. Error: {}'.format(err))
received = {}
readtime = time.time() - readstart
time.sleep(max(0, CYCLE_TIME-readtime))
# Set air condition state #
if 'acstatus' in received:
temperature_simulator.aircondition_state = bool(received['acstatus'])
######################
## Main application ##
######################
def main():
temperature_simulator, speed_simulator, canbus = init_vehiclesimulator()
## Main loop ##
while True:
try:
loop_vehiclesimulator(temperature_simulator, speed_simulator, canbus)
except KeyboardInterrupt:
sys.exit()
if __name__ == '__main__':
main()
```
#### File: SecureGateway/tests/test_servicemanager.py
```python
import os.path
import os
import signal
import subprocess
import sys
import time
import unittest
assert sys.version_info >= (3, 3, 0), "Python version 3.3 or later required!"
THIS_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
PARENT_DIRECTORY = os.path.dirname(THIS_DIRECTORY)
SOURCE_DIRECTORY = os.path.join(PARENT_DIRECTORY, 'scripts/')
sys.path.append(SOURCE_DIRECTORY)
MQTT_TOPICS_TO_DELETE = [
'dataavailable/testresource2/testdata1',
'dataavailable/testresource2/testdata2',
'dataavailable/testresource2/testdata3',
'commandavailable/testresource2/testcommand1',
'commandavailable/testresource2/testcommand2',
'commandavailable/testresource2/testcommand3',
'resourceavailable/testresource2/presence',
]
class TestServicemanager(unittest.TestCase):
OUTPUT_FILE_SUBSCRIBER = 'temporary-sub.txt'
def setUp(self):
self.environment = os.environ.copy()
self.environment["COVERAGE_PROCESS_START"] = os.path.join(THIS_DIRECTORY, "coveragerc")
def tearDown(self):
# Delete temporary files
try:
os.remove(self.OUTPUT_FILE_SUBSCRIBER)
except FileNotFoundError:
pass
# Delete persistent MQTT messages
for topic in MQTT_TOPICS_TO_DELETE:
pub = subprocess.Popen(['mosquitto_pub', '-t', topic, '-r', '-n'])
time.sleep(0.2)
pub.terminate()
def testServicemanager(self):
with open(self.OUTPUT_FILE_SUBSCRIBER, 'w') as subscriber_outputfile:
subscriber = subprocess.Popen(['mosquitto_sub', '-v', '-t', '+/#'],
stdout=subscriber_outputfile,
stderr=subprocess.STDOUT)
servicemanager = subprocess.Popen(['python3', 'scripts/servicemanager'],
stderr=subprocess.STDOUT,
env=self.environment)
time.sleep(3)
# Simulate a starting resource (so the servicemanager can store the data)
mqtt_message_list = [('resourceavailable/testresource2/presence', 'True'),
('dataavailable/testresource2/testdata1', 'True'),
('dataavailable/testresource2/testdata2', 'True'),
('dataavailable/testresource2/testdata3', 'True'),
('commandavailable/testresource2/testcommand1', 'True'),
('commandavailable/testresource2/testcommand2', 'True'),
('commandavailable/testresource2/testcommand3', 'True'),
]
for topic, payload in mqtt_message_list:
pub1 = subprocess.Popen(['mosquitto_pub', '-t', topic, '-m', payload])
time.sleep(0.2)
pub1.terminate()
time.sleep(3)
# Simulate a stopping resource
pub1 = subprocess.Popen(['mosquitto_pub', '-t', 'resourceavailable/testresource2/presence', '-m', 'False'])
time.sleep(2)
pub1.terminate()
# Test error handling
mqtt_message_list = [('resourceavailable/testresource3/presence/extralevel', 'True'),
('resourceavailable/testresource4/presence', 'False'),
('a/b', '1.2.3'),
('hatt/testresource5/presence', 'True'),
]
for topic, payload in mqtt_message_list:
pub1 = subprocess.Popen(['mosquitto_pub', '-t', topic, '-m', payload])
time.sleep(0.2)
pub1.terminate()
# Terminate servicemanager, and flush files
servicemanager.send_signal(signal.SIGINT)
time.sleep(1)
servicemanager.terminate()
time.sleep(1)
servicemanager.kill()
time.sleep(3)
subscriber.kill()
time.sleep(0.5)
subscriber_outputfile.flush()
os.fsync(subscriber_outputfile.fileno())
# Verify that the servicemanager has sent proper MQTT messages
with open(self.OUTPUT_FILE_SUBSCRIBER, 'r') as subscriber_outputfile:
text = ' '.join(subscriber_outputfile.readlines())
self.assertIn("dataavailable/testresource2/testdata1 False", text)
self.assertIn("dataavailable/testresource2/testdata2 False", text)
self.assertIn("dataavailable/testresource2/testdata3 False", text)
self.assertIn("commandavailable/testresource2/testcommand1 False", text)
self.assertIn("commandavailable/testresource2/testcommand2 False", text)
self.assertIn("commandavailable/testresource2/testcommand3 False", text)
if __name__ == '__main__':
# Run all tests #
unittest.main(verbosity=2)
# Run a single test #
# suite = unittest.TestSuite()
# suite.addTest(TestCanBus("testReceiveNoData"))
# unittest.TextTestRunner(verbosity=2).run(suite)
```
|
{
"source": "jerrickhoang/rllab",
"score": 2
}
|
#### File: thanard/me-trpo/env_helpers.py
```python
import numpy as np
import tensorflow as tf
import os.path
import rllab.misc.logger as rllab_logger
from rllab.envs.normalized_env import normalize
from rllab.envs.gym_env import GymEnv
from envs import *
from sandbox.rocky.tf.envs.base import TfEnv
####################
#### Environment ###
####################
def get_env(env_name):
if env_name == 'snake':
return TfEnv(normalize(SnakeEnv()))
elif env_name == 'swimmer':
return TfEnv(normalize(SwimmerEnv()))
elif env_name == 'benchmark-half-cheetah':
return TfEnv(normalize(BenchmarkHalfCheetahEnv()))
elif env_name == 'half-cheetah':
return TfEnv(normalize(HalfCheetahEnv()))
elif env_name == 'hopper':
return TfEnv(normalize(HopperEnv()))
elif env_name == 'ant':
return TfEnv(normalize(AntEnv()))
# elif env_name == 'humanoidstandup':
# return TfEnv(GymEnv('HumanoidStandup-v1',
# record_video=False,
# record_log=False))
elif env_name == 'humanoid':
return TfEnv(normalize(HumanoidEnv()))
# elif env_name == 'simple_humanoid':
# return TfEnv(normalize(SimpleHumanoidEnv()))
else:
assert False, "Define the env from env_name."
policy_scope = 'training_policy'
clip_action = ''
def get_action(observation,
policy_in,
policy_out,
sess,
action_noise,
**kwargs):
# TODO think about what to do in first iteration when diff weights is None.
action = sess.run(policy_out, feed_dict={policy_in: np.array([observation])})
# More noisy as t increases, max_var = 1.0
n_actions = len(action)
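# Note: `action` comes back with shape (1, n_actions), so len(action) is 1 and the same
# Gaussian noise sample is broadcast across every action dimension below.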
action += action_noise * np.random.randn(n_actions)
return np.clip(action, *kwargs['action_bounds'])
def prepare_policy(sess, param_noise, diff_weights, initial_param_std):
if diff_weights is not None:
num_weight_vars = diff_weights.shape[0]
flat_weight_update = param_noise * diff_weights * np.random.randn(num_weight_vars)
fwu_ph = tf.get_collection('perturb_policy')[0]
opts = tf.get_collection('perturb_policy')[1:]
sess.run(opts, feed_dict={fwu_ph: flat_weight_update})
return np.mean(np.abs(flat_weight_update))
assert initial_param_std == 0.0
return 0.0
def write_stats(dict, data):
for key, value in dict.items():
if '%' in key:
value.append(np.percentile(data, int(key[:-1]), axis=0))
elif key == 'avg':
value.append(np.mean(data, axis=0))
elif key == 'batch_size':
value.append(len(data))
else:
assert False
def write_to_csv(data, timesteps, path):
# Make it 2D np array
make_values_np_array(data)
# Save to csv
import csv
header = sorted(data.keys())
with open(path, 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(['timesteps'] + header)
for i, timestep in enumerate(timesteps):
writer.writerow([str(timestep)] + [str(data[h][i]) for h in header])
# with open(os.path.join(log_dir,'errors_state_cost_%d.csv'%count), 'w', newline='') as f:
# writer = csv.writer(f)
# writer.writerow(['timesteps'] + header)
# for i, timestep in enumerate(timesteps):
# writer.writerow([str(timestep)] + ["%f"%errors['state_diff'][h][i][-1] if
# h =='batch_size' else
# errors['state_diff'][h][i] for h in header])
def make_values_np_array(dict):
for key, value in dict.items():
dict[key] = np.array(value)
# Under the current policy and dynamics models, we sample roll-outs from a set of fixed initial states.
# We also perturb the policy on its parameters and compute the average.
def evaluate_model_predictions(env,
policy_in,
policy_out,
dynamics_in,
dynamics_out,
reset_initial_states,
sess,
log_dir,
count, # For logging csv
max_timestep,
cost_np_vec,
timesteps=(1, 3, 5, 7, 10, 12, 15, 18, 20, 100)):
errors = {'timesteps': timesteps,
'l2_sum': [],
'l1_sum': [],
'l1_state_cost': [],
'state_diff':{
'100%':[],
'0%':[],
'75%':[],
'25%':[],
'50%':[],
'avg':[],
'batch_size':[]
},
'cost_diff': {
'100%': [],
'0%': [],
'75%': [],
'25%': [],
'50%': [],
'avg': [],
'batch_size': []
}
}
# Get numpy arrays Os, As, Rs
Os, As, Rs = sample_fixed_init_trajectories(env,
policy_in,
policy_out,
reset_initial_states,
sess,
max_timestep)
assert max_timestep == Rs.shape[1]
# Compute the errors
for timestep in timesteps:
n_states = Os.shape[2]
Xs = np.reshape(Os[:, :-timestep, :], (-1, n_states))
Ys = np.reshape(Os[:, timestep:, :], (-1, n_states))
costs = np.zeros(len(Xs))
rewards = np.zeros(len(Xs))
observations = Xs
for t in range(timestep):
actions = sess.run(policy_out, feed_dict={policy_in: observations})
actions = np.clip(actions, *env.action_space.bounds)
next_observations = sess.run(dynamics_out,
feed_dict={dynamics_in: np.concatenate([observations, actions], axis=1)})
costs += cost_np_vec(observations, actions, next_observations)
rewards += np.reshape(Rs[:, t:t+max_timestep+1-timestep], -1)
# Update observations
observations = next_observations
# Get the different after t steps
state_diff = np.abs(Ys - observations)
cost_diff = np.abs(costs + rewards)
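# cost_np_vec is the negative of the environment reward (see the "verify_cost" check in
# sample_trajectories), so |costs + rewards| measures how far the model-based cost
# estimate drifts from the real rollout after `timestep` steps.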
# Add errors
errors['l1_sum'].append(np.mean(np.sum(state_diff, axis=1)))
# L2: Euclidean norm of the per-state prediction error.
errors['l2_sum'].append(np.mean(np.sqrt(np.sum(np.square(state_diff), axis=1))))
errors['l1_state_cost'].append(np.mean(state_diff[:, -1]))
write_stats(errors['state_diff'], state_diff)
write_stats(errors['cost_diff'], cost_diff)
write_to_csv(errors['state_diff'], timesteps,
os.path.join(log_dir, 'state_diff_%d.csv' % count))
write_to_csv(errors['cost_diff'], timesteps,
os.path.join(log_dir, 'cost_diff_%d.csv' % count))
return errors
# TODO: fix when early stop
def get_error_distribution(policy_in,
policy_out,
dynamics_in,
dynamics_out,
env,
cost_np_vec,
sess,
logger,
log_dir,
count,
horizon=100,
sample_size=100,
known_actions=False,
is_plot=False
):
real_costs = []
initial_states = []
actions = []
real_final_states = []
# Compute real costs
for i in range(sample_size):
x = env.reset()
initial_states.append(x)
real_cost = 0
_action = []
for t in range(horizon):
action = sess.run(policy_out,
feed_dict={policy_in: x[None]})[0]
x, r, done, _ = env.step(action)
_action.append(action)
real_cost -= r
if done:
break
actions.append(_action)
real_costs.append(real_cost)
real_final_states.append(x)
real_costs = np.array(real_costs)
real_final_states = np.array(real_final_states)
# Compute estimated costs
o = np.array(initial_states)
actions = np.clip(actions, -1, 1)
estimated_costs = np.zeros_like(real_costs)
for t in range(horizon):
# Sim step
if known_actions:
a = actions[:, t, :]
else:
a = np.clip(sess.run(
policy_out, feed_dict={policy_in: o}
), *env.action_space.bounds)
o_next = sess.run(dynamics_out,
feed_dict={dynamics_in: np.concatenate([o, a], axis=1)})
estimated_costs += cost_np_vec(o, a, o_next)
# update
o = o_next
# Plot
e_cost = estimated_costs - real_costs
e_state = o - real_final_states
loss = np.sum(np.square(e_state), axis=1)
logger.info('### Real cost ###')
logger.info('mean: {}'.format(np.mean(real_costs)))
logger.info('std: {}'.format(np.std(real_costs)))
logger.info('median: {}'.format(np.median(real_costs)))
logger.info("### Total cost difference ###")
logger.info('mean: {}'.format(np.mean(e_cost)))
logger.info('std: {}'.format(np.std(e_cost)))
logger.info('median: {}'.format(np.median(e_cost)))
logger.info("### Final state error ###")
logger.info('mean: {}'.format(np.mean(loss)))
logger.info('std: {}'.format(np.std(loss)))
logger.info('median: {}'.format(np.median(loss)))
logger.info("### Dimension mean ###")
logger.info(np.mean(np.square(e_state), axis=0))
if is_plot:
import matplotlib as mpl
mpl.use('Agg')
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
x = pd.Series(e_cost, name="total cost difference")
sns.distplot(x)
plt.savefig(os.path.join(log_dir, 'cost_diff_dist_%d.png' % count))
plt.close()
plt.figure()
x = pd.Series(loss, name="final state prediction error (L2)")
sns.distplot(x, color='g')
plt.savefig(os.path.join(log_dir, 'state_diff_dist_%d.png' % count))
plt.close()
return (e_cost, e_state)
def test_policy_cost(policy_init,
policy_cost,
policy_in,
policy_out,
dynamics_in,
dynamics_out,
env,
cost_np_vec,
sess,
horizon=100,
sample_size=10,
is_done=None
):
'''
Verify that the trajectory cost computed in TensorFlow (policy_cost) matches the cost
computed in NumPy. The check compares averages over sample_size trajectories.
'''
initial_states = np.array([env.reset() for i in range(sample_size)])
estimated_policy_cost = sess.run(policy_cost, feed_dict={policy_init: initial_states})
o = initial_states
estimated_cost = 0
dones = np.array([False for i in range(sample_size)])
for t in range(horizon):
# Sim step
a = np.clip(sess.run(
policy_out, feed_dict={policy_in: o}
), *env.action_space.bounds)
o_next = sess.run(dynamics_out,
feed_dict={dynamics_in: np.concatenate([o, a], axis=1)})
estimated_cost += np.mean((1-dones)*cost_np_vec(o, a, o_next))
o = o_next
if is_done is not None and is_done(o, o_next).any():
dones[is_done(o, o_next)] = True
print(estimated_cost, estimated_policy_cost)
assert np.allclose(estimated_cost, estimated_policy_cost)
# Make sure that this can be found and matched to the policy learning curve.
def sample_fixed_init_trajectories(env,
policy_in,
policy_out,
reset_initial_states,
sess,
max_timestep):
Os = []
As = []
Rs = []
for x in reset_initial_states:
_os = []
_as = []
_rs = []
if hasattr(env.wrapped_env, 'wrapped_env'):
observation = env.wrapped_env.wrapped_env.reset(x)
else:
env.reset()
half = int(len(x)/2)
inner_env = env.wrapped_env.env.unwrapped
inner_env.set_state(x[:half], x[half:])
observation = inner_env._get_obs()
_os.append(observation)
for t in range(max_timestep):
action = sess.run(policy_out, feed_dict={policy_in: observation[None]})
next_observation, reward, done, info = env.step(action[0])
_os.append(next_observation)
_as.append(action[0])
_rs.append(reward)
# Update observation
observation = next_observation
if done:
break
Os.append(_os)
As.append(_as)
Rs.append(_rs)
return np.array(Os), np.array(As), np.array(Rs)
# Sample a batch of trajectories from an environment
# Use tensorflow policy given as (in and out).
# Batch size is the total number of transitions (not trajectories).
def sample_trajectories(env,
policy_in,
policy_out,
exploration,
batch_size,
saver,
diff_weights,
log_dir,
logger,
is_monitored,
monitorpath,
sess,
max_timestep,
render_every=None,
cost_np=None,
is_done=None):
saver.save(sess,
os.path.join(log_dir, 'policy.ckpt'),
write_meta_graph=False)
if is_monitored:
from gym import wrappers
env = wrappers.Monitor(env, monitorpath)
Os = []
As = []
Rs = []
max_eps_reward = -np.inf
min_eps_reward = np.inf
avg_eps_reward = 0.0
_counter = 1
while _counter <= batch_size:
o = []
a = []
r = []
if is_monitored:
env.stats_recorder.done = True
observation = env.reset()
o.append(observation)
episode_reward = 0.0
avg_weight_change = prepare_policy(sess,
exploration['param_noise'],
diff_weights,
exploration['initial_param_std'])
for t in range(max_timestep):
# Perturb policy.
if exploration['vary_trajectory_noise']:
action_noise = exploration['action_noise']*np.random.uniform()
else:
action_noise = exploration['action_noise']
action = get_action(observation,
policy_in,
policy_out,
sess,
action_noise=action_noise,
action_bounds=env.action_space.bounds)
observation, reward, done, info = env.step(action)
# Debug is_done
if is_done is not None:
assert done == is_done(o[-1][None], observation[None])[0]
o.append(observation)
a.append(action[0])
r.append(reward)
episode_reward += reward
_counter += 1
if render_every is not None and len(Os) % render_every == 0:
env.render()
if done:
break
# debugging cost function
if cost_np is not None:
cost_np_value = cost_np(np.array(o[:-1]),
np.array(a),
np.array(o[1:]))
episode_cost = len(a) * cost_np_value
# Check if cost_np + env_reward == 0
logger.info('%d steps, cost %.2f, verify_cost %.3f, avg_weight_change %.3f'
% (_counter - 1,
episode_cost,
episode_reward + episode_cost,
avg_weight_change))
else:
logger.info('%d steps, reward %.2f, avg_weight_change %.3f'
% (_counter - 1, episode_reward, avg_weight_change))
# Recover policy
saver.restore(sess, os.path.join(log_dir, 'policy.ckpt'))
logger.debug("Restored the policy back to %s" % os.path.join(log_dir, 'policy.ckpt'))
Os.append(o)
As.append(a)
Rs.append(r)
# Update stats
avg_eps_reward += episode_reward
if episode_reward > max_eps_reward:
max_eps_reward = episode_reward
if episode_reward < min_eps_reward:
min_eps_reward = episode_reward
avg_eps_reward /= len(Os)
rllab_logger.record_tabular('EpisodesCollected', len(Os))
rllab_logger.record_tabular('TimeStepsCollected', _counter - 1)
return Os, As, Rs, {'avg_eps_reward': avg_eps_reward,
'min_eps_reward': min_eps_reward,
'max_eps_reward': max_eps_reward}
def reset_batch(envs, reset_initial_states):
obs = []
for env, x in zip(envs, reset_initial_states):
if hasattr(env.wrapped_env, 'wrapped_env'):
obs.append(env.wrapped_env.wrapped_env.reset(x))
else:
env.reset()
half = int(len(x) / 2)
inner_env = env.wrapped_env.env.unwrapped
inner_env.set_state(x[:half], x[half:])
obs.append(inner_env._get_obs())
return np.array(obs)
def step_batch(envs, actions):
next_steps = [env.step(action) for (env, action) in zip(envs, actions)]
next_obs, rs, ds, infos = list(zip(*next_steps))
return np.array(next_obs), np.array(rs), np.array(ds), infos
# Given a batch of initial states and a policy, do deterministic rollout on real env.
# Don't render. Add cost function evaluation.
def evaluate_fixed_init_trajectories(env,
policy_in,
policy_out,
reset_initial_states,
cost_np_vec, sess,
max_timestep=100,
gamma=1.0):
import pickle
n_envs = len(reset_initial_states)
envs = [pickle.loads(pickle.dumps(env)) for _ in range(n_envs)]
observations = reset_batch(envs, reset_initial_states)
dones = [False for _ in range(n_envs)]
cost = 0.0
reward = 0.0
for t in range(max_timestep):
actions = sess.run(policy_out, feed_dict={policy_in: observations})
# clipping
actions = np.clip(actions, *env.action_space.bounds)
next_observations, _rewards, _dones, _ = step_batch(envs, actions)
dones = np.logical_or(dones, _dones)
# Update rewards and costs
rewards = (1.0 - dones) * _rewards * gamma**t
costs = (1.0-dones)*cost_np_vec(observations, actions, next_observations) * gamma**t
# Update observation
observations = next_observations
cost += np.mean(costs)
reward += np.mean(rewards)
# assert cost + reward < 1e-2, "cost is {}, reward is {}".format(cost, reward)
return cost
# def evaluate_learned_dynamics_trajectories(dynamics_in,
# dynamics_out,
# policy_in,
# policy_out,
# initial_states,
# cost_np, sess,
# max_timestep=100):
# batch_size, n_states = initial_states.shape
# avg_eps_cost = 0.0
#
# observations = initial_states
# for t in range(max_timestep):
# actions = sess.run(policy_out, feed_dict={policy_in: observations})
# actions = np.clip(actions, -1.0, 1.0)
# # Only using model 0.
# next_observations = sess.run(dynamics_out,
# feed_dict={dynamics_in: np.concatenate([observations, actions], axis=1)})
# avg_cost = cost_np(observations, actions, next_observations)
# # Update observations
# observations = next_observations
# # Update cost
# avg_eps_cost += avg_cost
# return avg_eps_cost
from rllab.envs.base import Env
from rllab.envs.base import Step
class NeuralNetEnv(Env):
def __init__(self, env, inner_env, cost_np, dynamics_in, dynamics_outs, sam_mode):
self.vectorized = True
self.env = env
self.cost_np = cost_np
self.is_done = getattr(inner_env, 'is_done', lambda x, y: np.asarray([False] * len(x)))
self.dynamics_in = dynamics_in
self.dynamics_outs = dynamics_outs
self.n_models = len(dynamics_outs)
self.sam_mode = sam_mode
super(NeuralNetEnv, self).__init__()
@property
def observation_space(self):
return self.env.observation_space
@property
def action_space(self):
return self.env.action_space
def reset(self):
self._state = self.env.reset()
observation = np.copy(self._state)
return observation
def step(self, action):
sess = tf.get_default_session()
action = np.clip(action, *self.action_space.bounds)
index = np.random.randint(self.n_models)
next_observation = sess.run(self.dynamics_outs[index],
feed_dict={self.dynamics_in: np.concatenate([self._state, action])[None]})
reward = - self.cost_np(self._state[None], action[None], next_observation)
done = self.is_done(self._state[None], next_observation)[0]
self._state = np.reshape(next_observation, -1)
return Step(observation=self._state, reward=reward, done=done)
def render(self):
print('current state:', self._state)
def vec_env_executor(self, n_envs, max_path_length):
return VecSimpleEnv(env=self, n_envs=n_envs, max_path_length=max_path_length)
class VecSimpleEnv(object):
def __init__(self, env, n_envs, max_path_length):
self.env = env
self.n_envs = n_envs
self.num_envs = n_envs
self.states = np.zeros((self.n_envs, env.observation_space.shape[0]))
self.ts = np.zeros((self.n_envs,))
self.max_path_length = max_path_length
self.cur_model_idx = np.random.randint(len(self.env.dynamics_outs), size=(n_envs, ))
def reset(self, dones=None):
if dones is None:
dones = np.asarray([True] * self.n_envs)
else:
dones = np.cast['bool'](dones)
for i, done in enumerate(dones):
if done:
self.states[i] = self.env.reset()
self.cur_model_idx[i] = np.random.randint(len(self.env.dynamics_outs))
self.ts[dones] = 0
return self.states[dones]
def step(self, actions):
self.ts += 1
actions = np.clip(actions, *self.env.action_space.bounds)
next_observations = self.get_next_observation(actions)
rewards = - self.env.cost_np(self.states, actions, next_observations)
self.states = next_observations
dones = self.env.is_done(self.states, next_observations)
dones[self.ts >= self.max_path_length] = True
if np.any(dones):
self.reset(dones)
return self.states, rewards, dones, dict()
def get_next_observation(self, actions):
sess = tf.get_default_session()
sam_mode = self.env.sam_mode
next_possible_observations = sess.run(self.env.dynamics_outs,
feed_dict={self.env.dynamics_in:
np.concatenate([self.states, actions],
axis=1)})
next_possible_observations = np.array(next_possible_observations)
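# Model-sampling modes (summarizing the branches below): 'step_rand' picks a random model
# per environment at every step, 'eps_rand' keeps the model chosen at reset for the whole
# episode (cur_model_idx), the 'model_mean'/'model_mean_std'/'model_med' modes aggregate
# the predictions of all models, and 'one_model' always uses model 0.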
if sam_mode == 'step_rand':
# Choose a random model for each batch.
indices = np.random.randint(self.env.n_models, size=self.n_envs)
next_observations = next_possible_observations[indices, range(self.n_envs)]
elif sam_mode == 'eps_rand':
indices = self.cur_model_idx
next_observations = next_possible_observations[indices, range(self.n_envs)]
elif sam_mode == 'model_mean_std':
std = np.std(next_possible_observations, axis=0)
next_observations = np.mean(next_possible_observations, axis=0) + np.random.normal(size=std.shape)*std
elif sam_mode == 'model_mean':
next_observations = np.mean(next_possible_observations, axis=0)
elif sam_mode == 'model_med':
next_observations = np.median(next_possible_observations, axis=0)
elif sam_mode == 'one_model':
next_observations = next_possible_observations[0]
else:
assert False, "sam mode %s is not defined." % sam_mode
return next_observations
```
#### File: thanard/me-trpo/model_based_rl.py
```python
import tensorflow as tf
import numpy as np
import pickle
import os
import logging
from env_helpers import sample_trajectories, \
evaluate_fixed_init_trajectories, evaluate_model_predictions, \
get_error_distribution, test_policy_cost, NeuralNetEnv
from utils import *
from svg_utils import setup_gradients, svg_update
import rllab.misc.logger as rllab_logger
import time
import copy
import joblib
np.set_printoptions(
formatter={
'float_kind': lambda x: "%.2f" % x
}
)
TF_SUMMARY = True
def build_dynamics_graph(scope,
dynamics_model,
dynamics_in,
dynamics_in_full,
y_training_full,
n_dynamics_input,
n_models,
get_regularizer_loss,
n_states,
logger):
'''
Build dynamics tensorflow graph at training and test times.
:param scope:
:param dynamics_model:
:param dynamics_in:
:param y_training:
:param dynamics_in_full:
:param y_training_full:
:param n_dynamics_input:
:param n_models:
:param get_regularizer_loss:
:param n_states:
:return:
'''
# For training
_dynamics_outs = [
dynamics_model(
get_ith_tensor(dynamics_in_full, i, n_dynamics_input),
scope,
'model%d' % i,
collect_summary=TF_SUMMARY
) for i in range(n_models)
]
_regularizer_losses = [get_regularizer_loss(scope, 'model%d' % i) for i in range(n_models)]
_prediction_losses = [
tf.reduce_mean(
tf.reduce_sum(
tf.square(
y_predicted - get_ith_tensor(
y_training_full,
i,
n_states
)
),
axis=[1]
)
)
for i, y_predicted in enumerate(_dynamics_outs)
]
prediction_loss = tf.reduce_sum(_prediction_losses,
name='total_prediction_loss')
regularizer_loss = tf.reduce_sum(_regularizer_losses,
name='total_regularizer_loss')
# Add summaries
with tf.name_scope('%s/prediction_loss' % scope):
tf.summary.histogram('dist_over_models', _prediction_losses)
tf.summary.scalar('summary', prediction_loss)
assert len(_prediction_losses) == len(_regularizer_losses)
dynamics_losses = [
_prediction_losses[i] + _regularizer_losses[i]
for i in range(len(_prediction_losses))
]
dynamics_loss = tf.add(prediction_loss, regularizer_loss,
name='total_dynamics_loss')
logger.info("Defined %d models in scope %s" % (n_models, scope))
# At test time
_dynamics_outs = [
dynamics_model(
dynamics_in,
scope,
'model%d' % i
) for i in range(n_models)
]
# TODO: change this hack back.
# avg_prediction = tf.reduce_mean(tf.stack(_dynamics_outs, axis=0),
# axis=0,
# name='avg_prediction')
logger.info("Built prediction network for scope %s" % (scope))
return dynamics_loss, prediction_loss, regularizer_loss, _dynamics_outs, dynamics_losses
def build_policy_graph(policy_scope,
scope,
policy_training_init,
n_models,
policy_opt_params,
policy_model,
dynamics_model,
env,
cost_tf,
logger,
is_env_done_tf=None,
stochastic=None):
# TODO: Think about using avg model in each prediction step.
_policy_costs = []
n_saturates = 0 # Debug
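# BPTT-style rollout: for each of the n_models learned dynamics models, unroll the policy
# for T steps through that model and accumulate the discounted cost; gradients can then be
# backpropagated through the unrolled model when the policy is optimized.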
with tf.name_scope(scope):
for i in range(n_models):
# Initial states
x = policy_training_init
_policy_cost = 0
dones = 0.0
for t in range(policy_opt_params.T):
u = tf.clip_by_value(policy_model(x, stochastic), *env.action_space.bounds)
n_saturates += tf.cast(tf.equal(tf.abs(u), 1.0), tf.int32)
x_next = dynamics_model(tf.concat([x, u], axis=1),
scope,
'model%d' % i)
# Update dones after computing cost.
if is_env_done_tf is not None:
_policy_cost += (policy_opt_params.gamma ** t) * cost_tf(x, u, x_next,
dones=dones)
dones = tf.maximum(dones, is_env_done_tf(x, x_next))
else:
_policy_cost += (policy_opt_params.gamma ** t) * cost_tf(x, u, x_next)
# Move forward 1 step.
x = x_next
_policy_costs.append(_policy_cost)
# Average over cost from all dynamics models
# Collecting summary
with tf.name_scope('%s/policy_cost' % policy_scope):
tf.summary.histogram('dist_over_models', _policy_costs)
tf.summary.scalar('cost_on_model0', _policy_costs[0])
policy_model(policy_training_init, collect_summary=TF_SUMMARY)
logger.info("Built %d policy graphs for %s model" % (n_models, scope))
return _policy_costs, n_saturates
def get_dynamics_optimizer(scope, prediction_loss, reg_loss, dynamics_opt_params, logger):
with tf.variable_scope('adam_' + scope):
# Allow learning rate decay schedule.
if type(dynamics_opt_params.learning_rate) == dict:
adaptive_lr = tf.Variable(dynamics_opt_params.learning_rate["scratch"], trainable=False)
else:
adaptive_lr = dynamics_opt_params.learning_rate
_prediction_opt = tf.train.AdamOptimizer(learning_rate=adaptive_lr)
prediction_opt_op = minimize_and_clip(_prediction_opt,
prediction_loss[scope],
var_list=tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
scope=scope),
collect_summary=TF_SUMMARY
)
_reg_opt = tf.train.GradientDescentOptimizer(learning_rate=adaptive_lr)
reg_opt_op = minimize_and_clip(_reg_opt,
reg_loss[scope],
var_list=tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
scope=scope),
collect_summary=TF_SUMMARY
)
dynamics_opt_op = [prediction_opt_op, reg_opt_op]
# Get variables and re-initializer.
_dynamics_adam_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
scope='adam_' + scope)
dynamics_adam_init = tf.variables_initializer(_dynamics_adam_vars)
logger.debug('num_%s_adam_variables %d' % (scope, len(_dynamics_adam_vars)))
return dynamics_opt_op, dynamics_adam_init, adaptive_lr
def get_policy_optimizer(scope, policy_cost, policy_opt_params, logger):
with tf.variable_scope('adam_' + scope):
policy_opt = tf.train.AdamOptimizer(learning_rate=policy_opt_params.learning_rate)
policy_opt_op = minimize_and_clip(
policy_opt,
policy_cost,
var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope),
clip_val=policy_opt_params.grad_norm_clipping,
collect_summary=TF_SUMMARY
)
# Debugging
policy_grads_and_vars = policy_opt.compute_gradients(
policy_cost,
var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
)
# Get variables and re-initializer.
policy_adam_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='adam_' + scope)
policy_adam_init = tf.variables_initializer(policy_adam_vars)
logger.debug('num_policy_adam_variables %d' % len(policy_adam_vars))
logger.info("Created policy opt operator.")
return policy_opt_op, policy_adam_init, policy_grads_and_vars
def create_perturb_policy_opts(policy_scope, shape):
'''
:return: flat weight update placeholder, and
collection of perturb weight operators
'''
flat_weight_update_ph = tf.placeholder(tf.float32, shape=shape)
weights = get_variables(scope=policy_scope, filter='/b:')
weights.extend(get_variables(scope=policy_scope, filter='/W:'))
weight_updates = unflatten_tensors(flat_weight_update_ph, weights)
opts = get_update_variable_opt(weight_updates, weights)
tf.add_to_collection('perturb_policy', flat_weight_update_ph)
for opt in opts:
tf.add_to_collection('perturb_policy', opt)
'''
This method is used for training the dynamics and policy models.
The oracle option can be set in policy_opt_params.
Oracle mode gives access to the true dynamics (the oracle)
during the policy optimization step (so we know when to stop),
and therefore gives an upper bound on the stability that bootstrapping could achieve.
'''
def train_models(env,
dynamics_model,
dynamics_opt_params,
get_regularizer_loss,
policy_model,
policy_opt_params,
rollout_params,
cost_np,
cost_np_vec,
cost_tf,
snapshot_dir,
working_dir,
n_models=1,
sweep_iters=10,
sample_size=1000,
verbose=False,
variant={},
**kwargs
):
n_states = env.observation_space.shape[0]
n_actions = env.action_space.shape[0]
u_max = env.action_space.high[0]
sess = tf.get_default_session()
assert (sess is not None)
logger = get_logger(__name__, snapshot_dir)
is_env_done = getattr(kwargs['inner_env'], 'is_done', None)
is_env_done_tf = getattr(kwargs['inner_env'], 'is_done_tf', None)
###############
# Build Graph #
###############
'''
Rollouts
'''
policy_scope = 'training_policy'
policy_in = tf.placeholder(tf.float32, shape=(None, n_states), name='policy_in')
policy_out = policy_model(policy_in)
tf.add_to_collection("policy_in", policy_in)
tf.add_to_collection("policy_out", policy_out)
'''
Dynamics Optimization
'''
n_dynamics_input = n_states + n_actions
dynamics_in = tf.placeholder(tf.float32,
shape=(None, n_dynamics_input),
name='dynamics_in')
dynamics_in_full = tf.placeholder(tf.float32,
shape=(None, n_models * n_dynamics_input),
name='dynamics_in_full')
y_training_full = tf.placeholder(tf.float32,
shape=(None, n_models * n_states),
name='y_training_full')
# Ground-truth next states.
if policy_opt_params.mode == 'fourth_estimated':
model_scopes = ["training_dynamics", "validation_dynamics",
"second_validation_dynamics", "third_validation_dynamics"]
elif policy_opt_params.mode == 'third_estimated':
model_scopes = ["training_dynamics", "validation_dynamics", "second_validation_dynamics"]
elif policy_opt_params.mode == 'second_estimated':
model_scopes = ["training_dynamics", "validation_dynamics"]
elif policy_opt_params.mode == 'estimated' or policy_opt_params.mode == 'trpo_mean': #TODO: bad hacking
model_scopes = ["training_dynamics"]
else:
# assert 'real' == policy_opt_params.mode
model_scopes = ["training_dynamics"]
# model_scopes = ["training_dynamics", "validation_dynamics"]
dynamics_loss = {}
prediction_loss = {}
reg_loss = {}
dynamics_outs = {}
dynamics_losses = {}
for scope in model_scopes:
dynamics_loss[scope], prediction_loss[scope], reg_loss[scope], \
dynamics_outs[scope], dynamics_losses[scope] = \
build_dynamics_graph(scope,
dynamics_model,
dynamics_in,
dynamics_in_full,
y_training_full,
n_dynamics_input,
n_models,
get_regularizer_loss,
n_states,
logger)
for i in range(n_models):
tf.add_to_collection('%s_out' % scope, dynamics_outs[scope][i])
tf.add_to_collection('dynamics_in', dynamics_in)
# Adam optimizers
dynamics_opt_op = {}
dynamics_adam_init = []
lr_up = []
lr_ph = tf.placeholder(tf.float32, shape=())
for scope in model_scopes:
dynamics_opt_op[scope], _dynamics_adam_init, model_lr = \
get_dynamics_optimizer(scope,
prediction_loss,
reg_loss,
dynamics_opt_params,
logger)
lr_up.append(model_lr.assign(lr_ph))
dynamics_adam_init.append(_dynamics_adam_init)
logger.info("Created dynamics opt operator.")
# File writers
train_writer = tf.summary.FileWriter(os.path.join(snapshot_dir, 'tf_logs/train'), sess.graph)
val_writer = tf.summary.FileWriter(os.path.join(snapshot_dir, 'tf_logs/val'), sess.graph)
'''
Policy Optimization
'''
policy_training_init = tf.placeholder(tf.float32, shape=(None, n_states), name='x_optimizing')
policy_costs = {}
stochastic = tf.Variable(0.0, trainable=False)
set_stochastic = [stochastic.assign(0.0), stochastic.assign(1.0)]
for scope in model_scopes:
policy_costs[scope], n_saturates = build_policy_graph(policy_scope,
scope,
policy_training_init,
n_models,
policy_opt_params,
policy_model,
dynamics_model,
env,
cost_tf,
logger,
is_env_done_tf,
stochastic)
# Setting up for BPTT, TRPO, SVG, and L-BFGS
# TODO play with different training dynamics model.
training_policy_cost = tf.reduce_mean(policy_costs['training_dynamics'])
training_models = dynamics_outs['training_dynamics']
# mid = int(n_models/2)
# topk_values = tf.nn.top_k(policy_costs['training_dynamics'], mid+1)[0]
# if 2*mid==n_models:
# training_policy_cost = (topk_values[-1] + topk_values[-2])/2
# else:
# training_policy_cost = topk_values[-1]
if kwargs['algo_name'] == 'trpo' or kwargs['algo_name'] == 'vpg':
from envs.base import TfEnv
kwargs["rllab_algo"].env = TfEnv(NeuralNetEnv(env=env,
inner_env=kwargs['inner_env'],
cost_np=cost_np_vec,
dynamics_in=dynamics_in,
dynamics_outs=training_models,
sam_mode=policy_opt_params.sam_mode))
elif kwargs['algo_name'] == 'svg':
dynamics_configs = dict(scope_name='training_dynamics', variable_name='0')
cost_gradient, policy_gradient, model_gradient, theta_vars = \
setup_gradients(policy_model,
dynamics_model,
cost_tf,
sess,
kwargs['inner_env'],
dynamics_configs)
logger.info('Build SVG update graph.')
elif kwargs['algo_name'] == 'l-bfgs':
train_step = tf.contrib.opt.ScipyOptimizerInterface(
training_policy_cost,
var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope='training_policy'),
method='L-BFGS-B'
)
tf.add_to_collection("l-bfgs_step", train_step)
if "bptt" in kwargs['algo_name']:
policy_opt_op, policy_adam_init, policy_grads_and_vars = \
get_policy_optimizer(policy_scope, training_policy_cost, policy_opt_params, logger)
else:
policy_opt_op = []
policy_adam_init = []
policy_grads_and_vars = []
'''
Prepare variables and data for learning
'''
# Initialize all variables
init_op = tf.global_variables_initializer()
sess.run(init_op)
logger.info('Re-initialize policy.')
'''
Policy weights
'''
policy_weights = get_variables(scope=policy_scope, filter='/b:')
policy_weights.extend(get_variables(scope=policy_scope, filter='/W:'))
flat_policy_weights = flatten_tensors(policy_weights)
create_perturb_policy_opts(policy_scope, flat_policy_weights.shape)
############# Dynamics validation data ###############
dynamics_data = {}
dynamics_validation = {}
datapath = os.path.join(working_dir, rollout_params.datapath)
for scope in model_scopes:
dynamics_data[scope] = data_collection(max_size=rollout_params.training_data_size)
dynamics_validation[scope] = data_collection(max_size=rollout_params.validation_data_size)
if os.path.isfile(datapath) and rollout_params.load_rollout_data:
for scope in model_scopes:
with open(datapath, 'rb') as f:
training_data = pickle.load(f)
split_ratio = rollout_params.split_ratio
validation_size = round(training_data['dc'].n_data * split_ratio/(1.-split_ratio))
dynamics_data[scope].clone(training_data['dc'])
dynamics_validation[scope].clone(training_data['dc_valid'], validation_size)
logger.warning('Load data collections from %s.' % rollout_params.datapath)
else:
logger.warning('Start dynamics data and validation collection from scratch.')
############# Policy validation data (fixed) ###############
vip = os.path.join(working_dir, policy_opt_params.validation_init_path)
vrip = os.path.join(working_dir, policy_opt_params.validation_reset_init_path)
if os.path.isfile(vip) and os.path.isfile(vrip):
with open(vip, 'rb') as f:
policy_validation_init = pickle.load(f)
logger.info('Loaded policy val init state data from %s.' % vip)
with open(vrip, 'rb') as f:
policy_validation_reset_init = pickle.load(f)
logger.info('Loaded policy val reset init state data from %s.' % vrip)
elif vip == vrip:
# We know that reset is correct, e.g., swimmer.
policy_validation_init = [env.reset() for i in range(policy_opt_params.batch_size)]
policy_validation_reset_init = np.array(policy_validation_init)
with open(vip, 'wb') as f:
pickle.dump(policy_validation_init, f)
logger.info('Created %s contains policy validation initial state data.' % vip)
else:
# Make sure that the reset works with the representation.
# If not generate this manually.
policy_validation_init = []
policy_validation_reset_init = []
for i in range(policy_opt_params.batch_size):
init = env.reset()
if hasattr(env._wrapped_env, '_wrapped_env'):
inner_env = env._wrapped_env._wrapped_env
else:
inner_env = env._wrapped_env.env.unwrapped
reset_init = np.concatenate(
[inner_env.model.data.qpos[:, 0],
inner_env.model.data.qvel[:, 0]])
if hasattr(env._wrapped_env, '_wrapped_env'):
assert np.allclose(init, inner_env.reset(reset_init))
policy_validation_init.append(init)
policy_validation_reset_init.append(reset_init)
policy_validation_init = np.array(policy_validation_init)
policy_validation_reset_init = np.array(policy_validation_reset_init)
os.makedirs(os.path.dirname(vip), exist_ok=True)
os.makedirs(os.path.dirname(vrip), exist_ok=True)
with open(vip, 'wb') as f:
pickle.dump(policy_validation_init, f)
logger.info('Created %s contains policy validation initial state data.' % vip)
with open(vrip, 'wb') as f:
pickle.dump(policy_validation_reset_init, f)
logger.info('Created %s contains policy validation reset initial state data.' % vrip)
'''
Saver
'''
log_dir = os.path.join(snapshot_dir, 'training_logs')
if not os.path.exists(log_dir):
os.makedirs(log_dir)
saver = tf.train.Saver(max_to_keep=1)
saver.save(sess, os.path.join(log_dir, 'policy-and-models-0.ckpt'))
# Model savers
dynamics_savers = {}
dynamics_vars = []
for scope in dynamics_data.keys():
dynamics_savers[scope] = []
var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
dynamics_vars.extend(var_list)
        # TODO: here we assume #validation models == #training models == n_models
for i in range(n_models):
vars_i = list(filter(lambda x: '%s/model%d' % (scope, i) in x.name, var_list))
dynamics_savers[scope].append(tf.train.Saver(vars_i))
logger.info('Dynamics saver %d in scope %s has %d variables.' % (i, scope, len(vars_i)))
logger.info('Total dynamics var %d' % len(dynamics_vars))
# Model initializers
dynamics_initializer = tf.variables_initializer(dynamics_vars)
# Model summaries
dynamics_summaries = {}
for scope in dynamics_data.keys():
dynamics_summaries[scope] = []
for i in range(n_models):
# TODO: Hack to save global model stats
# TODO: Hack adam_ + namescope is supposed to be global.
name_scope = "%s" % scope
#name_scope = "%s/model%d" % (scope, i)
merged = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES, scope=name_scope)+\
tf.get_collection(tf.GraphKeys.SUMMARIES, scope='adam_'+name_scope)
)
dynamics_summaries[scope].append(merged)
logger.info('Summaries merged.')
# Policy saver
var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=policy_scope)
policy_saver = tf.train.Saver(var_list)
logger.info('Policy saver has %d variables.' % (len(var_list)))
logger.debug(''.join([var.name for var in var_list]))
# Policy summaries
policy_summary = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES, scope=policy_scope) +\
tf.get_collection(tf.GraphKeys.SUMMARIES, scope='adam_'+policy_scope)
)
###############
## Learning ###
###############
start_time = time.time()
count = 1
diff_weights = None
while True:
itr_start_time = time.time()
# Save params every iteration
joblib.dump(kwargs["saved_policy"], os.path.join(snapshot_dir, 'training_logs/params_%d.pkl' % count))
reinit_every = int(dynamics_opt_params.reinitialize)
if reinit_every <= 0 or count % reinit_every != 1:
reinitialize = False
else:
reinitialize = True
if count == 1:
reinitialize = True
# # Check if policy cost is computed consistently using tf and np.
# for i in range(n_models):
# test_policy_cost(policy_training_init,
# policy_costs["training_dynamics"][i],
# policy_in,
# policy_out,
# dynamics_in,
# dynamics_outs["training_dynamics"][i],
# env,
# cost_np_vec,
# sess,
# horizon=policy_opt_params.T,
# is_done=is_env_done)
# Policy Rollout
logger.info('\n\nPolicy Rollout and Cost Estimation: %d' % (count))
rollout_data = collect_data(env,
sample_size,
dynamics_data,
dynamics_validation,
policy_in,
policy_out,
cost_np, # for debugging
sess,
diff_weights, # for param_noise exploration
policy_saver, # for param_noise exploration
log_dir, # for param_noise exploration
rollout_params,
count,
logger,
is_env_done,
kwargs['input_rms'],
kwargs['diff_rms'])
rllab_logger.record_tabular('collect_data_time', time.time() - itr_start_time)
current_time = time.time()
# Dynamics Optimization
logger.info('\n\nDynamics Optimization: %d' % (count))
dynamics_learning_logs = optimize_models(dynamics_data,
dynamics_validation,
dynamics_opt_op,
dynamics_opt_params,
dynamics_adam_init,
dynamics_loss, # for debugging
dynamics_losses,
prediction_loss, # for debugging
dynamics_in_full,
y_training_full,
sess,
verbose,
dynamics_savers, log_dir, logger,
n_models,
dynamics_initializer,
dynamics_summaries,
train_writer,
val_writer,
lr_up,
lr_ph,
reinitialize)
rllab_logger.record_tabular('model_opt_time', time.time() - current_time)
current_time = time.time()
# # Evaluate training dynamics model
# logger.info('\n\nEvaluate training dynamics model: %d' % (count))
# # Compute model prediction errors T steps ahead.
# errors = evaluate_model_predictions(env,
# policy_in,
# policy_out,
# dynamics_in,
# avg_prediction['training_dynamics'],
# policy_validation_reset_init,
# sess,
# log_dir,
# count,
# max_timestep=policy_opt_params.T,
# cost_np_vec=cost_np_vec)
# # Compute an error distribution.
# errors = get_error_distribution(policy_in,
# policy_out,
# dynamics_in,
# training_models[0],
# env,
# cost_np_vec,
# sess,
# logger,
# log_dir,
# count,
# horizon=policy_opt_params.T,
# sample_size=50,
# known_actions=False,
# is_plot=True #(kwargs['mode'] == 'local')
# )
# rllab_logger.record_tabular('eval_model_time', time.time() - current_time)
# current_time = time.time()
# 1-SVG update
if kwargs["algo_name"] == "svg":
svg_args = dict(rollout_data=rollout_data,
policy_gradient=policy_gradient,
model_gradient=model_gradient,
cost_gradient=cost_gradient,
lr=policy_opt_params.learning_rate,
theta_vars=theta_vars)
else:
svg_args = {}
# Policy Optimization
logger.info('\n\nPolicy Optimization: %d' % (count))
# Get weights before update
assert len(get_variables(scope=policy_scope, filter='weights')) == 0
old_policy_weights = sess.run(flat_policy_weights)
policy_learning_logs = optimize_policy(env,
policy_opt_op,
policy_opt_params,
policy_costs,
set_stochastic,
n_saturates,
training_policy_cost, # for training
policy_adam_init,
policy_in, # for oracle
policy_out, # for oracle
cost_np_vec, # for oracle
sess,
policy_training_init,
policy_validation_init,
policy_validation_reset_init,
policy_grads_and_vars,
verbose,
policy_saver, log_dir, logger,
dynamics_data['training_dynamics'],
svg_args,
policy_summary,
train_writer,
val_writer,
**kwargs)
new_policy_weights = sess.run(flat_policy_weights)
rllab_logger.record_tabular('policy_opt_time', time.time() - current_time)
current_time = time.time()
######################
## Save every sweep ##
######################
if (np.abs(new_policy_weights - old_policy_weights) > 0).any():
diff_weights = np.abs(new_policy_weights - old_policy_weights)
        if diff_weights is not None:
rllab_logger.record_tabular('MaxPolicyWeightDiff', np.amax(diff_weights))
rllab_logger.record_tabular('MinPolicyWeightDiff', np.amin(diff_weights))
rllab_logger.record_tabular('AvgPolicyWeightDiff', np.mean(diff_weights))
else:
rllab_logger.record_tabular('MaxPolicyWeightDiff', 0)
rllab_logger.record_tabular('MinPolicyWeightDiff', 0)
rllab_logger.record_tabular('AvgPolicyWeightDiff', 0)
# # Save errors
# error_log_path = os.path.join(log_dir, 'errors_%d' % count)
# with open(error_log_path, 'wb') as f:
# pickle.dump(errors, f)
# logger.info('\tSave errors to %s.' % error_log_path)
policy_learning_log_path = os.path.join(log_dir, 'policy_learning_sweep_%d.pkl' % (count))
with open(policy_learning_log_path, 'wb') as f:
pickle.dump(policy_learning_logs, f)
logger.info('\tSave policy training and test results to %s.' % policy_learning_log_path)
dynamics_learning_log_path = os.path.join(log_dir, 'dynamics_learning_sweep_%d.pkl' % (count))
with open(dynamics_learning_log_path, 'wb') as f:
pickle.dump(dynamics_learning_logs, f)
logger.info('\tSave dynamics training and test results to %s.' % dynamics_learning_log_path)
        # Save a checkpoint of the policy and models for this sweep.
ckpt_path = os.path.join(log_dir, 'policy-and-models-%d.ckpt' % count)
saver.save(sess, ckpt_path)
rllab_logger.record_tabular('save_and_log_time', time.time() - current_time)
rllab_logger.record_tabular('Time', time.time() - start_time)
rllab_logger.record_tabular('ItrTime', time.time() - itr_start_time)
rllab_logger.dump_tabular()
######################
# Prepare next sweep #
######################
ask_to_run_more = 'mode' in variant and variant['mode'] == 'local'
count += 1
if count > sweep_iters:
if ask_to_run_more:
response = input('Do you want to run 5 more?\n')
if response in {'yes', 'y', 'Y', 'Yes'}:
sweep_iters += 5
elif str.isdigit(response):
sweep_iters += int(response)
else:
break
else:
break
# Save meta for the final model.
ckpt_path = os.path.join(log_dir, 'policy-and-models-final.ckpt')
saver.save(sess, ckpt_path)
return sess
def collect_data(env,
sample_size,
dynamics_data,
dynamics_validation,
policy_in,
policy_out,
cost_np, # for debugging
sess,
diff_weights,
saver,
log_dir,
rollout_params,
count,
logger,
is_env_done,
input_rms,
output_rms):
if sample_size == 0:
return []
Os, As, Rs, info = sample_trajectories(env,
policy_in,
policy_out,
rollout_params.exploration,
sample_size,
saver,
diff_weights,
log_dir,
logger,
is_monitored=rollout_params.is_monitored,
monitorpath=rollout_params.monitorpath + '/iter_%d' % count,
sess=sess,
max_timestep=rollout_params.max_timestep,
render_every=rollout_params.render_every,
cost_np=cost_np,
is_done=is_env_done)
# Get all x and y pairs.
x_all = []
y_all = []
    # TODO: for SVG only; can be removed if no longer needed.
rollout_data = []
for i, o in enumerate(Os):
a = As[i]
triplets = []
for t in range(len(o) - 1):
x_all.append(np.concatenate([o[t], a[t]]))
y_all.append(o[t + 1])
triplets.append((o[t], a[t], o[t + 1]))
rollout_data.append(triplets)
x_all = np.array(x_all)
y_all = np.array(y_all)
# Save data
with open(os.path.join(log_dir, 'new_rollouts_%d.pkl' % count), 'wb') as f:
pickle.dump((x_all, y_all), f)
logger.info('Saved new rollouts data')
indices = list(range(len(x_all)))
# Splitting data into collections.
if rollout_params.splitting_mode == "trajectory":
pass
elif rollout_params.splitting_mode == "triplet":
        # Randomly permute the data before adding it.
np.random.shuffle(indices)
else:
assert (False)
cur_i = 0
assert len(x_all) >= sample_size
total_sample_size = len(x_all)
for scope in dynamics_data.keys():
if rollout_params.use_same_dataset:
_n = round(rollout_params.split_ratio * total_sample_size)
dynamics_validation[scope].add_data(x_all[indices[:_n], :],
y_all[indices[:_n], :])
dynamics_data[scope].add_data(x_all[indices[_n:], :],
y_all[indices[_n:], :])
cur_i = len(indices)
# Update running mean and std.
input_rms.update(x_all[indices[_n:], :])
output_rms.update(y_all[indices[_n:], :] - x_all[indices[_n:], :y_all.shape[1]])
else:
_n = int(rollout_params.split_ratio * total_sample_size /
len(dynamics_data.keys()))
dynamics_validation[scope].add_data(x_all[indices[cur_i:cur_i + _n], :],
y_all[indices[cur_i:cur_i + _n], :])
cur_i += _n
_m = int(total_sample_size / len(dynamics_data.keys()) - _n)
dynamics_data[scope].add_data(
x_all[indices[cur_i:cur_i + _m], :],
y_all[indices[cur_i:cur_i + _m], :]
)
cur_i += _m
logger.info('current %s dynamics_data size : %d' %
(scope, dynamics_data[scope].get_num_data()))
logger.info('current %s dynamics_validation size : %d' %
(scope, dynamics_validation[scope].get_num_data()))
assert (cur_i == total_sample_size)
# if rollout_params.splitting_mode == "trajectory":
# for i in range(total_sample_size):
# if i % rollout_params.max_timestep != rollout_params.max_timestep - 1:
# assert (x_all[indices[i] + 1, 0] == y_all[indices[i], 0])
return rollout_data
# Baseline - predict using previous state.
def compute_baseline_loss(x_batch, y_batch, n_models):
return n_models * np.sum(
np.square(
y_batch - x_batch[:, :y_batch.shape[1]]
)
) / y_batch.shape[0]
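def _demo_compute_baseline_loss():
    # Hypothetical sketch (not part of the original training code): the baseline
    # "predict the previous state" loss on a toy batch of two transitions and a single model.
    # The state/action split and the numbers below are made up for illustration only.
    x_batch = np.array([[0.0, 1.0, 5.0], [2.0, 2.0, -1.0]])  # rows are [state, action]
    y_batch = np.array([[1.0, 1.0], [2.0, 3.0]])             # next states
    # squared error against the previous state: ((1-0)^2 + 0 + 0 + (3-2)^2) / 2 = 1.0
    return compute_baseline_loss(x_batch, y_batch, n_models=1)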
def assign_lr(lr_up, lr_ph, value_to_assign, sess):
sess.run(lr_up, feed_dict={lr_ph: value_to_assign})
def recover_weights(savers, recover_indices, scope, logger, log_dir, sess):
logger.info(
'Recover back best weights for each model.'
)
n_models = len(savers[scope])
for i in range(n_models):
savers[scope][i].restore(sess, os.path.join(log_dir, '%s_%d.ckpt' % (scope, i)))
logger.debug('Restore %s %d from iter %d' % (scope, i, recover_indices[i]))
def optimize_models(dynamics_data,
dynamics_validation,
dynamics_opt_op,
dynamics_opt_params,
dynamics_adam_init,
dynamics_loss, # for debugging
dynamics_losses,
prediction_loss,
dynamics_in_full,
y_training_full,
sess,
verbose,
savers,
log_dir,
logger,
n_models,
dynamics_initializer,
dynamics_summaries,
train_writer,
val_writer,
lr_up,
lr_ph,
reinitialize):
## Re-initialize Adam parameters.
lr = dynamics_opt_params.learning_rate
if reinitialize:
assign_lr(lr_up, lr_ph, lr["scratch"], sess)
sess.run([dynamics_adam_init, dynamics_initializer])
logger.info('Reinitialize dynamics models & '
'Adam Optimizer & '
'update the learning rate to %f.' %
lr["scratch"])
else:
assign_lr(lr_up, lr_ph, lr["refine"], sess)
sess.run(dynamics_adam_init)
logger.info('Reinitialize Adam Optimizer & '
'update the learning rate to %f.' %
lr["refine"])
batch_size = dynamics_opt_params.batch_size
training_losses = dict([(scope, []) for scope in dynamics_data.keys()])
validation_losses = dict([(scope, []) for scope in dynamics_data.keys()])
best_js = dict([(scope, 0) for scope in dynamics_data.keys()])
for scope in dynamics_data.keys():
## Save all weights before training
for i in range(n_models):
savers[scope][i].save(sess,
os.path.join(log_dir, '%s_%d.ckpt' % (scope, i)),
write_meta_graph=False)
logger.info('Saved all initial weights in %s.' % scope)
## Don't recompute validation input
x_batch_val = np.tile(dynamics_validation[scope].x, n_models)
y_batch_val = np.tile(dynamics_validation[scope].y, n_models)
logger.info('Model %s' % scope)
## Initialize min validation loss
min_sum_validation_loss, min_validation_losses = sess.run(
[dynamics_loss[scope], dynamics_losses[scope]],
feed_dict={
dynamics_in_full: x_batch_val,
y_training_full: y_batch_val
}
)
min_validation_losses = np.array(min_validation_losses)
log_losses(logger,
0,
min_sum_validation_loss,
min_validation_losses)
recover_indices = np.zeros(n_models)
refine_idx = -1
iter_const = dynamics_data[scope].n_data / batch_size
max_iters = int(dynamics_opt_params.max_passes * iter_const)
log_every = int(dynamics_opt_params.log_every * iter_const)
num_iters_threshold = int(dynamics_opt_params.num_passes_threshold * iter_const)
for j in range(1, max_iters + 1):
# Training
if dynamics_opt_params.sample_mode == 'next_batch':
x_batch, y_batch = dynamics_data[scope].get_next_batch(batch_size * n_models, is_shuffled=False)
else:
assert dynamics_opt_params.sample_mode == 'random'
x_batch, y_batch = dynamics_data[scope].sample(batch_size * n_models)
_, training_loss = sess.run([dynamics_opt_op[scope], dynamics_loss[scope]],
feed_dict={
dynamics_in_full: np.reshape(x_batch, (batch_size, -1)),
y_training_full: np.reshape(y_batch, (batch_size, -1))
})
# Validation and Logging
if j % log_every == 0:
training_losses[scope].append(training_loss)
### TODO: Now only get the summary of the first model in each scope.
validation_loss, _validation_losses, _prediction_loss = sess.run(
[dynamics_loss[scope],
dynamics_losses[scope],
prediction_loss[scope]],
feed_dict={dynamics_in_full: x_batch_val,
y_training_full: y_batch_val}
)
_validation_losses = np.array(_validation_losses)
validation_losses[scope].append(validation_loss)
log_losses(logger,
j,
validation_loss,
_validation_losses,
training_loss=training_loss,
prediction_loss=_prediction_loss)
# Save the model when each validation cost reaches minimum.
# Save best_j for best total loss.
if min_sum_validation_loss > validation_loss:
min_sum_validation_loss = validation_loss
best_js[scope] = j
to_update = min_validation_losses > _validation_losses
min_validation_losses[to_update] = _validation_losses[to_update]
for i, is_updated in enumerate(to_update):
if is_updated:
savers[scope][i].save(sess,
os.path.join(log_dir, '%s_%d.ckpt' % (scope, i)),
write_meta_graph=False)
if verbose:
logger.debug('Saved %s %d' % (scope, i))
recover_indices[i] = j
logger.info('Saved %d models.' % np.sum(to_update))
# # Break if the cost is going up, and recover to the ckpt.
# if dynamics_opt_params.stop_critereon(min_validation_loss, validation_loss) or \
# j == dynamics_opt_params.max_iters:
# logger.info('\tStop at iter %d.' % (j))
# logger.info('\t\tAfter update validation loss increased from min %.3f to %.3f' % (
# min_validation_loss, validation_loss))
#
# logger.info(
# '\t\tRecover back to iter %d from %s.' %
# (best_j[scope], tf.train.latest_checkpoint(log_dir)) <-- TO FIX
# )
# saver.restore(sess, tf.train.latest_checkpoint(log_dir)) <-- TO FIX
# break
if j - max(np.amax(recover_indices), refine_idx) \
>= num_iters_threshold:
if reinitialize and refine_idx < 0 and lr["scratch"] > lr["refine"]:
recover_weights(savers, recover_indices, scope, logger, log_dir, sess)
logger.info('\nFinished training with lr = %f. Now, reduce to %f\n' %
(lr["scratch"], lr["refine"]))
assign_lr(lr_up, lr_ph, lr["refine"], sess)
refine_idx = j
continue
break
## After updates go to minimum
recover_weights(savers, recover_indices, scope, logger, log_dir, sess)
assert ('dynamics' in scope)
rllab_logger.record_tabular('# model updates', j)
rllab_logger.record_tabular('%s_min_sum_validation_loss' % scope,
min_sum_validation_loss)
# Save summary
if TF_SUMMARY:
summary = sess.run(
dynamics_summaries[scope][0],
feed_dict={dynamics_in_full: x_batch_val[:5],
y_training_full: y_batch_val[:5]}
)
val_writer.add_summary(summary, j)
return {'dynamics_opt_params': get_pickeable(dynamics_opt_params),
'training_losses': training_losses,
'validation_losses': validation_losses,
'best_index': best_js}
def log_losses(logger,
index,
validation_loss,
validation_losses,
first_n=5,
training_loss=None,
compute_baseline=False,
**kwargs):
assert isinstance(validation_losses, np.ndarray)
msg = np.array_str(validation_losses[:first_n], max_line_width=50, precision=2)
if index == 0:
logger.info('iter 0 (no update yet)')
else:
logger.info('iter %d' % index)
logger.info('\ttraining_loss \t: %f' % training_loss)
logger.info('\tvalidation_loss \t: %f' % validation_loss)
logger.info('\tvalidation_losses \t: %s' % msg)
if 'prediction_loss' in kwargs:
logger.info('\tprediction_loss \t: %s' % kwargs['prediction_loss'])
if compute_baseline:
logger.info(
'\tbaseline_validation_loss : %.3f' %
compute_baseline_loss(kwargs['x_batch'],
kwargs['y_batch'],
kwargs['n_models']))
def optimize_policy(env,
policy_opt_op,
policy_opt_params,
policy_costs,
set_stochastic,
sym_n_saturates,
training_policy_cost,
policy_adam_init,
policy_in, # for oracle
policy_out, # for oracle
cost_np_vec, # for oracle
sess,
policy_training_init,
policy_validation_init,
policy_validation_reset_init,
policy_grads_and_vars, # for debugging
verbose,
saver, log_dir, logger,
training_dynamics_data,
svg_args,
policy_summary,
train_writer,
val_writer,
**kwargs):
mode_order = ['real',
'estimated',
'second_estimated',
'third_estimated',
'fourth_estimated',
]
scope2mode = {'training_dynamics': 'estimated',
'validation_dynamics': 'second_estimated',
'second_validation_dynamics': 'third_estimated',
'third_validation_dynamics': 'fourth_estimated'}
batch_size = policy_opt_params.batch_size
### Re-initialize Adam parameters.
if 'reset_opt' in kwargs:
sess.run([policy_adam_init, kwargs['reset_opt']])
logger.info('Reinitialize Adam Optimizer and init_std.')
else:
sess.run(policy_adam_init)
logger.info('Reinitialize Adam Optimizer')
### Save all weights before updates
saver.save(sess,
os.path.join(log_dir, 'policy.ckpt'),
write_meta_graph=False)
logger.info('Saved policy weights before update.')
    ### Estimated costs for T-step predictions.
training_costs = []
estimated_validation_costs = {}
real_validation_costs = []
trpo_mean_costs = []
### Current minimums contain real and estimated validation costs
'''
    Here is an example of min_validation_costs:
{
'real':1.0,
'estimated':4.4,
'second_estimated':[1.1, 3.2, 5., 0.3, 1.4 ], (length = n_models)
'sum': 2.0 (avg of second_estimated)
}
'''
min_validation_costs = {}
min_validation_costs['real'] = evaluate_fixed_init_trajectories(
env,
policy_in,
policy_out,
policy_validation_reset_init,
cost_np_vec, sess,
max_timestep=policy_opt_params.oracle_maxtimestep,
gamma=policy_opt_params.gamma
)
min_validation_costs['trpo_mean'] = np.inf
for scope in policy_costs.keys():
mode = scope2mode[scope]
min_validation_costs[mode] = np.array(sess.run(
policy_costs[scope],
feed_dict={policy_training_init: policy_validation_init}
))
best_index = 0
real_current_validation_cost = min_validation_costs['real']
logger.info('iter 0 (no update yet)')
log_dictionary(mode_order, min_validation_costs, min_validation_costs, logger)
candidates = {}
for j in range(1, policy_opt_params.max_iters + 1):
### Train policy
if kwargs['algo_name'] == 'trpo' or kwargs['algo_name']=='vpg':
algo = kwargs['rllab_algo']
algo.start_worker()
with rllab_logger.prefix('itr #%d | ' % j):
paths = algo.obtain_samples(j)
samples_data = algo.process_samples(j, paths)
algo.optimize_policy(j, samples_data)
training_cost = 0
elif kwargs['algo_name'] == 'bptt':
# Sample new initial states for computing gradients.
# x_batch = training_dynamics_data.sample(batch_size)[1]
x_batch = np.array([env.reset() for i in range(batch_size)])
_, training_cost = sess.run([policy_opt_op, training_policy_cost],
feed_dict={policy_training_init: x_batch})
training_cost = np.squeeze(training_cost)
elif kwargs['algo_name'] == 'bptt-stochastic':
x_batch = np.array([env.reset() for i in range(batch_size)])
# TODO: remove sym_n_saturates.
_, _, training_cost, n_saturates = sess.run([set_stochastic[1],
policy_opt_op,
training_policy_cost,
sym_n_saturates],
feed_dict={policy_training_init: x_batch})
training_cost = np.squeeze(training_cost)
elif kwargs['algo_name'] == 'l-bfgs':
train_step = tf.get_collection("l-bfgs_step")[0]
x_batch = np.array([env.reset() for i in range(batch_size)])
train_step.minimize(sess, feed_dict={policy_training_init: x_batch})
training_cost = sess.run(training_policy_cost,
feed_dict={policy_training_init: x_batch})
else:
assert kwargs['algo_name'] == 'svg'
grads = svg_update(**svg_args, sess=sess)
training_cost = 0
### Evaluate in learned and real dynamics
if j % policy_opt_params.log_every == 0:
# Set stochasticity back to 0.0.
if kwargs['algo_name'] == 'bptt-stochastic':
logger.debug('n_saturates: {}'.format(n_saturates[:5]))
entropy = 1./2.*np.sum(np.log(2*np.pi*np.e) + sess.run(kwargs['logstd']))
logger.debug('Entropy: %.3f' % entropy)
sess.run(set_stochastic[0])
            # Compute the TRPO mean if necessary.
if kwargs["algo_name"] == "trpo":
algo = kwargs['rllab_algo']
if policy_opt_params.mode == 'trpo_mean':
determ_paths = algo.obtain_samples(j, determ=True)
traj_costs = []
for determ_path in determ_paths:
traj_costs.append(- np.sum(determ_path["rewards"]))
candidates['trpo_mean'] = np.mean(traj_costs)
if 'trpo_mean' != mode_order[1]:
mode_order.insert(1, 'trpo_mean')
else:
candidates['trpo_mean'] = 0.0
trpo_mean_costs.append(candidates['trpo_mean'])
else:
candidates['trpo_mean'] = 0.0
## Training cost
training_costs.append(training_cost)
## Estimated cost
for scope in policy_costs.keys():
mode = scope2mode[scope]
estimated_valid_cost = sess.run(policy_costs[scope],
feed_dict={policy_training_init: policy_validation_init}
)
estimated_valid_cost = np.array(estimated_valid_cost)
if mode in estimated_validation_costs:
estimated_validation_costs[mode].append(np.mean(estimated_valid_cost))
else:
estimated_validation_costs[mode] = [np.mean(estimated_valid_cost)]
candidates[mode] = estimated_valid_cost
## Real cost
real_validation_cost = evaluate_fixed_init_trajectories(
env,
policy_in,
policy_out,
policy_validation_reset_init,
cost_np_vec, sess,
max_timestep=policy_opt_params.oracle_maxtimestep,
gamma=policy_opt_params.gamma
)
real_validation_costs.append(real_validation_cost)
candidates['real'] = real_validation_cost
## Logging
logger.info('iter %d' % j)
logger.info('\ttraining_cost:\t%.3f' % training_cost)
log_dictionary(mode_order, candidates, min_validation_costs, logger)
if False: #verbose and kwargs['algo_name'] == 'bptt': # TODO: debug this
_policy_grads = sess.run([gv[0] for gv in policy_grads_and_vars],
feed_dict={policy_training_init: x_batch})
logger.debug('\t, policy_grads_max: {}'.format(
np.array([np.max(np.abs(grad)) for grad in _policy_grads])))
logger.debug('\t, policy_grads_norm: {}'.format(
np.array([np.linalg.norm(grad) for grad in _policy_grads])))
logger.debug('\t, policy_grads_avg: {}'.format(
np.array([np.mean(np.abs(grad)) for grad in _policy_grads])))
logger.debug('\t, policy_grads_min: {}'.format(
np.array([np.min(np.abs(grad)) for grad in _policy_grads])))
if kwargs['algo_name'] == 'svg':
is_broken = True
# break
## Not done - we update.
## Done - we go back and reduce std.
if not is_done(policy_opt_params, min_validation_costs, candidates, logger):
best_index = j
real_current_validation_cost = candidates['real']
# Save
logger.info('\tSaving policy')
saver.save(sess,
os.path.join(log_dir, 'policy.ckpt'),
write_meta_graph=False)
## Update - only when we save the update.
update_stats(min_validation_costs, candidates, policy_opt_params.whole)
## Stop
# If the number of consecutive dones is greater than the threshold
if j - best_index >= policy_opt_params.num_iters_threshold:
break
log_and_restore(sess,
log_dir,
j,
min_validation_costs,
candidates,
logger,
saver,
mode_order,
best_index,
policy_opt_params.mode)
if policy_opt_params.mode == 'one_model' or policy_opt_params.mode == 'no_early':
min_val_cost = min_validation_costs['estimated'][0]
else:
min_val_cost = np.mean(min_validation_costs[policy_opt_params.mode])
for key in min_validation_costs.keys():
rllab_logger.record_tabular('%s_policy_mean_min_validation_cost' % key,
np.mean(min_validation_costs[key]))
rllab_logger.record_tabular('real_current_validation_cost', real_current_validation_cost)
rllab_logger.record_tabular('# policy updates', best_index)
# Save summary
if TF_SUMMARY:
summary = sess.run(policy_summary,
feed_dict={policy_training_init: policy_validation_init}
)
val_writer.add_summary(summary, best_index)
return {'real_validation_costs': real_validation_costs,
'training_costs': training_costs,
'estimated_validation_costs': estimated_validation_costs,
'policy_opt_params': get_pickeable(policy_opt_params),
'best_index': best_index,
'best_cost': min_val_cost,
'trpo_mean_costs': trpo_mean_costs
}
def is_done(policy_opt_params, min_validation_costs, candidates, logger):
'''
    When mode == 'real', we stop immediately if the cost increases.
    When mode contains 'estimated', we stop if, in one of the buckets, the ratio of
    increasing costs exceeds a certain threshold.
    In other words, we stop as soon as one of the candidates is worse than min_validation_costs.
'''
mode = policy_opt_params.mode
if mode == 'real':
# TODO:relax the constraint.
# return policy_opt_params.stop_critereon(min_validation_costs[mode],
# candidates[mode])
return min_validation_costs['real'] < candidates['real']
elif mode == 'trpo_mean':
assert 'trpo_mean' in min_validation_costs.keys()
return min_validation_costs['trpo_mean'] < candidates['trpo_mean']
elif mode == 'one_model':
return min_validation_costs['estimated'][0] < candidates['estimated'][0]
elif mode == 'no_early':
return False
else:
assert 'estimated' in mode
for _mode in min_validation_costs.keys():
if 'estimated' in _mode and \
policy_opt_params.stop_critereon(
min_validation_costs[_mode], # Input an array
candidates[_mode],
mode='vector'
):
logger.info('\t### %s tells us to stop.' % _mode)
return True
return False
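def _demo_is_done():
    # Hypothetical sketch (not part of the original training code): with mode='real',
    # is_done simply flags when the candidate real-rollout cost exceeds the best cost so far.
    # SimpleNamespace stands in for the real policy_opt_params object.
    from types import SimpleNamespace
    params = SimpleNamespace(mode='real')
    return is_done(params, {'real': 1.0}, {'real': 1.2}, logger=None)  # True: the cost got worse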
def log_and_restore(sess,
log_dir,
index,
min_validation_costs,
candidates,
logger,
saver,
mode_order,
best_index,
mode):
'''
    Restore the policy to the last saved checkpoint and log how the validation costs changed.
'''
logger.info('Stop at iter %d. Recover to iter %d.' % (index, best_index))
for _mode in mode_order:
if _mode in min_validation_costs:
_msg = '\t%.5s validation cost \t %.3f --> %.3f' % (
_mode,
np.mean(candidates[_mode]),
np.mean(min_validation_costs[_mode])
)
if _mode == mode:
_msg += ' ***'
logger.info(
_msg
)
saver.restore(sess, os.path.join(log_dir, 'policy.ckpt'))
def update_stats(min_validation_costs, candidates, whole=False):
'''
    :param whole: if True, the whole candidates dict is copied into
        min_validation_costs, so the information in min_validation_costs stays
        consistent (it all comes from the same iteration).
        If False, we keep the minimum of each individual entry seen so far.
'''
for _mode in min_validation_costs.keys():
costs = min_validation_costs[_mode]
if hasattr(costs, '__iter__') and len(costs) != 1:
if whole:
min_validation_costs[_mode][:] = candidates[_mode][:]
else:
to_update = costs > candidates[_mode]
min_validation_costs[_mode][to_update] = candidates[_mode][to_update]
elif whole or costs > candidates[_mode]:
min_validation_costs[_mode] = candidates[_mode]
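def _demo_update_stats():
    # Hypothetical sketch (not part of the original training code): with whole=False only
    # the entries that improved are kept, element-wise for vector-valued modes.
    mins = {'real': 2.0, 'estimated': np.array([1.0, 3.0])}
    cands = {'real': 1.5, 'estimated': np.array([2.0, 2.5])}
    update_stats(mins, cands, whole=False)
    # mins is now {'real': 1.5, 'estimated': array([1.0, 2.5])}
    return mins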
def log_dictionary(mode_order, validation_costs, min_validation_costs, logger, first_n=5):
for mode in mode_order:
if mode in validation_costs:
costs = validation_costs[mode]
if hasattr(costs, '__iter__'):
assert 'estimated' in mode
msg = np.array_str(costs[:first_n], max_line_width=50, precision=2)
logger.info('\t%.5s_validation_cost:\t%s' %
(mode, msg))
logger.info('\t\tavg=%.2f, increase_ratio=%.2f' % (
np.mean(costs),
np.mean(costs > min_validation_costs[mode])
))
                logger.info('\t\tmedian=%.2f, std=%.2f, min=%.2f, max=%.2f' %
(np.median(costs),
np.std(costs),
np.min(costs),
np.max(costs)))
else:
logger.info('\t%.5s_validation_cost:\t%.3f' %
(mode, costs))
```
|
{
"source": "JerrieYuan/DrawItforSublimeText",
"score": 2
}
|
#### File: JerrieYuan/DrawItforSublimeText/goclean.py
```python
import sublime, sublime_plugin, os
class GocleanCommand(sublime_plugin.TextCommand):
def run(self, edit):
spl = "/"
filename = self.view.file_name()
filename = filename.replace("\\",spl)
gosrcpath = os.path.join(os.getenv("GOPATH"),"src"+spl)
gosrcpath = gosrcpath.replace("\\",spl)
thispkg = filename.replace(gosrcpath,"")
pathlist = thispkg.split(spl)
num = len(pathlist)
thispkg = thispkg.replace(spl+pathlist[num-1],"")
thispkg = os.path.normpath(thispkg)
os.popen("go install "+ thispkg)
print("go clean: go install "+ thispkg)
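        # Hypothetical example (not part of the original plugin): with GOPATH=/home/u/go and the
        # current file /home/u/go/src/github.com/user/pkg/main.go, thispkg becomes
        # "github.com/user/pkg" and the command run is "go install github.com/user/pkg".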
```
|
{
"source": "JerrikEph/fastNLP",
"score": 3
}
|
#### File: fastNLP/core/field.py
```python
__all__ = [
"FieldArray",
"Padder",
"AutoPadder",
"EngChar2DPadder"
]
from copy import deepcopy
import numpy as np
class FieldArray(object):
"""
    Alias: :class:`fastNLP.FieldArray` :class:`fastNLP.core.field.FieldArray`
    FieldArray is the type used to store a single field of a :class:`~fastNLP.DataSet`.
    :param str name: name of the FieldArray
    :param list,numpy.ndarray content: the elements of the list may be list, int, float,
    :param bool is_target: whether this field is a target field.
    :param bool is_input: whether this field is an input field.
    :param padder: a :class:`~fastNLP.Padder` instance. The padder assigned to the FieldArray is deepcopied; to change
        the padder's parameters you must go through fieldarray.set_pad_val(). Defaults to None, i.e. :class:`~fastNLP.AutoPadder` is used.
    :param bool ignore_type: whether to ignore the type of this field. If the field does not need to be converted to
        torch.FloatTensor or torch.LongTensor, this can be set to True. See :class:`~fastNLP.DataSet` for details.
"""
def __init__(self, name, content, is_target=None, is_input=None, padder=None, ignore_type=False):
self.name = name
if isinstance(content, list):
            # If the DataSet was initialized from a dict, content may be a 2-D list / 2-D array / 3-D list.
            # If the DataSet was initialized from a list of Instance, content may be [list]/[array]/[2D list].
for idx, item in enumerate(content):
                # This is the first sample when initializing from a list of Instance: FieldArray(name, [field]).
                # Convert [np.array] into a list of list.
                # [array, array, array] is also supported.
if isinstance(item, np.ndarray):
content[idx] = content[idx].tolist()
elif isinstance(content, np.ndarray):
content = content.tolist() # convert np.ndarray into 2-D list
else:
raise TypeError("content in FieldArray can only be list or numpy.ndarray, got {}.".format(type(content)))
if len(content) == 0:
raise RuntimeError("Cannot initialize FieldArray with empty list.")
self.content = content # 1维 或 2维 或 3维 list, 形状可能不对齐
self.content_dim = None # 表示content是多少维的list
if padder is None:
padder = AutoPadder(pad_val=0)
else:
assert isinstance(padder, Padder), "padder must be of type Padder."
padder = deepcopy(padder)
self.set_padder(padder)
self.ignore_type = ignore_type
self.BASIC_TYPES = (int, float, str) # content中可接受的Python基本类型,这里没有np.array
self.pytype = None
self.dtype = None
self._is_input = None
self._is_target = None
if is_input is not None or is_target is not None:
self.is_input = is_input
self.is_target = is_target
def _set_dtype(self):
if self.ignore_type is False:
self.pytype = self._type_detection(self.content)
self.dtype = self._map_to_np_type(self.pytype)
@property
def is_input(self):
return self._is_input
@is_input.setter
def is_input(self, value):
"""
        Called when field_array.is_input is set to True / False.
"""
if value is True:
self._set_dtype()
self._is_input = value
@property
def is_target(self):
return self._is_target
@is_target.setter
def is_target(self, value):
"""
        Called when field_array.is_target is set to True / False.
"""
if value is True:
self._set_dtype()
self._is_target = value
def _type_detection(self, content):
"""
        Called when this field is set as is_input or is_target.
"""
if len(content) == 0:
raise RuntimeError("Empty list in Field {}.".format(self.name))
type_set = set([type(item) for item in content])
if list in type_set:
if len(type_set) > 1:
# list 跟 非list 混在一起
raise RuntimeError("Mixed data types in Field {}: {}".format(self.name, list(type_set)))
# >1维list
inner_type_set = set()
for l in content:
[inner_type_set.add(type(obj)) for obj in l]
if list not in inner_type_set:
# 二维list
self.content_dim = 2
return self._basic_type_detection(inner_type_set)
else:
if len(inner_type_set) == 1:
# >2维list
inner_inner_type_set = set()
for _2d_list in content:
for _1d_list in _2d_list:
[inner_inner_type_set.add(type(obj)) for obj in _1d_list]
if list in inner_inner_type_set:
raise RuntimeError("FieldArray cannot handle 4-D or more-D list.")
# 3维list
self.content_dim = 3
return self._basic_type_detection(inner_inner_type_set)
else:
# list 跟 非list 混在一起
raise RuntimeError("Mixed data types in Field {}: {}".format(self.name, list(inner_type_set)))
else:
# 一维list
for content_type in type_set:
if content_type not in self.BASIC_TYPES:
raise RuntimeError("Unexpected data type in Field '{}'. Expect one of {}. Got {}.".format(
self.name, self.BASIC_TYPES, content_type))
self.content_dim = 1
return self._basic_type_detection(type_set)
def _basic_type_detection(self, type_set):
"""
:param type_set: a set of Python types
:return: one of self.BASIC_TYPES
"""
if len(type_set) == 1:
return type_set.pop()
elif len(type_set) == 2:
# 有多个basic type; 可能需要up-cast
if float in type_set and int in type_set:
# up-cast int to float
return float
else:
# str 跟 int 或者 float 混在一起
raise RuntimeError("Mixed data types in Field {}: {}".format(self.name, list(type_set)))
else:
# str, int, float混在一起
raise RuntimeError("Mixed data types in Field {}: {}".format(self.name, list(type_set)))
def _1d_list_check(self, val):
"""如果不是1D list就报错
"""
type_set = set((type(obj) for obj in val))
if any(obj not in self.BASIC_TYPES for obj in type_set):
raise ValueError("Mixed data types in Field {}: {}".format(self.name, list(type_set)))
self._basic_type_detection(type_set)
# otherwise: _basic_type_detection will raise error
return True
def _2d_list_check(self, val):
"""如果不是2D list 就报错
"""
type_set = set(type(obj) for obj in val)
if list(type_set) != [list]:
raise ValueError("Mixed data types in Field {}: {}".format(self.name, type_set))
inner_type_set = set()
for l in val:
for obj in l:
inner_type_set.add(type(obj))
self._basic_type_detection(inner_type_set)
return True
@staticmethod
def _map_to_np_type(basic_type):
type_mapping = {int: np.int64, float: np.float64, str: np.str, np.ndarray: np.ndarray}
return type_mapping[basic_type]
def __repr__(self):
return "FieldArray {}: {}".format(self.name, self.content.__repr__())
def append(self, val):
"""将val append到这个field的尾部。如果这个field已经被设置为input或者target,则在append之前会检查该类型是否与已有
的内容是匹配的。
:param Any val: 需要append的值。
"""
if self.ignore_type is False:
if isinstance(val, list):
pass
elif isinstance(val, tuple): # 确保最外层是list
val = list(val)
elif isinstance(val, np.ndarray):
val = val.tolist()
elif any((isinstance(val, t) for t in self.BASIC_TYPES)):
pass
else:
raise RuntimeError(
"Unexpected data type {}. Should be list, np.array, or {}".format(type(val), self.BASIC_TYPES))
if self.is_input is True or self.is_target is True:
if type(val) == list:
if len(val) == 0:
raise ValueError("Cannot append an empty list.")
if self.content_dim == 2 and self._1d_list_check(val):
# 1维list检查
pass
elif self.content_dim == 3 and self._2d_list_check(val):
# 2维list检查
pass
else:
raise RuntimeError(
"Dimension not matched: expect dim={}, got {}.".format(self.content_dim - 1, val))
elif type(val) in self.BASIC_TYPES and self.content_dim == 1:
# scalar检查
if type(val) == float and self.pytype == int:
self.pytype = float
self.dtype = self._map_to_np_type(self.pytype)
else:
raise RuntimeError(
"Unexpected data type {}. Should be list, np.array, or {}".format(type(val), self.BASIC_TYPES))
self.content.append(val)
def __getitem__(self, indices):
return self.get(indices, pad=False)
def __setitem__(self, idx, val):
assert isinstance(idx, int)
self.content[idx] = val
def get(self, indices, pad=True):
"""
        Return the content for the given indices.
        :param int,List[int] indices: fetch the content corresponding to these indices.
        :param bool pad: whether to pad the returned result. Only effective when indices is a List[int].
        :return: the content for the given indices, either a single value or a List.
"""
if isinstance(indices, int):
return self.content[indices]
if self.is_input is False and self.is_target is False:
raise RuntimeError("Please specify either is_input or is_target is True for {}".format(self.name))
contents = [self.content[i] for i in indices]
if self.padder is None or pad is False:
return np.array(contents)
else:
return self.padder(contents, field_name=self.name, field_ele_dtype=self.dtype)
def set_padder(self, padder):
"""
        Set the padder used when padding this field; if None, no padding is performed.
        :param padder: a :class:`~fastNLP.Padder` instance; set to None to remove the padder.
"""
if padder is not None:
assert isinstance(padder, Padder), "padder must be of type Padder."
self.padder = deepcopy(padder)
else:
self.padder = None
def set_pad_val(self, pad_val):
"""
        Change the padder's pad_val.
        :param int pad_val: the pad value of this field is set to this value.
"""
if self.padder is not None:
self.padder.set_pad_val(pad_val)
return self
def __len__(self):
"""
Returns the size of FieldArray.
:return int length:
"""
return len(self.content)
def to(self, other):
"""
        Copy the attributes of other to this FieldArray (other must be a FieldArray).
        The attributes include is_input, is_target, padder and ignore_type.
        :param other: the :class:`~fastNLP.FieldArray` to copy attributes from
        :return: :class:`~fastNLP.FieldArray`
"""
assert isinstance(other, FieldArray), "Only support FieldArray type, not {}.".format(type(other))
self.is_input = other.is_input
self.is_target = other.is_target
self.padder = other.padder
self.ignore_type = other.ignore_type
return self
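def _demo_field_array():
    # Hypothetical usage sketch (not part of the upstream fastNLP source): a 2-D int field
    # is padded to the batch maximum length by the default AutoPadder when fetched with pad=True.
    fa = FieldArray('words', [[1, 2], [3, 4, 5]], is_input=True)
    padded = fa.get([0, 1])
    # padded == np.array([[1, 2, 0], [3, 4, 5]])
    return padded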
def _is_iterable(content):
try:
_ = (e for e in content)
except TypeError:
return False
return True
class Padder:
"""
    Alias: :class:`fastNLP.Padder` :class:`fastNLP.core.field.Padder`
    Every padder must inherit from this class and override the __call__ method.
    Padders are used to pad a batch. The passed-in elements are used in place, i.e. modifying them directly may change
    the data; it is recommended to deepcopy them before any in-place modification.
    .. py:function:: __call__(self, contents, field_name, field_ele_dtype):
        The input is the List of contents. Suppose we have the DataSet below.
        :param list(Any) contents: the passed-in elements are used in place, i.e. modifying them directly may change
            the data; deepcopy them before any in-place modification.
        :param str, field_name: name of the field.
        :param np.int64,np.float64,np.str,None, field_ele_dtype: dtype of the innermost elements of this field. If the
            field's ignore_type is True, this value is None.
        :return: np.array([padded_element])
"""
def __init__(self, pad_val=0, **kwargs):
self.pad_val = pad_val
def set_pad_val(self, pad_val):
self.pad_val = pad_val
def __call__(self, contents, field_name, field_ele_dtype):
"""
        The input is the List of contents. Suppose we have the DataSet below.
        :param list(Any) contents: the passed-in elements are used in place, i.e. modifying them directly may change
            the data; deepcopy them before any in-place modification.
        :param str, field_name: name of the field.
        :param np.int64,np.float64,np.str,None, field_ele_dtype: dtype of the innermost elements of this field. If the
            field's ignore_type is True, this value is None.
        :return: np.array([padded_element])
        Example::
            from fastNLP import DataSet
            from fastNLP import Instance
            dataset = DataSet()
            dataset.append(Instance(sent='this is a demo', length=4,
                                    chars=[['t', 'h', 'i', 's'], ['i', 's'], ['a'], ['d', 'e', 'm', 'o']]))
            dataset.append(Instance(sent='another one', length=2,
                                    chars=[['a', 'n', 'o', 't', 'h', 'e', 'r'], ['o', 'n', 'e']]))
        If we then call
            batch = dataset.get([0,1], pad=True)
        the __call__ of the padder of the sent field receives
            [
                'this is a demo',
                'another one'
            ]
        the __call__ of the padder of the length field receives
            [4, 2]
        and the __call__ of the padder of the chars field receives
            [
                [['t', 'h', 'i', 's'], ['i', 's'], ['a'], ['d', 'e', 'm', 'o']],
                [['a', 'n', 'o', 't', 'h', 'e', 'r'], ['o', 'n', 'e']]
            ]
        That is, the contents of a given field from every instance are gathered into one List and passed in.
"""
raise NotImplementedError
class AutoPadder(Padder):
"""
    Alias: :class:`fastNLP.AutoPadder` :class:`fastNLP.core.field.AutoPadder`
    Automatically decides from the contents whether padding is needed.
    1. If the element type (the dtype of the innermost elements of the field, visible via FieldArray.dtype; e.g.
       ['This', 'is', ...] has element type np.str and [[1,2], ...] has element type np.int64) is not
       (np.int64, np.float64), no padding is performed.
    2. If the element type is (np.int64, np.float64):
       2.1 If the field content itself is (np.int64, np.float64), e.g. seq_len, no padding is performed.
       2.2 If the field content is a List, the Lists within a batch are padded to the same length. If the inner Lists
           also need padding, use another padder.
    That is, a field like [1, 2, 3, ...] in an Instance can be padded, while [[1,2], [3,4, ...]] cannot.
"""
def __init__(self, pad_val=0):
"""
        :param pad_val: int, this index is used at padded positions
"""
super().__init__(pad_val=pad_val)
def _is_two_dimension(self, contents):
"""
        Check whether contents has exactly two dimensions. [[1,2], [3]] has two dimensions; [[[1,2], [3, 4, 5]], [[4,5]]] has three.
:param contents:
:return:
"""
value = contents[0]
if isinstance(value, (np.ndarray, list)):
value = value[0]
if isinstance(value, (np.ndarray, list)):
return False
return True
return False
def __call__(self, contents, field_name, field_ele_dtype):
if not _is_iterable(contents[0]):
array = np.array([content for content in contents], dtype=field_ele_dtype)
elif field_ele_dtype in (np.int64, np.float64) and self._is_two_dimension(contents):
max_len = max([len(content) for content in contents])
array = np.full((len(contents), max_len), self.pad_val, dtype=field_ele_dtype)
for i, content in enumerate(contents):
array[i][:len(content)] = content
elif field_ele_dtype is None:
array = np.array(contents) # 当ignore_type=True时,直接返回contents
else: # should only be str
array = np.array([content for content in contents])
return array
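def _demo_auto_padder():
    # Hypothetical usage sketch (not part of the upstream fastNLP source): AutoPadder pads a
    # batch of variable-length int lists to the batch maximum with pad_val.
    padder = AutoPadder(pad_val=0)
    padded = padder([[1, 2], [3, 4, 5]], field_name='words', field_ele_dtype=np.int64)
    # padded == np.array([[1, 2, 0], [3, 4, 5]])
    return padded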
class EngChar2DPadder(Padder):
"""
    Alias: :class:`fastNLP.EngChar2DPadder` :class:`fastNLP.core.field.EngChar2DPadder`
    Performs character-level 2D padding for English. The corresponding field content should look like
    [['T', 'h', 'i', 's'], ['a'], ['d', 'e', 'm', 'o']], but this Padder can only handle int indices.
    The padded batch has shape (batch_size, max_sentence_length, max_word_length), where max_sentence_length is the
    longest sentence in the batch and max_word_length is the longest word in the batch::
        from fastNLP import DataSet
        from fastNLP import EngChar2DPadder
        from fastNLP import Vocabulary
        dataset = DataSet({'sent': ['This is the first demo', 'This is the second demo']})
        dataset.apply(lambda ins:[list(word) for word in ins['sent'].split()], new_field_name='chars')
        vocab = Vocabulary()
        vocab.from_dataset(dataset, field_name='chars')
        vocab.index_dataset(dataset, field_name='chars')
        dataset.set_input('chars')
        padder = EngChar2DPadder()
        dataset.set_padder('chars', padder)  # the padder of the chars field is now EngChar2DPadder
"""
def __init__(self, pad_val=0, pad_length=0):
"""
        :param pad_val: int, this index is used at padded positions
        :param pad_length: int, if 0, the maximum word length in a batch is used as the padding length; if greater
            than 0, every word is padded or truncated to that length.
"""
super().__init__(pad_val=pad_val)
self.pad_length = pad_length
def _exactly_three_dims(self, contents, field_name):
"""
        Check that the passed-in contents has exactly 3 dimensions, and raise an error otherwise. Conceptually, the first dimension is the batch, the second is words, and the third is characters.
:param contents:
:param field_name: str
:return:
"""
if not isinstance(contents, list):
raise TypeError("contents should be a list, not {}.".format(type(contents)))
value = contents[0]
try:
value = value[0]
except:
raise ValueError("Field:{} only has one dimension.".format(field_name))
try:
value = value[0]
except:
raise ValueError("Field:{} only has two dimensions.".format(field_name))
if _is_iterable(value):
raise ValueError("Field:{} has more than 3 dimension.".format(field_name))
def __call__(self, contents, field_name, field_ele_dtype):
"""
        Expects input of the form
[
[[0, 2], [2, 3, 4], ..],
[[9, 8, 2, 4], [1, 2,], ...],
....
]
:param contents:
:param field_name:
:param field_ele_dtype
:return:
"""
if field_ele_dtype not in (np.int64, np.float64):
raise TypeError('dtype of Field:{} should be np.int64 or np.float64 to do 2D padding, get {}.'.format(
field_name, field_ele_dtype
))
self._exactly_three_dims(contents, field_name)
if self.pad_length < 1:
max_char_length = max([max(len(char_lst) for char_lst in word_lst) for word_lst in contents])
else:
max_char_length = self.pad_length
max_sent_length = max(len(word_lst) for word_lst in contents)
batch_size = len(contents)
dtype = type(contents[0][0][0])
padded_array = np.full((batch_size, max_sent_length, max_char_length), fill_value=self.pad_val,
dtype=dtype)
for b_idx, word_lst in enumerate(contents):
for c_idx, char_lst in enumerate(word_lst):
chars = char_lst[:max_char_length]
padded_array[b_idx, c_idx, :len(chars)] = chars
return padded_array
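def _demo_eng_char_2d_padder():
    # Hypothetical usage sketch (not part of the upstream fastNLP source): character indices
    # are padded to shape (batch_size, max_sentence_length, max_word_length).
    padder = EngChar2DPadder(pad_val=0)
    contents = [[[0, 2], [2, 3, 4]], [[9, 8, 2, 4], [1, 2]]]
    padded = padder(contents, field_name='chars', field_ele_dtype=np.int64)
    # padded.shape == (2, 2, 4); shorter words and sentences are filled with pad_val
    return padded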
```
#### File: fastNLP/core/vocabulary.py
```python
__all__ = [
"Vocabulary",
"VocabularyOption",
]
from functools import wraps
from collections import Counter
from .dataset import DataSet
from .utils import Option
class VocabularyOption(Option):
def __init__(self,
max_size=None,
min_freq=None,
padding='<pad>',
unknown='<unk>'):
super().__init__(
max_size=max_size,
min_freq=min_freq,
padding=padding,
unknown=unknown
)
def _check_build_vocab(func):
"""A decorator to make sure the indexing is built before used.
"""
@wraps(func) # to solve missing docstring
def _wrapper(self, *args, **kwargs):
if self.word2idx is None or self.rebuild is True:
self.build_vocab()
return func(self, *args, **kwargs)
return _wrapper
def _check_build_status(func):
"""A decorator to check whether the vocabulary updates after the last build.
"""
@wraps(func) # to solve missing docstring
def _wrapper(self, *args, **kwargs):
if self.rebuild is False:
self.rebuild = True
if self.max_size is not None and len(self.word_count) >= self.max_size:
print("[Warning] Vocabulary has reached the max size {} when calling {} method. "
"Adding more words may cause unexpected behaviour of Vocabulary. ".format(
self.max_size, func.__name__))
return func(self, *args, **kwargs)
return _wrapper
class Vocabulary(object):
"""
    Alias: :class:`fastNLP.Vocabulary` :class:`fastNLP.core.vocabulary.Vocabulary`
    Used to build, store and use a one-to-one mapping from `str` to `int`::
        vocab = Vocabulary()
        word_list = "this is a word list".split()
        vocab.update(word_list)
        vocab["word"] # str to int
        vocab.to_word(5) # int to str
    :param int max_size: maximum size of the `Vocabulary`, i.e. the maximum number of words it can store.
        If ``None``, the size is unlimited. Default: ``None``
    :param int min_freq: minimum frequency a word must have in the text to be recorded; should be >= 1.
        Words below this frequency are treated as `unknown`. If ``None``, every word in the text is recorded. Default: ``None``
    :param str optional padding: the padding token. If set to ``None``, the vocabulary has no padding token and it is
        not counted in the vocabulary size; ``None`` is mostly used when building a Vocabulary for labels.
        Default: '<pad>'
    :param str optional unknown: the unknown token; every unrecorded word is treated as unknown when converted to `int`.
        If set to ``None``, the vocabulary has no unknown token and it is not counted in the vocabulary size;
        ``None`` is mostly used when building a Vocabulary for labels.
        Default: '<unk>'
"""
def __init__(self, max_size=None, min_freq=None, padding='<pad>', unknown='<unk>'):
self.max_size = max_size
self.min_freq = min_freq
self.word_count = Counter()
self.unknown = unknown
self.padding = padding
self.word2idx = None
self.idx2word = None
self.rebuild = True
@_check_build_status
def update(self, word_lst):
"""依次增加序列中词在词典中的出现频率
:param list word_lst: a list of strings
"""
self.word_count.update(word_lst)
@_check_build_status
def add(self, word):
"""
        Increase the recorded frequency of a new word in the vocabulary.
        :param str word: the new word
"""
self.word_count[word] += 1
@_check_build_status
def add_word(self, word):
"""
        Increase the recorded frequency of a new word in the vocabulary.
        :param str word: the new word
"""
self.add(word)
@_check_build_status
def add_word_lst(self, word_lst):
"""
        Increase the recorded frequency of each word in the sequence.
        :param list[str] word_lst: a sequence of words
"""
self.update(word_lst)
def build_vocab(self):
"""
        Build the vocabulary from the recorded words and their frequencies. Note: rebuilding may change the size of the
        vocabulary, but words already recorded in it keep their corresponding `int` indices.
"""
if self.word2idx is None:
self.word2idx = {}
if self.padding is not None:
self.word2idx[self.padding] = len(self.word2idx)
if self.unknown is not None:
self.word2idx[self.unknown] = len(self.word2idx)
max_size = min(self.max_size, len(self.word_count)) if self.max_size else None
words = self.word_count.most_common(max_size)
if self.min_freq is not None:
words = filter(lambda kv: kv[1] >= self.min_freq, words)
if self.word2idx is not None:
words = filter(lambda kv: kv[0] not in self.word2idx, words)
start_idx = len(self.word2idx)
self.word2idx.update({w: i + start_idx for i, (w, _) in enumerate(words)})
self.build_reverse_vocab()
self.rebuild = False
def build_reverse_vocab(self):
"""
基于 "word to index" dict, 构建 "index to word" dict.
"""
self.idx2word = {i: w for w, i in self.word2idx.items()}
@_check_build_vocab
def __len__(self):
return len(self.word2idx)
@_check_build_vocab
def __contains__(self, item):
"""
        Check whether a word has been recorded.
:param item: the word
:return: True or False
"""
return item in self.word2idx
def has_word(self, w):
"""
        Check whether a word has been recorded::
has_abc = vocab.has_word('abc')
# equals to
has_abc = 'abc' in vocab
:param item: the word
:return: ``True`` or ``False``
"""
return self.__contains__(w)
@_check_build_vocab
def __getitem__(self, w):
"""
To support usage like::
vocab[w]
"""
if w in self.word2idx:
return self.word2idx[w]
if self.unknown is not None:
return self.word2idx[self.unknown]
else:
raise ValueError("word {} not in vocabulary".format(w))
@_check_build_vocab
def index_dataset(self, *datasets, field_name, new_field_name=None):
"""
        Convert the words of the given field in the DataSet(s) to indices. Example::
            # remember to use `field_name`
            vocab.index_dataset(train_data, dev_data, test_data, field_name='words')
        :param datasets: the class:`~fastNLP.DataSet` (s) to index; one or more (list) are supported
        :param str field_name: the field to index. If several DataSets are given, every DataSet must have this field.
            Currently only ``str`` , ``list(str)`` and ``list(list(str))`` are supported.
        :param str new_field_name: field_name under which the result is stored. If ``None``, the original field is overwritten.
            Default: ``None``
"""
def index_instance(ins):
"""
            There are several possible cases: str, 1d-list, 2d-list.
:param ins:
:return:
"""
field = ins[field_name]
if isinstance(field, str):
return self.to_index(field)
elif isinstance(field, list):
if not isinstance(field[0], list):
return [self.to_index(w) for w in field]
else:
if isinstance(field[0][0], list):
raise RuntimeError("Only support field with 2 dimensions.")
return [[self.to_index(c) for c in w] for w in field]
if new_field_name is None:
new_field_name = field_name
for idx, dataset in enumerate(datasets):
if isinstance(dataset, DataSet):
try:
dataset.apply(index_instance, new_field_name=new_field_name)
except Exception as e:
print("When processing the `{}` dataset, the following error occurred.".format(idx))
raise e
else:
raise RuntimeError("Only DataSet type is allowed.")
def from_dataset(self, *datasets, field_name):
"""
        Build the vocabulary from the words in the given field(s) of the dataset(s)::
            # remember to use `field_name`
            vocab.from_dataset(train_data1, train_data2, field_name='words')
        :param datasets: the class:`~fastNLP.DataSet` (s) to build from; one or more (list) are supported
        :param field_name: either a ``str`` or a ``list(str)`` .
            The field(s) used to build the vocabulary; one or more fields are supported.
            If several DataSets are given, every DataSet must have these fields.
            Currently supported field structures: ``str`` , ``list(str)`` , ``list(list(str))``
        :return self:
"""
if isinstance(field_name, str):
field_name = [field_name]
elif not isinstance(field_name, list):
raise TypeError('invalid argument field_name: {}'.format(field_name))
def construct_vocab(ins):
for fn in field_name:
field = ins[fn]
if isinstance(field, str):
self.add_word(field)
elif isinstance(field, list):
if not isinstance(field[0], list):
self.add_word_lst(field)
else:
if isinstance(field[0][0], list):
raise RuntimeError("Only support field with 2 dimensions.")
[self.add_word_lst(w) for w in field]
for idx, dataset in enumerate(datasets):
if isinstance(dataset, DataSet):
try:
dataset.apply(construct_vocab)
except Exception as e:
print("When processing the `{}` dataset, the following error occurred.".format(idx))
raise e
else:
raise RuntimeError("Only DataSet type is allowed.")
return self
def to_index(self, w):
"""
        Convert a word to its index. If the word is not recorded in the vocabulary, it is treated as unknown;
        if ``unknown=None``, a ``ValueError`` is raised::
index = vocab.to_index('abc')
# equals to
index = vocab['abc']
:param str w: a word
:return int index: the number
"""
return self.__getitem__(w)
@property
@_check_build_vocab
def unknown_idx(self):
"""
The index corresponding to unknown.
"""
if self.unknown is None:
return None
return self.word2idx[self.unknown]
@property
@_check_build_vocab
def padding_idx(self):
"""
The index corresponding to padding.
"""
if self.padding is None:
return None
return self.word2idx[self.padding]
@_check_build_vocab
def to_word(self, idx):
"""
Given an index, return the corresponding word.
:param int idx: the index
:return str word: the word
"""
return self.idx2word[idx]
def clear(self):
"""
Clear the word table data in the Vocabulary, equivalent to re-initializing it.
:return:
"""
self.word_count.clear()
self.word2idx = None
self.idx2word = None
self.rebuild = True
def __getstate__(self):
"""Use to prepare data for pickle.
"""
len(self) # make sure vocab has been built
state = self.__dict__.copy()
# no need to pickle idx2word as it can be constructed from word2idx
del state['idx2word']
return state
def __setstate__(self, state):
"""Use to restore state from pickle.
"""
self.__dict__.update(state)
self.build_reverse_vocab()
def __repr__(self):
return "Vocabulary({}...)".format(list(self.word_count.keys())[:5])
def __iter__(self):
return iter(list(self.word_count.keys()))
```
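A minimal usage sketch for the Vocabulary methods above, assuming `Vocabulary` and `DataSet` are importable from `fastNLP`; the toy sentences and the `words` field are hypothetical.
```python
from fastNLP import DataSet, Vocabulary

# Hypothetical toy data with a tokenized 'words' field
train_data = DataSet({'words': [['this', 'is', 'fine'], ['another', 'short', 'sentence']]})

vocab = Vocabulary()
vocab.from_dataset(train_data, field_name='words')   # build the word table
vocab.index_dataset(train_data, field_name='words')  # replace words with indices in place

print(len(vocab))             # vocabulary size (padding/unknown included)
print(vocab.to_index('is'))   # word -> index
print(vocab.to_word(2))       # index -> word
```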
#### File: io/data_loader/sst.py
```python
from typing import Iterable
from nltk import Tree
from ..base_loader import DataInfo, DataSetLoader
from ...core.vocabulary import VocabularyOption, Vocabulary
from ...core.dataset import DataSet
from ...core.instance import Instance
from ..embed_loader import EmbeddingOption, EmbedLoader
class SSTLoader(DataSetLoader):
URL = 'https://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip'
DATA_DIR = 'sst/'
"""
Alias: :class:`fastNLP.io.SSTLoader` :class:`fastNLP.io.dataset_loader.SSTLoader`
Load the SST dataset. The resulting DataSet contains the fields::
words: list(str), the text to be classified
target: str, the label of the text
Data source: https://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip
:param subtree: whether to expand the data into subtrees to augment the dataset. Default: ``False``
:param fine_grained: whether to use the SST-5 standard; if ``False`` , SST-2 is used. Default: ``False``
"""
def __init__(self, subtree=False, fine_grained=False):
self.subtree = subtree
tag_v = {'0': 'very negative', '1': 'negative', '2': 'neutral',
'3': 'positive', '4': 'very positive'}
if not fine_grained:
tag_v['0'] = tag_v['1']
tag_v['4'] = tag_v['3']
self.tag_v = tag_v
def _load(self, path):
"""
:param str path: the path where the data is stored
:return: a :class:`~fastNLP.DataSet` object
"""
datalist = []
with open(path, 'r', encoding='utf-8') as f:
datas = []
for l in f:
datas.extend([(s, self.tag_v[t])
for s, t in self._get_one(l, self.subtree)])
ds = DataSet()
for words, tag in datas:
ds.append(Instance(words=words, target=tag))
return ds
@staticmethod
def _get_one(data, subtree):
tree = Tree.fromstring(data)
if subtree:
return [(t.leaves(), t.label()) for t in tree.subtrees()]
return [(tree.leaves(), tree.label())]
def process(self,
paths,
train_ds: Iterable[str] = None,
src_vocab_op: VocabularyOption = None,
tgt_vocab_op: VocabularyOption = None,
src_embed_op: EmbeddingOption = None):
input_name, target_name = 'words', 'target'
src_vocab = Vocabulary() if src_vocab_op is None else Vocabulary(**src_vocab_op)
tgt_vocab = Vocabulary(unknown=None, padding=None) \
if tgt_vocab_op is None else Vocabulary(**tgt_vocab_op)
info = DataInfo(datasets=self.load(paths))
_train_ds = [info.datasets[name]
for name in train_ds] if train_ds else info.datasets.values()
src_vocab.from_dataset(*_train_ds, field_name=input_name)
tgt_vocab.from_dataset(*_train_ds, field_name=target_name)
src_vocab.index_dataset(
*info.datasets.values(),
field_name=input_name, new_field_name=input_name)
tgt_vocab.index_dataset(
*info.datasets.values(),
field_name=target_name, new_field_name=target_name)
info.vocabs = {
input_name: src_vocab,
target_name: tgt_vocab
}
if src_embed_op is not None:
src_embed_op.vocab = src_vocab
init_emb = EmbedLoader.load_with_vocab(**src_embed_op)
info.embeddings[input_name] = init_emb
return info
```
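A hedged sketch of driving `SSTLoader._load` directly; the file path is hypothetical and the data is assumed to be PTB-style parse trees as expected by `Tree.fromstring`.
```python
loader = SSTLoader(subtree=False, fine_grained=False)
# Parse one split into a DataSet with 'words' and 'target' fields
ds = loader._load('data/sst/train.txt')   # hypothetical path
print(len(ds))
print(ds[0]['words'], ds[0]['target'])
```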
#### File: modules/encoder/embedding.py
```python
__all__ = [
"Embedding"
]
import torch.nn as nn
from ..utils import get_embeddings
class Embedding(nn.Embedding):
"""
Alias: :class:`fastNLP.modules.Embedding` :class:`fastNLP.modules.encoder.embedding.Embedding`
Embedding component. Use self.num_embeddings to get the vocabulary size and self.embedding_dim to get the embedding dimension."""
def __init__(self, init_embed, padding_idx=None, dropout=0.0, sparse=False, max_norm=None, norm_type=2,
scale_grad_by_freq=False):
"""
:param tuple(int,int),torch.FloatTensor,nn.Embedding,numpy.ndarray init_embed: the size of the Embedding (when a tuple(int, int) is passed,
the first int is vocab_size and the second int is embed_dim); if a Tensor, Embedding or ndarray is passed, it is used directly to initialize the Embedding
:param None,int padding_idx: the embedding at this index will always be zero.
:param float dropout: dropout applied to the output of the Embedding.
:param bool sparse: if True, the gradients of the Embedding will be sparse; see the PyTorch Embedding docs for more information.
:param None,float max_norm: the maximum norm allowed for each vector
:param int norm_type: the type of the norm
:param bool scale_grad_by_freq: if True, the gradient will be divided by the frequency of the word.
"""
embed = get_embeddings(init_embed)
num_embeddings, embedding_dim = embed.weight.size()
super().__init__(num_embeddings, embedding_dim, padding_idx=padding_idx,
max_norm=max_norm, norm_type=norm_type, scale_grad_by_freq=scale_grad_by_freq,
sparse=sparse, _weight=embed.weight.data)
del embed
self.dropout = nn.Dropout(dropout)
def forward(self, x):
"""
:param torch.LongTensor x: [batch, seq_len]
:return: torch.Tensor : [batch, seq_len, embed_dim]
"""
x = super().forward(x)
return self.dropout(x)
def size(self):
"""
The size of the Embedding
:return: torch.Size()
"""
return self.weight.size()
```
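A small usage sketch, assuming `get_embeddings` accepts a `(vocab_size, embed_dim)` tuple as the docstring describes; the sizes are arbitrary.
```python
import torch

embed = Embedding((1000, 50), dropout=0.1)   # vocab_size=1000, embed_dim=50
x = torch.randint(0, 1000, (4, 7))           # [batch, seq_len]
out = embed(x)
print(out.shape)     # torch.Size([4, 7, 50])
print(embed.size())  # full weight size: torch.Size([1000, 50])
```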
#### File: matching/model/CIN.py
```python
import torch
import torch.nn as nn
from fastNLP.models import BaseModel
#args = type('args', (), {})()
class CIN4NLI(BaseModel):
# TODO: still in progress
def __init__(self):
super(CIN4NLI, self).__init__()
def forward(self, words, segment_id, seq_len):
pass
class CIN(nn.Module):
def __init__(self):
super(CIN, self).__init__()
def forward(self, words, segment_id, seq_len):
pass
```
#### File: test/core/test_callbacks.py
```python
import unittest
import numpy as np
import torch
from fastNLP.core.callback import EarlyStopCallback, GradientClipCallback, LRScheduler, ControlC, \
LRFinder, TensorboardCallback
from fastNLP import DataSet
from fastNLP import Instance
from fastNLP import BCELoss
from fastNLP import AccuracyMetric
from fastNLP import SGD
from fastNLP import Trainer
from fastNLP.models.base_model import NaiveClassifier
def prepare_env():
def prepare_fake_dataset():
mean = np.array([-3, -3])
cov = np.array([[1, 0], [0, 1]])
class_A = np.random.multivariate_normal(mean, cov, size=(1000,))
mean = np.array([3, 3])
cov = np.array([[1, 0], [0, 1]])
class_B = np.random.multivariate_normal(mean, cov, size=(1000,))
data_set = DataSet([Instance(x=[float(item[0]), float(item[1])], y=[0.0]) for item in class_A] +
[Instance(x=[float(item[0]), float(item[1])], y=[1.0]) for item in class_B])
return data_set
data_set = prepare_fake_dataset()
data_set.set_input("x")
data_set.set_target("y")
model = NaiveClassifier(2, 1)
return data_set, model
class TestCallback(unittest.TestCase):
def test_gradient_clip(self):
data_set, model = prepare_env()
trainer = Trainer(data_set, model,
loss=BCELoss(pred="predict", target="y"),
n_epochs=20,
batch_size=32,
print_every=50,
optimizer=SGD(lr=0.1),
check_code_level=2,
use_tqdm=False,
dev_data=data_set,
metrics=AccuracyMetric(pred="predict", target="y"),
callbacks=[GradientClipCallback(model.parameters(), clip_value=2)])
trainer.train()
def test_early_stop(self):
data_set, model = prepare_env()
trainer = Trainer(data_set, model,
loss=BCELoss(pred="predict", target="y"),
n_epochs=20,
batch_size=32,
print_every=50,
optimizer=SGD(lr=0.01),
check_code_level=2,
use_tqdm=False,
dev_data=data_set,
metrics=AccuracyMetric(pred="predict", target="y"),
callbacks=[EarlyStopCallback(5)])
trainer.train()
def test_lr_scheduler(self):
data_set, model = prepare_env()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
trainer = Trainer(data_set, model,
loss=BCELoss(pred="predict", target="y"),
n_epochs=5,
batch_size=32,
print_every=50,
optimizer=optimizer,
check_code_level=2,
use_tqdm=False,
dev_data=data_set,
metrics=AccuracyMetric(pred="predict", target="y"),
callbacks=[LRScheduler(torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1))])
trainer.train()
def test_KeyBoardInterrupt(self):
data_set, model = prepare_env()
trainer = Trainer(data_set, model,
loss=BCELoss(pred="predict", target="y"),
n_epochs=5,
batch_size=32,
print_every=50,
optimizer=SGD(lr=0.1),
check_code_level=2,
use_tqdm=False,
callbacks=[ControlC(False)])
trainer.train()
def test_LRFinder(self):
data_set, model = prepare_env()
trainer = Trainer(data_set, model,
loss=BCELoss(pred="predict", target="y"),
n_epochs=5,
batch_size=32,
print_every=50,
optimizer=SGD(lr=0.1),
check_code_level=2,
use_tqdm=False,
callbacks=[LRFinder(len(data_set) // 32)])
trainer.train()
def test_TensorboardCallback(self):
data_set, model = prepare_env()
trainer = Trainer(data_set, model,
loss=BCELoss(pred="predict", target="y"),
n_epochs=5,
batch_size=32,
print_every=50,
optimizer=SGD(lr=0.1),
check_code_level=2,
use_tqdm=False,
dev_data=data_set,
metrics=AccuracyMetric(pred="predict", target="y"),
callbacks=[TensorboardCallback("loss", "metric")])
trainer.train()
def test_readonly_property(self):
from fastNLP.core.callback import Callback
passed_epochs = []
total_epochs = 5
class MyCallback(Callback):
def __init__(self):
super(MyCallback, self).__init__()
def on_epoch_begin(self):
passed_epochs.append(self.epoch)
print(self.n_epochs, self.n_steps, self.batch_size)
print(self.model)
print(self.optimizer)
data_set, model = prepare_env()
trainer = Trainer(data_set, model,
loss=BCELoss(pred="predict", target="y"),
n_epochs=total_epochs,
batch_size=32,
print_every=50,
optimizer=SGD(lr=0.1),
check_code_level=2,
use_tqdm=False,
dev_data=data_set,
metrics=AccuracyMetric(pred="predict", target="y"),
callbacks=[MyCallback()])
trainer.train()
assert passed_epochs == list(range(1, total_epochs + 1))
```
#### File: test/models/model_runner.py
```python
import torch
from fastNLP import Trainer, Tester, DataSet, Callback
from fastNLP import AccuracyMetric
from fastNLP import CrossEntropyLoss
from fastNLP.core.const import Const as C
from random import randrange
VOCAB_SIZE = 100
NUM_CLS = 100
MAX_LEN = 10
N_SAMPLES = 100
N_EPOCHS = 1
BATCH_SIZE = 5
TEXT_CLS = 'text_cls'
POS_TAGGING = 'pos_tagging'
NLI = 'nli'
class ModelRunner():
class Checker(Callback):
def on_backward_begin(self, loss):
# the loss must stay finite during training
assert bool(torch.isfinite(loss).all())
def gen_seq(self, length, vocab_size):
"""generate fake sequence indexes with given length"""
# reserve 0 for padding
return [randrange(1, vocab_size) for _ in range(length)]
def gen_var_seq(self, max_len, vocab_size):
"""generate fake sequence indexes in variant length"""
length = randrange(3, max_len) # at least 3 words in a seq
return self.gen_seq(length, vocab_size)
def prepare_text_classification_data(self):
index = 'index'
ds = DataSet({index: list(range(N_SAMPLES))})
ds.apply_field(lambda x: self.gen_var_seq(MAX_LEN, VOCAB_SIZE),
field_name=index, new_field_name=C.INPUT,
is_input=True)
ds.apply_field(lambda x: randrange(NUM_CLS),
field_name=index, new_field_name=C.TARGET,
is_target=True)
ds.apply_field(len, C.INPUT, C.INPUT_LEN,
is_input=True)
return ds
def prepare_pos_tagging_data(self):
index = 'index'
ds = DataSet({index: list(range(N_SAMPLES))})
ds.apply_field(lambda x: self.gen_var_seq(MAX_LEN, VOCAB_SIZE),
field_name=index, new_field_name=C.INPUT,
is_input=True)
ds.apply_field(lambda x: self.gen_seq(len(x), NUM_CLS),
field_name=C.INPUT, new_field_name=C.TARGET,
is_target=True)
ds.apply_field(len, C.INPUT, C.INPUT_LEN,
is_input=True, is_target=True)
return ds
def prepare_nli_data(self):
index = 'index'
ds = DataSet({index: list(range(N_SAMPLES))})
ds.apply_field(lambda x: self.gen_var_seq(MAX_LEN, VOCAB_SIZE),
field_name=index, new_field_name=C.INPUTS(0),
is_input=True)
ds.apply_field(lambda x: self.gen_var_seq(MAX_LEN, VOCAB_SIZE),
field_name=index, new_field_name=C.INPUTS(1),
is_input=True)
ds.apply_field(lambda x: randrange(NUM_CLS),
field_name=index, new_field_name=C.TARGET,
is_target=True)
ds.apply_field(len, C.INPUTS(0), C.INPUT_LENS(0),
is_input=True, is_target=True)
ds.apply_field(len, C.INPUTS(1), C.INPUT_LENS(1),
is_input = True, is_target = True)
ds.set_input(C.INPUTS(0), C.INPUTS(1))
ds.set_target(C.TARGET)
return ds
def run_text_classification(self, model, data=None):
if data is None:
data = self.prepare_text_classification_data()
loss = CrossEntropyLoss(pred=C.OUTPUT, target=C.TARGET)
metric = AccuracyMetric(pred=C.OUTPUT, target=C.TARGET)
self.run_model(model, data, loss, metric)
def run_pos_tagging(self, model, data=None):
if data is None:
data = self.prepare_pos_tagging_data()
loss = CrossEntropyLoss(pred=C.OUTPUT, target=C.TARGET, padding_idx=0)
metric = AccuracyMetric(pred=C.OUTPUT, target=C.TARGET, seq_len=C.INPUT_LEN)
self.run_model(model, data, loss, metric)
def run_nli(self, model, data=None):
if data is None:
data = self.prepare_nli_data()
loss = CrossEntropyLoss(pred=C.OUTPUT, target=C.TARGET)
metric = AccuracyMetric(pred=C.OUTPUT, target=C.TARGET)
self.run_model(model, data, loss, metric)
def run_model(self, model, data, loss, metrics):
"""run a model, test if it can run with fastNLP"""
print('testing model:', model.__class__.__name__)
tester = Tester(data=data, model=model, metrics=metrics,
batch_size=BATCH_SIZE, verbose=0)
before_train = tester.test()
trainer = Trainer(model=model, train_data=data, dev_data=None,
n_epochs=N_EPOCHS, batch_size=BATCH_SIZE,
loss=loss,
save_path=None,
use_tqdm=False)
trainer.train(load_best_model=False)
after_train = tester.test()
for metric_name, v1 in before_train.items():
assert metric_name in after_train
# # at least we can sure model params changed, even if we don't know performance
# v2 = after_train[metric_name]
# assert v1 != v2
def run_model_with_task(self, task, model):
"""run a model with certain task"""
TASKS = {
TEXT_CLS: self.run_text_classification,
POS_TAGGING: self.run_pos_tagging,
NLI: self.run_nli,
}
assert task in TASKS
TASKS[task](model)
RUNNER = ModelRunner()
```
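A hedged sketch of exercising the runner; `CNNText` and its constructor signature are assumptions about the surrounding fastNLP package and are not shown in this file.
```python
# Hypothetical model; the CNNText signature is an assumption.
from fastNLP.models import CNNText

model = CNNText((VOCAB_SIZE, 30), num_classes=NUM_CLS)
RUNNER.run_model_with_task(TEXT_CLS, model)   # trains one epoch on fake data and re-tests
```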
|
{
"source": "jerri/mpg123-python",
"score": 2
}
|
#### File: jerri/mpg123-python/mpg123.py
```python
import ctypes
from ctypes.util import find_library
import sys
VERBOSE = 0
OK = 0
NEED_MORE = -10
NEW_FORMAT = -11
DONE = -12
MONO = 1
STEREO = 2
ENC_8 = 0x00f
ENC_16 = 0x040
ENC_24 = 0x4000
ENC_32 = 0x100
ENC_SIGNED = 0x080
ENC_FLOAT = 0xe00
ENC_SIGNED_16 = (ENC_16 | ENC_SIGNED | 0x10)
ENC_UNSIGNED_16 = (ENC_16 | 0x20)
ENC_UNSIGNED_8 = 0x01
ENC_SIGNED_8 = (ENC_SIGNED | 0x02)
ENC_ULAW_8 = 0x04
ENC_ALAW_8 = 0x08
ENC_SIGNED_32 = (ENC_32 | ENC_SIGNED | 0x1000)
ENC_UNSIGNED_32 = (ENC_32 | 0x2000)
ENC_SIGNED_24 = (ENC_24 | ENC_SIGNED | 0x1000)
ENC_UNSIGNED_24 = (ENC_24 | 0x2000)
ENC_FLOAT_32 = 0x200
ENC_FLOAT_64 = 0x400
class ID3v1(ctypes.Structure):
_fields_ = [
('tag', ctypes.c_char * 3),
('title', ctypes.c_char * 30),
('artist', ctypes.c_char * 30),
('album', ctypes.c_char * 30),
('year', ctypes.c_char * 4),
('comment', ctypes.c_char * 30),
('genre', ctypes.c_ubyte),
]
class Mpg123:
_lib = None
class LibInitializationException(Exception):
pass
class OpenFeedException(Exception):
pass
class CloseException(Exception):
pass
class OpenFileException(Exception):
pass
class NotFeedException(Exception):
pass
class FeedingException(Exception):
pass
class FormatException(Exception):
pass
class DecodeException(Exception):
pass
class NeedMoreException(Exception):
pass
class DoneException(Exception):
pass
class LengthException(Exception):
pass
class ID3Exception(Exception):
pass
def plain_strerror(self, errcode):
self._lib.mpg123_plain_strerror.restype = ctypes.c_char_p
return self._lib.mpg123_plain_strerror(errcode).decode()
def init_library(self, library_path=None):
if not library_path:
library_path = find_library('mpg123')
if not library_path:
library_path = find_library('libmpg123-0')
if not library_path:
raise self.LibInitializationException('libmpg123 not found')
lib = ctypes.CDLL(library_path)
errcode = lib.mpg123_init()
if errcode != OK:
raise self.LibInitializationException(self.plain_strerror(errcode))
return lib
def __init__(self, filename=None, library_path=None):
self.handle = None
if not self._lib:
self._lib = self.init_library(library_path)
self._lib.mpg123_new.restype = ctypes.c_void_p
self.c_handle = self._lib.mpg123_new(ctypes.c_char_p(None), None)
self.handle = ctypes.c_void_p(self.c_handle)
self.is_feed = filename is None
self.offset = ctypes.c_size_t(0)
if self.is_feed:
errcode = self._lib.mpg123_open_feed(self.handle)
if errcode != OK:
raise self.OpenFeedException(self.plain_strerror(errcode))
else:
errcode = self._lib.mpg123_open(self.handle, filename.encode())
if errcode != OK:
raise self.OpenFileException(self.plain_strerror(errcode))
def feed(self, data):
if not self.is_feed:
raise self.NotFeedException('instance is not in feed mode')
# encode string to bytes in modern python
if sys.version_info[0] >= 3 and isinstance(data, str):
data = data.encode()
data = memoryview(data)
errcode = self._lib.mpg123_feed(self.handle,
ctypes.c_char_p(data.tobytes()),
len(data))
if errcode != OK:
raise self.FeedingException(self.plain_strerror(errcode))
def get_id3(self):
v1 = ctypes.c_void_p()
v2 = ctypes.c_void_p()
errcode = self._lib.mpg123_id3(self.handle,
ctypes.pointer(v1),
ctypes.pointer(v2))
if errcode != OK:
raise self.ID3Exception(self.plain_strerror(errcode))
if v1.value is None:
raise self.ID3Exception(self.plain_strerror(errcode))
return ctypes.cast(v1, ctypes.POINTER(ID3v1)).contents
def get_format(self):
rate = ctypes.c_int(0)
channels = ctypes.c_int(0)
encoding = ctypes.c_int(0)
errcode = self._lib.mpg123_getformat(self.handle,
ctypes.pointer(rate),
ctypes.pointer(channels),
ctypes.pointer(encoding))
if errcode != OK:
if errcode == NEED_MORE:
raise self.NeedMoreException(self.plain_strerror(errcode))
raise self.FormatException(self.plain_strerror(errcode))
return (rate.value, channels.value, encoding.value)
def get_width_by_encoding(self, encoding):
return self._lib.mpg123_encsize(encoding)
def length(self):
errcode = self._lib.mpg123_length(self.handle)
if errcode <= 0:
if errcode == NEED_MORE:
raise self.NeedMoreException(self.plain_strerror(errcode))
raise self.LengthException(self.plain_strerror(errcode))
return errcode
def frame_length(self):
errcode = self._lib.mpg123_framelength(self.handle)
if errcode <= 0:
if errcode == NEED_MORE:
raise self.NeedMoreException(self.plain_strerror(errcode))
raise self.LengthException(self.plain_strerror(errcode))
return errcode
def decode_frame(self):
audio = ctypes.c_char_p()
done = ctypes.c_size_t(0)
errcode = self._lib.mpg123_decode_frame(self.handle,
ctypes.pointer(self.offset),
ctypes.pointer(audio),
ctypes.pointer(done))
if errcode == OK:
return ctypes.string_at(audio, done.value)
if errcode == NEED_MORE:
raise self.NeedMoreException(self.plain_strerror(errcode))
if errcode == NEW_FORMAT:
return self.decode_frame()
if errcode == DONE:
raise self.DoneException(self.plain_strerror(errcode))
raise self.DecodeException(self.plain_strerror(errcode))
def iter_frames(self, new_format_callback=None):
self.offset = ctypes.c_size_t(0)
audio = ctypes.c_char_p()
done = ctypes.c_size_t(0)
while True:
errcode = self._lib.mpg123_decode_frame(
self.handle,
ctypes.pointer(self.offset),
ctypes.pointer(audio),
ctypes.pointer(done))
if errcode == OK:
yield ctypes.string_at(audio, done.value)
else:
if errcode in (NEED_MORE, DONE):
break
if errcode == NEW_FORMAT:
if new_format_callback:
new_format_callback(*self.get_format())
continue
raise self.DecodeException(self.plain_strerror(errcode))
def __del__(self):
if not self.handle:
return
errcode = self._lib.mpg123_close(self.handle)
if errcode != OK:
raise self.CloseException(self.plain_strerror(errcode))
class Out123:
_lib = None
class LibInitializationException(Exception):
pass
class OpenException(Exception):
pass
class CloseException(Exception):
pass
class StartException(Exception):
pass
class PlayingException(Exception):
pass
def init_library(self, library_path=None):
if not library_path:
library_path = find_library('out123')
if not library_path:
library_path = find_library('libout123-0')
if not library_path:
raise self.LibInitializationException('libout123 not found')
return ctypes.CDLL(library_path)
def plain_strerror(self, errcode):
self._lib.out123_plain_strerror.restype = ctypes.c_char_p
return self._lib.out123_plain_strerror(errcode).decode()
def __init__(self, library_path=None, modules=None):
self.handle = None
if not self._lib:
self._lib = self.init_library(library_path)
self._lib.out123_new.restype = ctypes.c_void_p
self.c_handle = self._lib.out123_new()
self.handle = ctypes.c_void_p(self.c_handle)
if modules is None:
modules = ctypes.c_char_p(None)
else:
modules = modules.encode()
errcode = self._lib.out123_open(self.handle,
modules,
ctypes.c_char_p(None))
if errcode != OK:
raise self.OpenException(self.plain_strerror(errcode))
def start(self, rate, channels, encoding):
errcode = self._lib.out123_start(self.handle, rate, channels, encoding)
if errcode != OK:
raise self.StartException(self.plain_strerror(errcode))
def play(self, data):
# encode string to bytes in modern python
if sys.version_info[0] >= 3 and isinstance(data, str):
data = data.encode()
data = memoryview(data)
return self._lib.out123_play(self.handle,
ctypes.c_char_p(data.tobytes()),
len(data))
def __del__(self):
if not self.handle:
return
self._lib.out123_close(self.handle)
```
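A minimal decode-and-play sketch using the two wrappers above; the mp3 path is hypothetical. Passing `out.start` as the new-format callback lets playback follow the stream's rate, channels, and encoding.
```python
mp3 = Mpg123('example.mp3')   # hypothetical file path
out = Out123()

# iter_frames() calls out.start(rate, channels, encoding) whenever the format is (re)detected
for frame in mp3.iter_frames(out.start):
    out.play(frame)
```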
#### File: mpg123-python/tests/test_mpg123.py
```python
import unittest
import mpg123
from mpg123 import Mpg123
import sys
class TestMpg123(unittest.TestCase):
def test_feeding_need_more(self):
mp3 = Mpg123()
mp3.feed(b'')
with self.assertRaises(Mpg123.NeedMoreException):
mp3.decode_frame()
def test_feeding_need_more_not_empty(self):
mp3 = Mpg123()
mp3.feed(b'\0\0\0\0')
with self.assertRaises(Mpg123.NeedMoreException):
mp3.decode_frame()
def test_get_format_need_more(self):
mp3 = Mpg123()
with self.assertRaises(Mpg123.NeedMoreException):
mp3.get_format()
def test_feeding_need_more_bytearray(self):
mp3 = Mpg123()
mp3.feed(bytearray(8))
with self.assertRaises(Mpg123.NeedMoreException):
mp3.decode_frame()
def test_feeding_need_more_string(self):
mp3 = Mpg123()
mp3.feed('hello')
with self.assertRaises(Mpg123.NeedMoreException):
mp3.decode_frame()
def test_feeding_file(self):
mp3 = Mpg123()
with open('tests/bensound-scifi.mp3', 'rb') as f:
while True:
data = f.read(4096)
if not data:
break
mp3.feed(data)
rate, channels, encoding = mp3.get_format()
self.assertEqual(rate, 44100)
self.assertEqual(channels, 2)
self.assertEqual(encoding, 208)
self.assertEqual(mp3.get_width_by_encoding(encoding), 2)
self.assertEqual(encoding, mpg123.ENC_SIGNED_16)
frame = mp3.decode_frame()
self.assertEqual(len(frame), 188)
frame = mp3.decode_frame()
self.assertEqual(len(frame), 4608)
def test_file_format(self):
mp3 = Mpg123('tests/bensound-epic.mp3')
rate, channels, encoding = mp3.get_format()
self.assertEqual(rate, 44100)
self.assertEqual(channels, 2)
self.assertEqual(encoding, 208)
def test_file_format2(self):
mp3 = Mpg123('tests/bensound-scifi.mp3')
rate, channels, encoding = mp3.get_format()
self.assertEqual(rate, 44100)
self.assertEqual(channels, 2)
self.assertEqual(encoding, 208)
def test_file_frame(self):
mp3 = Mpg123('tests/bensound-epic.mp3')
frame = mp3.decode_frame()
self.assertEqual(len(frame), 188)
frame = mp3.decode_frame()
self.assertEqual(len(frame), 4608)
def test_file_id3(self):
mp3 = Mpg123('tests/bensound-epic.mp3')
id3 = mp3.get_id3()
self.assertEqual(id3.artist, 'Bensound'.encode())
self.assertEqual(id3.year, '2017'.encode())
self.assertEqual(id3.genre, 0)
def test_file_id3_song2(self):
mp3 = Mpg123('tests/bensound-scifi.mp3')
id3 = mp3.get_id3()
self.assertEqual(id3.artist, 'http://www.bensound.com'.encode())
self.assertEqual(id3.year, '2012'.encode())
self.assertEqual(id3.genre, 1)
def test_file_all_frames(self):
mp3 = Mpg123('tests/bensound-epic.mp3')
frames = [frame for frame in mp3.iter_frames()]
self.assertEqual(len(frames), 6835)
def test_file_frame_data(self):
mp3 = Mpg123('tests/bensound-epic.mp3')
frames = [frame for frame in mp3.iter_frames()]
if sys.version_info[0] >= 3:
self.assertEqual(frames[17][22], 30)
else:
self.assertEqual(ord(frames[17][22]), 30)
def test_file_length(self):
mp3 = Mpg123('tests/bensound-epic.mp3')
self.assertEqual(mp3.length(), 7872625)
def test_file_frame_length(self):
mp3 = Mpg123('tests/bensound-epic.mp3')
self.assertEqual(mp3.frame_length(), 6835)
def test_feed_frame_length(self):
mp3 = Mpg123()
with self.assertRaises(Mpg123.NeedMoreException):
mp3.frame_length()
def test_feed_length(self):
mp3 = Mpg123()
with self.assertRaises(Mpg123.NeedMoreException):
mp3.length()
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jerrington/ISS-Tracker",
"score": 3
}
|
#### File: jerrington/ISS-Tracker/ISS_TRACKER.py
```python
import time
import tkinter as tk
from datetime import date
from tkinter import TOP, BOTH, BOTTOM, Label
import matplotlib as mpl
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import requests
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from mpl_toolkits.basemap import Basemap
mpl.rcParams['toolbar'] = 'None'
ISS_URL="http://api.open-notify.org/iss-now.json"
time1 = ''
date1 = ''
def tick():
global time1
# get the current local time from the PC
time2 = time.strftime('%H:%M')
# if time string has changed, update it
if time2 != time1:
time1 = time2
clock.config(text=time2)
clock.after(1000, tick)
def get_date():
global date1
#get current date
date2 = date.today().strftime('%b, %d %Y')
# compare
if date2 != date1:
date1 = date2
day.config(text=date2)
day.after(20000000, get_date)
# Function to get ISS position from NASA API
def get_iss_location():
json_data=''
while not json_data:
try:
json_data=requests.get(ISS_URL, timeout=30).json()
except requests.ConnectionError as e:
print(str(e))
time.sleep(10)
except requests.Timeout as e:
print(str(e))
time.sleep(10)
except requests.RequestException as e:
print(str(e))
time.sleep(10)
position=[float(json_data['iss_position']['longitude']), float(json_data['iss_position']['latitude'])]
return position
# Setup the figure for the animation
f = plt.figure(figsize=(8, 4.8), frameon=False, tight_layout={ "pad": 0.0 })
m = Basemap(projection='cyl', resolution='c')
m.drawcoastlines(color='#000000', linewidth=1)
m.drawcountries()
m.drawstates()
m.drawmapboundary(fill_color='xkcd:ocean')
m.fillcontinents(color='xkcd:dirt brown',lake_color='xkcd:lightblue')
# Set plot styles
x,y = m(0, 0)
x1, y1 = m(0, 0)
x2, y2 = m(0, 0)
# 0 - 180
line1 = m.plot(x, y, linestyle='-', color='xkcd:red')[0]
# -180 - 0
line2 = m.plot(x1, y1, linestyle='-', color='xkcd:black')[0]
# Marker
point = m.plot(x2, y2, marker='o', color='xkcd:red', markersize=5)[0]
# Set blank canvas for animation init
def init():
point.set_data([], [])
line1.set_data([], [])
line2.set_data([], [])
return point, line1, line2,
# Red Array
array1_1=[]
# Blue Array
array2_1=[]
# animation function. This is called sequentially
def animate(i):
global array1_1
global array2_1
current_location=get_iss_location()
if 179.950 <= current_location[0] < 180:
array1_1.append(current_location)
time.sleep(2)
current_location=get_iss_location()
if current_location[0] < 0:
array2_1 = array1_1
array1_1 = []
array1_1.append(current_location)
else:
time.sleep(2)
current_location=get_iss_location()
array2_1 = array1_1
array1_1 = []
array1_1.append(current_location)
else:
array1_1.append(current_location)
# Load array1_1 data
if array1_1:
lons, lats = zip(*array1_1)
x1, y1 = m(lons, lats)
line1.set_data(x1, y1)
# Load array1_2 data
if array2_1:
lons2, lats2 = zip(*array2_1)
x2, y2 = m(lons2, lats2)
line2.set_data(x2, y2)
# Set marker
lons_point, lats_point = zip(current_location)
x_point, y_point = m(lons_point, lats_point)
point.set_data(x_point, y_point)
return point, line1, line2,
root = tk.Tk()
root.configure(background='grey')
root.geometry('800x480+0+0')
root.overrideredirect(1)
canvas = FigureCanvasTkAgg(f, master=root)
canvas.get_tk_widget().configure(background='grey')
canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
canvas._tkcanvas.pack(side=TOP, fill=BOTH, expand=1)
clock = Label(canvas.get_tk_widget(), font=('courier', 22, 'bold'), bg='grey')
clock.pack(side=BOTTOM, pady=1)
day = Label(canvas.get_tk_widget(),font=('courier', 22, 'bold'), bg='grey')
day.pack(side=TOP)
canvas.draw()
get_date()
tick()
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(f, animate, init_func=init,
frames=None, interval=2000, blit=False)
root.update()
root.mainloop()
```
|
{
"source": "jerri/pandoc-mermaid-filter",
"score": 2
}
|
#### File: jerri/pandoc-mermaid-filter/pandoc_mermaid_filter.py
```python
import os
import sys
import subprocess
from pandocfilters import toJSONFilter, Para, Image
from pandocfilters import get_filename4code, get_caption, get_extension
# Environment variables with fallback values
MERMAID_BIN = os.path.expanduser(os.environ.get('MERMAID_BIN', 'mermaid'))
PUPPETEER_CFG = os.environ.get('PUPPETEER_CFG', None)
def mermaid(key, value, format_, _):
if key == 'CodeBlock':
[[ident, classes, keyvals], code] = value
if "mermaid" in classes:
caption, typef, keyvals = get_caption(keyvals)
filename = get_filename4code("mermaid", code)
filetype = get_extension(format_, "png", html="svg", latex="png")
src = filename + '.mmd'
dest = filename + '.' + filetype
if not os.path.isfile(dest):
txt = code.encode(sys.getfilesystemencoding())
with open(src, "wb") as f:
f.write(txt)
# Default command to execute
cmd = [MERMAID_BIN, "-i", src, "-o", dest]
if PUPPETEER_CFG is not None:
cmd.extend(["-p", PUPPETEER_CFG])
if os.path.isfile('.puppeteer.json'):
cmd.extend(["-p", ".puppeteer.json"])
subprocess.check_call(cmd)
sys.stderr.write('Created image ' + dest + '\n')
return Para([Image([ident, [], keyvals], caption, [dest, typef])])
def main():
toJSONFilter(mermaid)
if __name__ == "__main__":
main()
```
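A hedged invocation sketch: pandoc applies the filter to fenced `mermaid` code blocks via its `--filter` option. The console-script name and the input/output file names below are assumptions.
```python
import subprocess

# Render a document whose mermaid code blocks become images;
# "pandoc-mermaid" is a hypothetical entry point for this module's main().
subprocess.check_call([
    "pandoc", "input.md",
    "--filter", "pandoc-mermaid",
    "-o", "output.html",
])
```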
|
{
"source": "jerroydmoore/YARB",
"score": 2
}
|
#### File: jerroydmoore/YARB/agent.py
```python
import time
import utils
import sys
from nlu.NLparser import NLparser
from nlg.NLgenerator import NLgenerator
from dm.dialogmanager import DialogManager
from datetime import date
from dm.imdb_wrapper import IMDBWrapper
from dm.localdb_wrapper import LocalDBWrapper
from nlu.entity import EntitySet
class Agent:
def __init__(self, verbose = False):
# load modules
# NLU, DB connection test
self.nlu = NLparser(verbose)
# verbose true for DM and NLG
verbose = True
self.dm = DialogManager(verbose)
self.nlg = NLgenerator(verbose)
self.sessionid = date.strftime(date.today(),"%y%m%d") + "_" + time.strftime("%H%M%S")
self.logger = utils.ConsoleAndFileLogger(self.sessionid)
def run(self):
self.logger.log("Hello. I am YARB (Yet Another Recommendation Bot).")
self.logger.log("Please tell me your name.")
usermsg = raw_input("> ")
self.logger.logtofile("> " + usermsg)
if (self.dm.processUserName(usermsg) == 1):
self.logger.log("Welcome back, " + usermsg + ".")
else:
self.logger.log("Nice to meet you, " + usermsg + ".")
self.logger.log("If you'd like a recommendation, please tell\nme about what you like or dislike.")
self.dm.loadOptions()
while not self.dm.sessionclosed():
usermsg = raw_input("> ")
self.logger.logtofile("> " + usermsg)
if usermsg == "":
continue
nluoutput = self.nlu.process(usermsg) # NLU
for output in nluoutput:
dmoutput = self.dm.process(output) # DM
#dmoutput = self.dm.process(nluoutput)
response = self.nlg.process(dmoutput) # NLG
self.logger.log(response)
self.dm.saveUserPreferences()
self.logger.log("Session closed [id = {0:s}].".format(self.sessionid))
def test(self, inputfilename):
print 'reading: ' + inputfilename
infile = open(inputfilename, 'r')
num = 1
breakpoint = 29
print 'processing... classifier: trivia'
for line in infile:
# NLU process
input = line.strip()
#print input
nluoutput = self.nlu.process(input) # NLU
#if num != breakpoint and nluoutput.get_classifier() != "userPreference":
# num = num + 1
# continue
#if breakpoint == num:
#print str(num) + ". " + line.replace('\\','') + " --> " + nluoutput.get_classifier() + " , [", nluoutput.tostr_entities(), "]"
#if nluoutput.get_classifier() == "trivia":
print str(num) + ". " + input
dmoutput = self.dm.process(nluoutput)
msg = self.nlg.process(dmoutput)
print "> " + msg
print
num = num + 1
def test_db():
IMDBWrapper()
pass
# main function
if __name__ == '__main__':
#test_db()
#Agent().test("./corpus1.txt")
Agent().run()
"""
# imdb test
localdb = LocalDBWrapper()
localdb.load_preference("ywkang")
#localdb.add_preference("genre", "Comedy", 4)
print localdb.get_preference()
db = IMDBWrapper()
entities = EntitySet("dummy")
db.get_recommendation(entities, localdb.get_preference())
"""
```
#### File: YARB/nlg/NLgenerator.py
```python
class NLgenerator:
MOVIELIST = "{name:s} ({year:d})"
RECOMMENDATION = "{listNumber:d}. {name:s} ({year:d})"
CASTLIST = "{name:s}"
RECOMMENDATIONS = "Here is a list of recommendations."
AFTERRECOMMENDATIONS = ("This is page {page:d} of {totalpages:d}.\n"
"You can change pages by typing next, \n"
"or give me another command.")
AFTERPAGINGRECOMMENDATIONS = ("This is page {page:d} of {totalpages:d}.\n"
"You can change pages by typing back or next, \n"
"or give me another command.")
RECOMMENDONE = ("I recommend {name:s}.\n"
"If you'd like a different recommendation,\n"
"tell me some more of your likes or dislikes.")
PREFERENCEPROCESSED = "Alright. Is there anything else you like or dislike?"
ASKINGFORRECOMMENDATION = "OK. Would you like a recommendation now?"
NOTUNDERSTOOD = "Wouldn't you like to talk about something else? Tell me about movies, genres, actors, and directors you like, and I'll recommend you a movie."
TOPRATINGMOVIES = "The followings are top rating movies, say 'next' if you would like to see more."
WORSTMOVIES = "The followings are worst movies ever, say 'next' if you would like to see more."
MOVIESEARCH = "Here's the list of movies I found:"
PLOT = "Here's the plot of {name:s}"
NOTAVAILABLE = "Sorry, we searched the database, but couldn't find any information"
DIRECTOR = "The director of {movietitle:s} is {directorname:s}"
CAST = "Here's the cast list of movie {name:s}"
QUITCONFIRM = "Are you sure you want to quit?"
NOTHING = "Tell me some things you like or dislike, and then ask me for a recommendation."
COULDNOTUNDERSTANDPREFERENCE = ("Please tell me about some movies, genres, actors, or directors you like.\n"
"Put movie titles in quotes; For example: I like \"Inception\".")
QUITTING = "Thank you for using YARB. Goodbye!"
QUIZ1 = "Before you quit, would you like to answer a quick survey?"
QUIZ2 = "Did you find the movies that were recommended satisfactory?"
QUIZ3 = "Did you think that the way the movies were displayed (single or list) was satisfactory?"
CHANGINGSCORECALCULATION = ("OK. We'll change the way we calculate the recommendations next time.\n"
"Did you think that the way the movies were displayed (single or list) was satisfactory?")
CHANGINGOUTPUTTYPE = ("OK. We'll change the way we display the recommendations next time.\n"
"Thanks for answering the questions. Quitting now...")
THANKSFORANSWERING = "Thanks for answering the questions. Quitting now..."
INTHERE = "Yes, {name:s} is affiliated with \"{title:s}\""
NOTTHERE = "No, {name:s} is not affiliated with \"{title:s}\""
def __init__(self, verbose=False):
self.test = 0
self.verbose = verbose
pass
def process(self, dmoutput):
msg = []
# generate an output based on a template
if dmoutput.get_classifier() == "movielist":
msg.append(NLgenerator.MOVIESEARCH)
line = 1
for movies in dmoutput.get_items():
msg.append(str(line) + ". " + NLgenerator.MOVIELIST.format(name=movies.find_entity("name"), year=movies.find_entity("year")))
line = line + 1
elif dmoutput.get_classifier() == "recommendations":
msg.append(NLgenerator.RECOMMENDATIONS)
for movies in dmoutput.get_items():
msg.append(NLgenerator.RECOMMENDATION.format(listNumber=movies.find_entity("listNumber"), name=movies.find_entity("name"), year=movies.find_entity("year")))
msg.append(NLgenerator.AFTERRECOMMENDATIONS.format(page=dmoutput.getPageNumber(), totalpages=dmoutput.getTotalPages()))
elif dmoutput.get_classifier() == "recommendationsPaging":
for movies in dmoutput.get_items():
msg.append(NLgenerator.RECOMMENDATION.format(listNumber=movies.find_entity("listNumber"), name=movies.find_entity("name"), year=movies.find_entity("year")))
msg.append(NLgenerator.AFTERPAGINGRECOMMENDATIONS.format(page=dmoutput.getPageNumber(), totalpages=dmoutput.getTotalPages()))
elif dmoutput.get_classifier() == "recommendOne":
msg.append(NLgenerator.RECOMMENDONE.format(name=dmoutput.get_movietitle()))
elif dmoutput.get_classifier() == "preferenceProcessed":
msg.append(NLgenerator.PREFERENCEPROCESSED)
elif dmoutput.get_classifier() == "askingForRecommendation":
msg.append(NLgenerator.ASKINGFORRECOMMENDATION)
elif dmoutput.get_classifier() == "notunderstood":
msg.append(NLgenerator.NOTUNDERSTOOD)
elif dmoutput.get_classifier() == "topratingmovies":
msg.append(NLgenerator.TOPRATINGMOVIES)
line = 1
for movies in dmoutput.get_items():
msg.append(str(line) + ". " + NLgenerator.MOVIELIST.format(name=movies.find_entity("name"), year=movies.find_entity("year")))
line = line + 1
elif dmoutput.get_classifier() == "worstmovies":
msg.append(NLgenerator.WORSTMOVIES)
line = 1
for movies in dmoutput.get_items():
msg.append(str(line) + ". " + NLgenerator.MOVIELIST.format(name=movies.find_entity("name"), year=movies.find_entity("year")))
line = line + 1
elif dmoutput.get_classifier() == "plot":
msg.append(NLgenerator.PLOT.format(name=dmoutput.get_movietitle()))
msg.append(dmoutput.get_items()[0].find_entity("plot"))
elif dmoutput.get_classifier() == "notavailable":
msg.append(NLgenerator.NOTAVAILABLE)
elif dmoutput.get_classifier() == "director":
msg.append(NLgenerator.DIRECTOR.format(movietitle=dmoutput.get_movietitle(), directorname = dmoutput.get_items()[0].find_entity("name") ))
elif dmoutput.get_classifier() == "cast":
msg.append(NLgenerator.CAST.format(name=dmoutput.get_movietitle()))
line = 1
for movies in dmoutput.get_items():
msg.append(str(line) + ". " + NLgenerator.CASTLIST.format(name=movies.find_entity("name")))
line = line + 1
elif dmoutput.get_classifier() == "quitConfirm":
msg.append(NLgenerator.QUITCONFIRM)
elif dmoutput.get_classifier() == "nothing":
msg.append(NLgenerator.NOTHING)
elif dmoutput.get_classifier() == "couldNotUnderstandPreference":
msg.append(NLgenerator.COULDNOTUNDERSTANDPREFERENCE)
elif dmoutput.get_classifier() == "quitting":
msg.append(NLgenerator.QUITTING)
elif dmoutput.get_classifier() == "quiz1":
msg.append(NLgenerator.QUIZ1)
elif dmoutput.get_classifier() == "quiz2":
msg.append(NLgenerator.QUIZ2)
elif dmoutput.get_classifier() == "quiz3":
msg.append(NLgenerator.QUIZ3)
elif dmoutput.get_classifier() == "changingOutputType":
msg.append(NLgenerator.CHANGINGOUTPUTTYPE)
elif dmoutput.get_classifier() == "changingScoreCalculation":
msg.append(NLgenerator.CHANGINGSCORECALCULATION)
elif dmoutput.get_classifier() == "thanksForAnswering":
msg.append(NLgenerator.THANKSFORANSWERING)
elif dmoutput.get_classifier().lower() == "inthere":
msg.append(NLgenerator.INTHERE.format(name= dmoutput.get_items()[0].find_entity("name"), title=dmoutput.get_movietitle() ))
elif dmoutput.get_classifier().lower() == "notthere":
msg.append(NLgenerator.NOTTHERE.format(name= dmoutput.get_items()[0].find_entity("name"), title=dmoutput.get_movietitle() ))
else:
if (self.verbose == True):
msg.append("NLG classifier: "+dmoutput.get_classifier())
return "\n".join(msg)
```
|
{
"source": "JerrrrryL/BlowfishDB-backend",
"score": 3
}
|
#### File: BlowfishDB-backend/blowfish/app.py
```python
from flask import Flask, request, jsonify
from flask_restful import Resource, Api
from blowfish import run_analysis
app = Flask(__name__)
api = Api(app)
#define constants
ATTRNAME = 'attrName'
ATTRTYPE = 'attrType'
WORKLOAD = 'workload'
THRESHOLDS = 'thresholds'
SENSITIVITY_SET = 'sensitivity_set'
RESULTS = 'results'
# info = {
# "age": {
# "type": "Numerical",
# "policy": {
# "template": "Line",
# "graph": 7,
# "bot": 0
# }
# },
# "salary": {
# "type": "Categorical",
# "domain": [1,2,4,5,6,7,8,9,10,12,14],
# "policy": {
# "template": "Sensitivity",
# "graph": [1,4,7,5,6,9],
# "bot": 0
# }
# }
# }
####
class WorkloadForComparison(Resource):
def post(self):
data = request.get_json(force=True)
predicates = data[WORKLOAD]
attrname = data[ATTRNAME]
results = []
if data[ATTRTYPE] == 'Numerical':
thresholds = data[THRESHOLDS]
for val in thresholds:
config = {
attrname: {
"type": "Numerical",
"policy":{
"template": "Line",
"graph": val,
"bot": 0
}
}
}
results.append(run_analysis(predicates,[attrname],config))
return jsonify({RESULTS: results})
else:
print("TBD")
return jsonify({RESULTS: results})
api.add_resource(WorkloadForComparison, '/')
if __name__ == '__main__':
app.run(debug=True)
```
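A hedged client-side sketch for the `/` endpoint above; the host and port assume the default Flask development server, and the predicate strings are made up.
```python
import requests

payload = {
    "attrName": "age",
    "attrType": "Numerical",
    "workload": ["age >= 18 AND age <= 30", "age > 65"],
    "thresholds": [5, 7, 9],
}
resp = requests.post("http://127.0.0.1:5000/", json=payload)
print(resp.json()["results"])   # one analysis result per threshold
```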
#### File: BlowfishDB-backend/blowfish/WCQ.py
```python
from collections import defaultdict
import numpy as np
class WCQ:
#salary 1~50, age 18~100
def __init__(self, cols, conds, table):
self.columns = cols #[salary, age]
self.domain_per_column = defaultdict(set)
self.predicates = conds #["salary >= 0 AND salary <= 20 AND age <= 20"]
#the columns in predicates should be ordered
self.table_name = table
#For intermediate result
self.PG = None
self.W = None
self.x= None
#For results
self.true_answer = None
self.noisy_answer = None
self.answer_diff = None
#SELECT id, [col1], [col2] FROM table1
def generate_base_query(self):
query = "SELECT id"
for col in self.columns:
query += ", {}".format(col)
query += " FROM {}".format(self.table_name)
return query
#generate x and W
def run_query(self, db):
base_query = self.generate_base_query()
results_by_query = []
print("Running queries...")
#execute the query to retrieve [(id, val1, val2, ...)]
#collect present values for each column
for pred in self.predicates:
res = db.run(base_query+' WHERE '+pred)
for i in range(len(self.columns)):
self.domain_per_column[self.columns[i]].update([record[i+1] for record in res])
results_by_query.append(res)
print("Collecting domain for each column...")
#calculate size of x
total_size = 1
for col in self.columns:
total_size *= len(self.domain_per_column[col])
assert total_size > 0
#generate a mapping for val of each column to its sorted index in the column domain
#e.g. column_domain:{3,1,2}, value 1 maps to index 0
value_to_index_per_column = defaultdict(dict)
for col in self.columns:
vals = list(self.domain_per_column[col])
indices = np.argsort(vals)
for i in range(len(vals)):
value_to_index_per_column[col][vals[i]]=indices[i]
def get_index_in_x(rec,total_size):
index = 0
for i in range(1, len(rec)):
column = self.columns[i-1]
val = rec[i]
total_size /= len(self.domain_per_column[column])
index += value_to_index_per_column[column][val]*total_size
return int(index)
print("Generating W and x") #TODO DEBUG
#print(len(value_to_index_per_column['salary']), len(value_to_index_per_column['age']),total_size)
cache = {}
W = np.zeros((len(self.predicates), total_size))
x = np.zeros(total_size)
#generate W and x
for i in range(len(results_by_query)):
for rec in results_by_query[i]:
index = None
if rec in cache:
index = cache[rec]
W[i][index] = 1
else:
index = get_index_in_x(rec, total_size)
cache[rec] = index
W[i][index] = 1
x[index] += 1
self.W = W
self.x = x
print(W)
print(x)
```
#### File: BlowfishDB-backend/conn/DB.py
```python
import mysql.connector
# global dict to reuse previous results
pre_rs = dict()
class DB:
def __init__(self, d):
self.cnx = mysql.connector.connect(user='root', password='<PASSWORD>', host='localhost', use_pure=True, database=d)
# print("DB: connection established")
def close_conn(self):
self.cnx.close()
@staticmethod
def clear_history():
# reset the pre_rs
global pre_rs
pre_rs = dict()
def run(self, query):
global pre_rs
if query in pre_rs:
# print("DEBUG: reuse query")
return pre_rs[query]
cursor = self.cnx.cursor()
cursor.execute(query)
result_set = cursor.fetchall()
# print(query, "\t", len(result_set))
cursor.close()
pre_rs[query] = result_set
return result_set
```
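A hedged sketch wiring the `DB` connection to a `WCQ` workload; the database name, table, predicates, and import paths are assumptions based on the file headers above.
```python
# Hypothetical imports matching the file paths shown above
from blowfish.WCQ import WCQ
from conn.DB import DB

db = DB('blowfish')   # hypothetical database name
query = WCQ(cols=['salary', 'age'],
            conds=["salary >= 0 AND salary <= 20 AND age <= 20",
                   "salary > 20 AND age > 20"],
            table='employees')
query.run_query(db)   # fills query.W (workload matrix) and query.x (domain histogram)
db.close_conn()
```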
#### File: privacy/func/LCM.py
```python
import numpy as np
def lcm(m):
"""laplace comparison mechanism for icq"""
q = m.query
lap_b = m.lap_b
q.lap_noise = np.random.laplace(0, lap_b, len(q.cond_list))
# real cost is same to the estimated one
m.set_real_cost(m.est_cost)
q.true_answer = np.matmul(q.query_matrix, q.domain_hist)
# print("true answer=", q.true_answer)
q.noisy_answer = [sum(x) for x in zip(q.lap_noise, q.true_answer)]
# for i in range(0, q.count_query):
# if q.true_answer[i] > q.count_threshold + m.alpha:
# q.big_cond_index.append(i)
# elif q.true_answer[i] < q.count_threshold - m.alpha:
# q.small_cond_index.append(i)
cmp_result_noisy = [x - q.icq_c for x in q.noisy_answer]
print("cmp_result_noisy= ", cmp_result_noisy)
for i in range(0, len(q.cond_list)):
if cmp_result_noisy[i] > 0:
q.selected_cond_index.append(i)
# for i in range(0, len(q.cond_list)):
# print("PRINT", q.cond_list[i], q.true_answer[i], q.true_answer[i] / q.cardinality, sep=',')
print(len(q.selected_cond_index))
def lcm_est_cost(m):
q = m.query
est_cost = q.get_sensitivity() * (np.log(1.0 / (1.0 - (1.0 - m.beta) ** (1.0 / len(q.cond_list)))) - np.log(2)) / m.alpha
return est_cost
```
|
{
"source": "Jerrrrry/traffic",
"score": 3
}
|
#### File: Jerrrrry/traffic/get_proxy.py
```python
import requests
def get_proxy():
try:
# Fill in the Daxiang proxy API address here; the num parameter must be 1 so that only one IP address is requested per call
token=get_token()
url = 'https://api.getproxylist.com/proxy?country[]=US&lastTested=600&maxConnectTime=1&apiKey='+token
response = requests.get(url)
response.close()
protocol=str(response.json()['protocol'])
ip = response.json()['ip']
port=str(response.json()['port'])
result=protocol+'://'+ip+':'+port
print(result)
return result
except:
print('no ip available')
return ''
finally:
pass
```
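A short hedged sketch showing how the returned proxy string could be used with `requests`; the target URL is arbitrary.
```python
proxy = get_proxy()
if proxy:
    # e.g. proxy == "http://1.2.3.4:8080"
    resp = requests.get("https://example.com",
                        proxies={"http": proxy, "https": proxy},
                        timeout=10)
    print(resp.status_code)
```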
|
{
"source": "jerrutledge/Poke-Controller",
"score": 3
}
|
#### File: PythonCommands/ImageProcessingOnly/FossilShiny.py
```python
from Commands.PythonCommandBase import PythonCommand, ImageProcPythonCommand
from Commands.Keys import KeyPress, Button, Direction, Stick
class Fossil_shiny(ImageProcPythonCommand):
def __init__(self, cam):
super().__init__(cam)
'''
head = {0 : "カセキのトリ", 1 : "カセキのサカナ"}
body = {0 : "カセキのリュウ", 1 : "カセキのクビナガ"}
'''
def fossil_loop(self, head=0, body=0):
# start = time.time()
i = 0
while True:
for j in range(30):
print("Pokemon No. {} ({}/30 of a box)".format(30*i+j+1, j+1))
self.press(Button.A, wait=0.75)
self.press(Button.A, wait=0.75)
if head == 1:
self.press(Direction.DOWN, duration=0.07, wait=0.75) # select fossil
self.press(Button.A, wait=0.75) # determine fossil
if body == 1:
self.press(Direction.DOWN, duration=0.07, wait=0.75) # select fossil
self.press(Button.A, wait=0.75) # determine fossil
self.press(Button.A, wait=0.75) # select "それでよければ"
while not self.isContainTemplate('Network_Offline.png', 0.8):
self.press(Button.B, wait=0.5)
self.wait(1.0)
# open up pokemon box
self.press(Button.X, wait=1)
self.press(Direction.RIGHT, duration=0.07, wait=1)
self.press(Button.A, wait=2)
self.press(Button.R, wait=2)
is_contain_shiny = self.CheckBox()
# tm = round(time.time() - start, 2)
# print('Loop : {} in {} sec. Average: {} sec/loop'.format(i, tm, round(tm / i, 2)))
if is_contain_shiny:
print('Shiny!')
break
self.press(Button.HOME, wait=2) # EXIT Game
self.press(Button.X, wait=0.6)
self.press(Button.A, wait=2.5) # closed
self.press(Button.A, wait=2.0) # Choose game
self.press(Button.A) # User selection
while not self.isContainTemplate('OP.png', 0.7): # recognize Opening
self.wait(0.2)
self.press(Button.A) # load save-data
while not self.isContainTemplate('Network_Offline.png', 0.8):
self.wait(0.5)
self.wait(1.0)
i += 1
def CheckBox(self):
row = 5
col = 6
for i in range(0, row):
for j in range(0, col):
# if shiny, then stop
if self.isContainTemplate('shiny_mark.png', threshold=0.9):
return True
# Maybe this threshold works for only Japanese version.
if self.isContainTemplate('status.png', threshold=0.7):
pass
if not j == col - 1:
if i % 2 == 0:
self.press(Direction.RIGHT, wait=0.2)
else:
self.press(Direction.LEFT, wait=0.2)
self.press(Direction.DOWN, wait=0.2)
return False
class Fossil_shiny_00(Fossil_shiny): # パッチラゴン (Dracozolt)
NAME = 'Shiny Fossil 00'
def __init__(self, cam):
super().__init__(cam)
def do(self):
self.fossil_loop(0, 0)
class Fossil_shiny_01(Fossil_shiny): # パッチルドン (Arctozolt)
NAME = 'Shiny Fossil 01'
def __init__(self, cam):
super().__init__(cam)
def do(self):
self.fossil_loop(0, 1)
class Fossil_shiny_10(Fossil_shiny): # ウオノラゴン (Dracovish)
NAME = 'Shiny Fossil 10'
def __init__(self, cam):
super().__init__(cam)
def do(self):
self.fossil_loop(1, 0)
class Fossil_shiny_11(Fossil_shiny): # ウオチルドン (Arctovish)
NAME = 'Shiny Fossil 11'
def __init__(self, cam):
super().__init__(cam)
def do(self):
self.fossil_loop(1, 1)
```
#### File: Commands/PythonCommands/Reset.py
```python
from Commands.PythonCommandBase import PythonCommand, ImageProcPythonCommand
from Commands.Keys import KeyPress, Button, Direction, Stick
# reset the game
class Reset(PythonCommand):
NAME = "Reset"
def __init__(self):
super().__init__()
def do(self):
self.wait(1)
self.press(Button.HOME, wait=1)
self.press(Button.X, wait=1)
self.press(Button.A, wait=5)
self.press(Button.A, wait=2)
self.press(Button.A, wait=18)
self.press(Button.A, wait=1)
```
|
{
"source": "jerry0317/JYLabTool",
"score": 3
}
|
#### File: JYL/methods/filem.py
```python
from ..classes.jyldata import JYLData
from ..classes.jyldataset import JYLDataSet
from ..classes.jyldatapoint import JYLDataPoint
import csv
def saveToCSV(dataset, path=None, file=None):
header = []
for n in dataset.dataNames:
header.extend(["{}_value".format(n), "{}_uncertainty".format(n), "{}_unit".format(n)])
if path is not None:
csvFile = open(path, "w")
if file is not None:
csvFile = file
dict_writer = csv.DictWriter(csvFile, header)
dict_writer.writeheader()
for dp in dataset.dataPoints:
dict = {}
for n in dataset.dataNames:
dict["{}_value".format(n)] = dp.valueDict[n]
dict["{}_uncertainty".format(n)] = dp.uncertaintyDict[n]
dict["{}_unit".format(n)] = dp.unitDict[n]
dict_writer.writerow(dict)
csvFile.close()
def openFromCSV(path=None, file=None):
if path is not None:
csvFile = open(path, "r")
if file is not None:
csvFile = file
dict_reader = csv.DictReader(csvFile,skipinitialspace=True)
di = {}
for h in dict_reader.fieldnames:
hs = h.rsplit("_", 1)
pre = hs[0]
suf = hs[1]
if pre not in di:
di[pre] = set([suf])
else:
di[pre].add(suf)
zSet = set(["value", "uncertainty", "unit"])
for k, v in di.items():
if v != zSet:
raise Exception("The format of CSV file is incorrect.")
names = di.keys()
s = JYLDataSet()
for row in dict_reader:
li = []
for n in names:
dic = {}
dic["name"] = n
dic["value"] = float(row["{}_value".format(n)])
dic["uncertainty"] = float(row["{}_uncertainty".format(n)])
dic["unit"] = row["{}_unit".format(n)]
li.append(dic)
dp = JYLDataPoint(fromList=li)
s.dataPoints.append(dp)
return s
```
|
{
"source": "jerry0317/JYMoleculeTool-ML-Swift",
"score": 3
}
|
#### File: Sources/JYMT-ML-StructureFinder/tf_prototype.py
```python
import csv
import random
import xyz2mol
## Some code referenced from Tensorflow Tutorial code: https://www.tensorflow.org/tutorials/keras/classification
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
TEST_RATIO = 0.15
FEATURES = [
"numAtoms",
"numBonds",
"numGroups",
"numAromaticAtoms",
"numAromaticBonds",
"numInRingAtoms",
"numInRingBonds",
"numOfSingleBonds",
"numOfDoubleBonds",
"numOfTripleBonds",
"numOfQuadrupleBonds",
"numOfCAtoms",
"numOfNonCHAtoms",
"numOfRotorBonds",
"mol2dPSA",
"molAnionicCarbonCount",
"molAromaticRingCount",
# "molFractionCsp3",
"molHalideFraction",
"molHBondAcceptorCount",
"molHBondDonorCount",
"molLipinskiAcceptorCount",
"molLipinskiDonorCount",
"molLongestUnbranchedHeavyAtomsChain",
"molLongestUnbranchedCarbonsChain",
"molNumUnspecifiedAtomStereos",
"molNumUnspecifiedBondStereos",
"molWeight"
] + [
"numsOfAtomWithImplicitHCount" + str(k) for k in range(9)
] + [
"numsOfAtomWithDegree" + str(k) for k in range(9)
] + [
"numsOfAtomWithExplicitDegree" + str(k) for k in range(9)
] + [
"numsOfAtomWithExplicitValence" + str(k) for k in range(9)
] + [
"numsOfAtomWithHvyDegree" + str(k) for k in range(9)
] + [
"numsOfAtomWithHvyValence" + str(k) for k in range(9)
] + [
"numsOfAtomWithValence" + str(k) for k in range(9)
] + [
"numsOfAtomWithHyb" + str(k) for k in range(6)
] + [
"numsOfAtomWithFormalCharge" + str(k - 4) for k in range(9)
]
NUM_FEATURES = len(FEATURES)
print("Number of features in use: {}".format(NUM_FEATURES))
def input_csv_reader():
hold = True
while hold:
path = input("Please enter the csv file path for SMILES and corresponding validity: ")
try:
reader = csv.reader(open(path.replace("\\", "").strip()), delimiter=',')
hold = False
except Exception as e:
print(e)
print("Invalid path. Please try again.")
return reader
def import_from_csv(reader, remove_duplicates=True):
smiles_list = []
validity_list = []
line_count = 0
for row in reader:
if line_count > 0:
smiles_list.append(row[0])
validity_list.append(int(row[1]))
line_count += 1
print(f'Processed {line_count} lines from csv file.')
if remove_duplicates:
snt_list = [(smiles_list[i], validity_list[i]) for i in range(len(smiles_list))]
snt_list = list(set(snt_list))
smiles_list = [snt_list[i][0] for i in range(len(snt_list))]
validity_list = [snt_list[i][1] for i in range(len(snt_list))]
print("Found {} unique data entries.".format(len(smiles_list)))
return smiles_list, validity_list
def sample_indices(len, test_ratio):
r = list(range(len))
num_te = int(round(len * test_ratio))
test_indices = random.sample(r, num_te)
train_indices = r
for i in sorted(test_indices, reverse=True):
del train_indices[i]
return train_indices, test_indices
def extract_features(dict):
return np.array([dict[f] for f in FEATURES])
# Print iterations progress
# Credit: https://gist.github.com/greenstick/b23e475d2bfdc3a82e34eaa1f6781ee4
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print(f'\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)
# Print New Line on Complete
if iteration == total:
print()
raw_smiles, raw_labels = import_from_csv(input_csv_reader())
num_of_compounds = len(list(filter(lambda x: x == 1, raw_labels)))
num_of_smiles = len(raw_smiles)
positive_rate = num_of_compounds / len(raw_labels)
print("Positive rate: ", positive_rate)
print("Negative rate: ", 1 - positive_rate)
train_indices, test_indices = sample_indices(len(raw_smiles), TEST_RATIO)
train_features = np.zeros((len(train_indices), NUM_FEATURES))
train_labels = np.zeros(len(train_indices), dtype=np.uint8)
test_features = np.zeros((len(test_indices), NUM_FEATURES))
test_labels = np.zeros(len(test_indices), dtype=np.uint8)
raw_features = []
printProgressBar(0, len(raw_smiles), prefix = 'Computing Features:', length = 48)
xyz2mol.nullifyOEThrowStream()
for i, smiles in enumerate(raw_smiles):
raw_features.append(xyz2mol.smiles2features(smiles))
printProgressBar(i + 1, len(raw_smiles), prefix = 'Computing Features:', length = 48)
# print(raw_features[:1])
for j, i_tr in enumerate(train_indices):
train_features[j, :] = extract_features(raw_features[i_tr])
train_labels[j] = raw_labels[i_tr]
for j, i_te in enumerate(test_indices):
test_features[j, :] = extract_features(raw_features[i_te])
test_labels[j] = raw_labels[i_te]
model = keras.Sequential([
keras.layers.Dense(NUM_FEATURES + 1, activation="relu"),
keras.layers.Dense(NUM_FEATURES * 2, activation="relu"),
keras.layers.Dense(NUM_FEATURES * 3, activation="relu"),
keras.layers.Dense(NUM_FEATURES * 4, activation="relu"),
keras.layers.Dense(1, activation="sigmoid")
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(train_features, train_labels, epochs=20)
print()
print("In Raw Data: {} compounds - {} SMILES - T: {:.2f}% - F: {:.2f}%".format(num_of_compounds, num_of_smiles, positive_rate * 100, (1 - positive_rate) * 100))
print()
test_loss, test_acc = model.evaluate(test_features, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
test_pred = (model.predict(test_features) > 0.5).astype("int32")
print(classification_report(test_labels, test_pred, labels=[1, 0], target_names=['T', 'F']))
```
|
{
"source": "jerry0317/JYMoleculeTool",
"score": 3
}
|
#### File: jerry0317/JYMoleculeTool/proj1-2.py
```python
import imp_xyz as ix
import m_tools as mt
import time
import itertools
file_name = "propanediol-1"
_, _, mol = ix.import_xyz("molecule_models/{}.xyz".format(file_name))
dmol = ix.select_by_name(mol,'C') + ix.select_by_name(mol,'O')
# Strip the signs from dmol (take absolute values) to obtain |x|, |y|, |z| for each oxygen and carbon atom
for i, m in enumerate(dmol):
dmol[i]['rvec'] = list(map(abs, dmol[i]['rvec']))
mol_C = ix.select_by_name(dmol, 'C')
mol_O = ix.select_by_name(dmol, 'O')
CO_BOND_LENGTH = 1.43
CC_BOND_LENGTH = 1.54
TOLERANCE_LEVEL = 0.15 # In bond-length filters, an atom passes the filter when the distance lies between BOND_LENGTH plus/minus TOLERANCE_LEVEL
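# Worked example of the tolerance: with CO_BOND_LENGTH = 1.43 and TOLERANCE_LEVEL = 0.15, a candidate
# atom passes the C-O filter when its distance to the reference atom lies between 1.28 and 1.58
# (in the same units as the xyz coordinates, typically Angstroms).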
# Fix the first O (O1)
# We choose the first fixed (and only fixed) atom to be an O because it is easy to find the C adjacent to it; the other two C atoms are farther away than its neighboring C.
O1 = mol_O[1]
print("O1 has been fixed.")
mol_Or = mol_O
mol_Or.remove(O1) # The remaining set of O excluding O1
def find_C2(O1, mol_C):
# Find the neighboring C of O1 (C2)
C2_possibles = mt.find_possibles(mol_C)
C2_af = filter(lambda m: mt.bond_length_filter(m['rvec'], O1['rvec'], CO_BOND_LENGTH, TOLERANCE_LEVEL), C2_possibles) # Change the tolerance range here to see how this filter works
return C2_af
def find_C3C4(C2, O1, mol_C):
# Find the possible neighboring C(s) of C2 (C3/C4)
C3_possibles = filter(lambda m: m['index'] != C2['index'], mt.find_possibles(mol_C))
d_C2O1 = mt.distance(C2['rvec'], O1['rvec'])
    C3_af = list(filter(lambda m: mt.bond_length_filter(m['rvec'], C2['rvec'], CC_BOND_LENGTH, TOLERANCE_LEVEL) and mt.distance(m['rvec'], O1['rvec']) > d_C2O1, C3_possibles)) # Materialize as a list so it can be iterated more than once below # Change the selection of O1 to see how the code below works # Extra filter: Make sure C3 is not close to O1 (which is adjacent to C2)
C3_af_index = set()
for c3 in C3_af:
C3_af_index.add(c3['index'])
if len(C3_af_index) == 1:
C2_adj = 1 # C2 is adjacent to one C. Choose that C to be C3.
C3C4_af = C3_af
elif len(C3_af_index) == 2:
C2_adj = 2 # C2 is adjacent to two C-s. Let those C-s be C3 and C4 respectively.
C3_af_grps = []
for i in C3_af_index:
C3_af_grps.append(filter(lambda m: m['index'] == i, C3_af))
C3C4_af = list(map(list, itertools.product(C3_af_grps[0], C3_af_grps[1])))
else:
C2_adj = 0
C3C4_af = []
return C2_adj, C3C4_af
def find_O5_C2adj1(C2, C3, mol_Or): # If C2 is adjacent to one C (C3), then C3 must be connected to an O (O5).
O5_possibles = mt.find_possibles(mol_Or)
O5_af = filter(lambda m: mt.bond_length_filter(m['rvec'], C3['rvec'], CO_BOND_LENGTH, TOLERANCE_LEVEL) and mt.distance(m['rvec'], C2['rvec']) > (CO_BOND_LENGTH + TOLERANCE_LEVEL), O5_possibles)
return O5_af
def find_C4_C2adj1(C2, C3, O5, mol_C):
# Find the possible C4
C4_possibles = filter(lambda m: m['index'] not in (C2['index'], C3['index']), mt.find_possibles(mol_C))
d_C3O5 = mt.distance(C3['rvec'], O5['rvec'])
C4_af = filter(lambda m: mt.bond_length_filter(m['rvec'], C3['rvec'], CC_BOND_LENGTH, TOLERANCE_LEVEL) and mt.distance(m['rvec'], O5['rvec']) > d_C3O5, C4_possibles) # Extra filter: Make sure C4 is not close to O5 (which is adjacent to C3)
return C4_af
def find_O5_C2adj2(C3, C4, mol_Or): # If C2 is adjacent to two C-s (C3, C4), then O5 must be adjacent to one of C3 and C4.
O5_possibles = mt.find_possibles(mol_Or)
O5_afC3 = filter(lambda m: mt.bond_length_filter(m['rvec'], C3['rvec'], CO_BOND_LENGTH, TOLERANCE_LEVEL) and mt.distance(m['rvec'], C4['rvec']) > (CO_BOND_LENGTH + TOLERANCE_LEVEL), O5_possibles) # Extra filter: If O5 is connected to C3, make sure it is not too close to C4 (vice versa)
O5_afC4 = filter(lambda m: mt.bond_length_filter(m['rvec'], C4['rvec'], CO_BOND_LENGTH, TOLERANCE_LEVEL) and mt.distance(m['rvec'], C3['rvec']) > (CO_BOND_LENGTH + TOLERANCE_LEVEL), O5_possibles)
    O5_af = list(O5_afC3) + list(O5_afC4) # materialize the filters so they can be concatenated and iterated
return O5_af
def save_mols(mols, icode = None):
print("-----The structure of molecule is found as follows:-----")
for m in mols:
print("{0} {1}".format(m['name'], m['rvec']))
if icode is None:
filename = 'molecule_results/{0}_{1}.xyz'.format(file_name,int(time.time()))
else:
filename = 'molecule_results/{0}_{1}_{2}.xyz'.format(file_name,int(time.time()),str(icode))
#ix.export_xyz(filename, mols)
print("Results saved to xyz file.")
# Use the algorithm above to determine the structure of the molecule (x, y, z for each carbon and oxygen atom)
C2_af = find_C2(O1, mol_C)
for C2 in C2_af:
C2_adj, C3C4_af = find_C3C4(C2, O1, mol_C)
if C2_adj == 1:
for C3C4 in C3C4_af:
C3 = C3C4
O5_af = find_O5_C2adj1(C2, C3, mol_Or)
for O5 in O5_af:
C4_af = find_C4_C2adj1(C2, C3, O5, mol_C)
i = 1
for C4 in C4_af:
save_mols([O1, C2, C3, C4, O5], i)
i = i + 1
elif C2_adj == 2:
for C3C4 in C3C4_af:
C3 = C3C4[0]
C4 = C3C4[1]
O5_af = find_O5_C2adj2(C3, C4, mol_Or)
i = 1
for O5 in O5_af:
save_mols([O1, C2, C3, C4, O5], i)
i = i + 1
```
|
{
"source": "jerry0317/Measuring-k_B",
"score": 3
}
|
#### File: jerry0317/Measuring-k_B/main.py
```python
import sys
import time
import math
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
from scipy import stats
import os
import csv
import itertools
import RPi.GPIO as GPIO
import board
import busio
import adafruit_bmp280
#GPIO Mode (BOARD / BCM)
GPIO.setmode(GPIO.BCM)
#set GPIO Pins
GPIO_TRIGGER = 18
GPIO_ECHO = 24
#set GPIO direction (IN / OUT)
GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(GPIO_ECHO, GPIO.IN)
i2c = busio.I2C(board.SCL, board.SDA)
bmp280 = adafruit_bmp280.Adafruit_BMP280_I2C(i2c)
if (bmp280.temperature > 0):
print()
print("BMP280 has been connected.")
print()
# Data Name Format
DATA_NAME = "data/{}".format(int(time.time()))
# Module to securely prompt for a user input
def user_input(val_name, val_range = None):
input_hold = True
while(input_hold):
try:
val_d = input("Please enter the value of {}: ".format(val_name))
val_d = float(val_d)
val_min = val_range[0]
val_max = val_range[1]
if val_d < val_min or val_d > val_max:
raise Exception("{} out of range.".format(val_name))
except Exception as e:
print(e)
print("ERROR. Please try again.")
else:
input_hold = False
print()
print("{0} is set as {1}.".format(val_name, val_d))
print()
return val_d
def timett():
# set Trigger to HIGH
GPIO.output(GPIO_TRIGGER, True)
# set Trigger after 0.01ms to LOW
time.sleep(0.00001)
GPIO.output(GPIO_TRIGGER, False)
# save StartTime
while GPIO.input(GPIO_ECHO) == 0:
StartTime = time.time()
# save time of arrival
while GPIO.input(GPIO_ECHO) == 1:
StopTime = time.time()
# time difference between start and arrival
TimeElapsed = StopTime - StartTime
return TimeElapsed
def temp_bmp():
temp_C = bmp280.temperature
temp_K = temp_C + 273.15
return temp_K
# Saving file name
def file_name(suffix):
return DATA_NAME + "." + str(suffix)
# Saving the data
def save_data():
h = ["Time", "Exp Distance", "Measured Time Diff", "Temperature", "Derived k_B", "Derived k_B Error"]
try:
with open(file_name("csv"), "w+") as f:
dict_writer = csv.DictWriter(f, h)
dict_writer.writeheader()
try:
count = len(time_arr)
if len(temp_arr) != count or len(tt_arr) != count or len(derived_kb_arr) != count:
raise Exception("Different list lengths.")
except Exception as e:
print(e)
else:
pass
for i in range(0, count):
dict_writer.writerow({
h[0]: time_arr[i],
h[1]: distance_d,
h[2]: tt_arr[i],
h[3]: temp_arr[i],
h[4]: derived_kb_arr[i],
h[5]: kb_err_abs_arr[i]
})
f.close()
print("\nData saved to {}.\n".format(file_name('csv')))
except Exception as e:
print(e)
else:
pass
pass
# Save the plot
def save_plot(fig):
    fig.savefig(file_name("eps"), format='eps')
print("\nPlot saved to {}.\n".format(file_name("eps")))
# Boltzmann constant (10^-23)
K_B = 1.38064852
# Avogadro constant (10^23)
N_A = 6.02214
# Experiment Constants
# DISTANCE = 1
MOLAR_MASS = 28.97 * 10 ** (-3)
GAMMA = 1.40
# Van der Waals Constants
VDW_A = 0
VDW_B = 0
# Controller Constants
DELAY = 1
# Experiment Error Constants
DIS_ERR_ABS = 0.005
TT_ERR_ABS = 4.665306263360271e-07
TEMP_ERR_ABS = 0.5
# List storing values
tt_arr = []
time_arr = []
temp_arr = []
derived_kb_arr = []
kb_err_abs_arr = []
def c_from_tt(tt, dis):
c_sound = dis / tt
return c_sound
def kb_from_tt(tt, temp, dis):
c_sound = c_from_tt(tt, dis)
kb = (c_sound ** 2) * MOLAR_MASS / (GAMMA * N_A * temp)
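    # Ideal-gas acoustics: c = sqrt(gamma * R * T / M) with R = k_B * N_A, so k_B = c^2 * M / (gamma * N_A * T).
    # Because N_A above is stored in units of 10^23, the returned value is in units of 10^-23 J/K,
    # directly comparable with K_B.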
return kb
# # Van der Waals Correction
# def kb_from_vdw_tt(tt, temp, pres, dis):
def err_from_tt_pct(tt, temp, dis):
dis_err_pct = DIS_ERR_ABS / dis
temp_err_pct = TEMP_ERR_ABS / temp
tt_err_pct = TT_ERR_ABS / tt
err_pct = 2 * (dis_err_pct + tt_err_pct) + temp_err_pct
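    # First-order error propagation: k_B ~ d^2 / (t^2 * T), so the fractional error is
    # 2*(delta_d/d) + 2*(delta_t/t) + delta_T/T, which is exactly the sum formed above.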
return err_pct
def err_from_tt_vdw_pct(tt, temp, pres, dis):
dis_err_pct = DIS_ERR_ABS / dis
temp_err_pct = TEMP_ERR_ABS / temp
tt_err_pct = TT_ERR_ABS / tt
err_pct = 2 * (dis_err_pct + tt_err_pct) + temp_err_pct
return err_pct
def err_arr_gp(x_arr, data_arr, err_arr):
if len(data_arr) != len(err_arr):
return False
else:
up_arr = []
low_arr = []
seg_arr = []
for i in range(0, len(data_arr)):
x_p = x_arr[i]
data_p = data_arr[i]
err_p = err_arr[i]
up_p = data_p + err_p
low_p = data_p - err_p
up_arr.append(up_p)
low_arr.append(low_p)
seg_arr.append([[x_p, low_p], [x_p, up_p]])
return (low_arr, up_arr, seg_arr)
distance_d = user_input("distance in cm", (1,200))
distance_d = distance_d / 100 * 2
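# The prompt asks for the one-way distance in cm: dividing by 100 converts it to metres and the
# factor of 2 accounts for the echo's round trip, which is what timett() actually measures.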
print()
print("NOTE: You can exit the recodring early by pressing ctrl + C.")
fig = plt.figure(1)
ax = plt.gca()
line, (bottoms, tops), verts = ax.errorbar([0], [0], yerr=0.01, capsize=3, fmt='ko', markersize=4, elinewidth=1,label="Realtime Measurement").lines
# st_lines = [plt.plot([], [], linestyle='dashed', label="Mean Measured Value")[0], plt.plot([], [], linestyle='dashed', label=r"True $k_B$")[0], plt.plot([], [], 'm', linestyle='dashed', label=r"+3$\sigma$")[0], plt.plot([], [], 'm', linestyle='dashed', label=r"-3$\sigma$")[0]]
st_lines = [plt.plot([], [], linestyle='dashed', label="Mean Measured Value")[0], plt.plot([], [], linestyle='dashed', label=r"True $k_B$")[0]]
t0 = time.perf_counter()
def plt_init():
plt.xlabel("Time (s)")
plt.ylabel(r"Derived $k_B$ ($10^{-23} J K^{-1}$)")
plt.legend(loc="lower right")
# line.set_xdata([0])
# line.set_ydata([0])
# bottoms.set_ydata([0])
# tops.set_ydata([0])
# for line in lines:
# line.set_data([], [])
return line, bottoms, tops, verts, st_lines
def main_controller(frame):
global tt_arr
global time_arr
global temp_arr
global derived_kb_arr
global kb_err_abs_arr
try:
tt = timett()
temp = temp_bmp()
c_s = c_from_tt(tt, distance_d)
kb_d = kb_from_tt(tt, temp, distance_d)
err_pct = err_from_tt_pct(tt, temp, distance_d)
err_abs = err_pct * kb_d
# Calculate time since started
t = time.perf_counter() - t0
# Recording data
tt_arr.append(tt)
time_arr.append(t)
temp_arr.append(temp)
derived_kb_arr.append(kb_d)
kb_err_abs_arr.append(err_abs)
kb_d_avg = np.mean(derived_kb_arr)
if len(time_arr) > 1:
kb_d_sigma = stats.sem(derived_kb_arr)
else:
kb_d_sigma = 0
kb_d_sigma_up = kb_d_avg + 3 * kb_d_sigma
kb_d_sigma_down = kb_d_avg - 3 * kb_d_sigma
# Print result
print("The measured temperature is {0} K ({1} °C).".format(round(temp,2), round((temp-273.15),2)))
print("The derived speed of sound is {} m/s.".format(c_s))
print("The derived k_B is {}.".format(kb_d))
print("The averaged derived k_B is {}.".format(kb_d_avg))
print("The precision of the measurement is {}%.".format(err_pct * 100))
print()
# Plotting Data with Error Bars
err_gp = err_arr_gp(time_arr, derived_kb_arr, kb_err_abs_arr)
line.set_xdata(time_arr)
line.set_ydata(derived_kb_arr)
bottoms.set_xdata(time_arr)
tops.set_xdata(time_arr)
bottoms.set_ydata(err_gp[0])
tops.set_ydata(err_gp[1])
verts[0].set_segments(err_gp[2])
# Plotting Reference lines
# x_list = list(itertools.repeat([np.min(time_arr), np.max(time_arr)], 4))
# y_list = [[kb_d_avg , kb_d_avg], [K_B, K_B], [kb_d_sigma_up, kb_d_sigma_up], [kb_d_sigma_down], [kb_d_sigma_down]]
x_list = list(itertools.repeat([np.min(time_arr), np.max(time_arr)], 2))
y_list = [[kb_d_avg , kb_d_avg], [K_B, K_B]]
for lnum, st_line in enumerate(st_lines):
st_line.set_data(x_list[lnum], y_list[lnum])
fig.gca().relim()
fig.gca().autoscale_view()
except (KeyboardInterrupt, SystemExit):
print()
print("Interrupt experienced.")
except Exception as e:
print(e)
finally:
return line, bottoms, tops, verts, st_lines
anim = animation.FuncAnimation(fig, main_controller, interval=DELAY*1000, init_func = plt_init)
try:
print("NOTE: You can close the pyplot window to exit the program.")
fig_now = plt.gcf()
plt.show()
except (KeyboardInterrupt, SystemExit):
save_data()
save_plot(fig_now)
print("Interrupt experienced. Early Exit.")
exit()
except Exception as e:
GPIO.cleanup()
print(e)
print("Exiting the program...")
GPIO.cleanup()
save_data()
save_plot(fig_now)
```
#### File: jerry0317/Measuring-k_B/plot.py
```python
import csv
import util
import numpy as np
import matplotlib.pyplot as plt
import itertools
import time
from scipy import stats
def err_arr_gp(x_arr, data_arr, err_arr):
if len(data_arr) != len(err_arr):
return False
else:
up_arr = []
low_arr = []
seg_arr = []
for i in range(0, len(data_arr)):
x_p = x_arr[i]
data_p = data_arr[i]
err_p = err_arr[i]
up_p = data_p + err_p
low_p = data_p - err_p
up_arr.append(up_p)
low_arr.append(low_p)
seg_arr.append([[x_p, low_p], [x_p, up_p]])
return (low_arr, up_arr, seg_arr)
def save_plot(fig):
# eps_loc = DATA_NAME + "_plt_" + str(int(time.time())) + '.eps'
eps_loc = DATA_NAME + '.eps'
    fig.savefig(eps_loc, format='eps')
print("\nPlot saved to {}.\n".format(eps_loc))
SR04_OFFSET = 55
def save_data():
h = ["Time", "Exp Distance", "Measured Time Diff", "Temperature", "Derived k_B", "Derived k_B Error", "Pressure", "HC-SRO4 Raw"]
try:
with open(csv_loc, "w+") as f:
dict_writer = csv.DictWriter(f, h)
dict_writer.writeheader()
try:
count = len(time_arr)
if len(temp_arr) != count or len(tt_arr) != count or len(derived_kb_arr) != count:
raise Exception("Different list lengths.")
except Exception as e:
print(e)
else:
pass
for i in range(0, count):
dict_writer.writerow({
h[0]: time_arr[i],
h[1]: distance_d,
h[2]: tt_arr[i],
h[3]: temp_arr[i],
h[4]: derived_kb_arr[i],
h[5]: kb_err_abs_arr[i],
h[6]: pres_arr[i],
h[7]: tt_arr[i] - (SR04_OFFSET * 10 ** (-6))
})
f.close()
except Exception as e:
print(e)
else:
pass
pass
def std_error(err_arr):
n = len(err_arr)
ste = np.sqrt(np.sum([e ** 2 for e in err_arr])/(n-1))
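    # Combines the per-point absolute errors in quadrature: SE = sqrt(sum(e_i^2) / (n - 1)).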
return ste
# Boltzmann constant (10^-23)
K_B = 1.38064852
data_id = util.user_input("data number", val_float=False)
DATA_NAME = "data/{}".format(data_id)
csv_loc = DATA_NAME + ".csv"
# List storing values
tt_arr = []
time_arr = []
temp_arr = []
derived_kb_arr = []
kb_err_abs_arr = []
pres_arr = []
kb_avg_arr = []
distance_d = 0
h = ["Time", "Exp Distance", "Measured Time Diff", "Temperature", "Derived k_B", "Derived k_B Error", "Pressure", "HC-SRO4 Raw"]
try:
csvFile = open(csv_loc, "r")
dict_reader = csv.DictReader(csvFile)
for row in dict_reader:
t = float(row[h[0]])
distance_d = float(row[h[1]])
tt = float(row[h[2]])
temp = float(row[h[3]])
pres = float(row[h[6]])
kb_d = util.kb_from_tt_rk_n2(tt, temp, distance_d, pres)
err_pct = util.err_from_tt_pct(tt, temp, distance_d)
err_abs = err_pct * kb_d
if len(time_arr) > 1:
kb_d_sigma = std_error(kb_err_abs_arr)
kb_d_avg_pre = np.mean(derived_kb_arr)
else:
kb_d_sigma = err_abs
kb_d_avg_pre = kb_d
kb_d_sigma_up = kb_d_avg_pre + 2 * kb_d_sigma
kb_d_sigma_down = kb_d_avg_pre - 2 * kb_d_sigma
if kb_d_sigma_down <= kb_d <= kb_d_sigma_up:
tt_arr.append(tt)
time_arr.append(t)
temp_arr.append(temp)
pres_arr.append(pres)
derived_kb_arr.append(kb_d)
kb_err_abs_arr.append(err_abs)
kb_d_avg = np.mean(derived_kb_arr)
kb_avg_arr.append(kb_d_avg)
print("The data set has been successfully loaded from CSV file.")
except Exception as e:
print(e)
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(223)
ax3 = fig.add_subplot(224)
line, (bottoms, tops), verts = ax1.errorbar([0], [0], yerr=0.01, capsize=0.1, fmt='ko', markersize=4, elinewidth=1,label="Realtime Measurement").lines
st_lines = [ax1.plot([], [], linestyle='dashed', label="Mean Measured Value")[0], ax1.plot([], [], linestyle='dashed', label=r"True $k_B$")[0], ax1.plot([], [], '.', label="Instantaneous Average Value", markersize=8)[0], ax2.plot([], [], '.', label="Temperature")[0], ax3.plot([], [], '.', label="Pressure")[0]]
ax1.set_ylabel(r"Derived $k_B$ ($10^{-23} J K^{-1}$)")
ax2.set_ylabel(r"Temperature $T$ (K)")
ax3.set_ylabel(r"Pressure $P$ (Pa)")
for ax in [ax1, ax2, ax3]:
ax.set_xlabel("Time (s)")
ax.legend(loc="lower right")
ax.tick_params(direction="in")
err_gp = util.err_arr_gp(time_arr, derived_kb_arr, kb_err_abs_arr)
line.set_xdata(time_arr)
line.set_ydata(derived_kb_arr)
bottoms.set_xdata(time_arr)
tops.set_xdata(time_arr)
bottoms.set_ydata(err_gp[0])
tops.set_ydata(err_gp[1])
verts[0].set_segments(err_gp[2])
# Plotting Reference lines
# x_list = list(itertools.repeat([np.min(time_arr), np.max(time_arr)], 4))
# y_list = [[kb_d_avg , kb_d_avg], [K_B, K_B], [kb_d_sigma_up, kb_d_sigma_up], [kb_d_sigma_down], [kb_d_sigma_down]]
kb_d_avg = np.mean(derived_kb_arr)
x_list = list(itertools.repeat([np.min(time_arr), np.max(time_arr)], 2))
y_list = [[kb_d_avg , kb_d_avg], [K_B, K_B]]
x_list.append(time_arr)
y_list.append(kb_avg_arr)
x_list.append(time_arr)
y_list.append(temp_arr)
x_list.append(time_arr)
y_list.append(pres_arr)
for lnum, st_line in enumerate(st_lines):
st_line.set_data(x_list[lnum], y_list[lnum])
fig.gca().relim()
fig.gca().autoscale_view()
for ax in [ax1, ax2, ax3]:
ax.relim()
ax.autoscale_view()
ax2.set_ylim([np.min(temp_arr) - 0.1,np.max(temp_arr) + 0.1])
ax3.set_ylim([np.min(pres_arr) - 25,np.max(pres_arr) + 25])
try:
fig_now = plt.gcf()
plt.show()
except (KeyboardInterrupt, SystemExit):
save_plot(fig_now)
save_data()
exit()
except Exception as e:
print(e)
save_plot(fig_now)
save_data()
```
|
{
"source": "jerry0-3/pybooru",
"score": 3
}
|
#### File: pybooru/pybooru/pybooru.py
```python
from __future__ import absolute_import
# External imports
import re
import requests
# pybooru imports
from . import __version__
from .exceptions import (PybooruError, PybooruHTTPError)
from .resources import (SITE_LIST, HTTP_STATUS_CODE)
class _Pybooru(object):
"""Pybooru main class.
Attributes:
        site_name (str): Get or set the site name.
        site_url (str): Get or set the URL of a Moebooru/Danbooru based site.
username (str): Return user name.
last_call (dict): Return last call.
"""
def __init__(self, site_name='', site_url='', username=''):
"""Initialize Pybooru.
Keyword arguments:
site_name (str): The site name in 'SITE_LIST', default sites.
            site_url (str): URL of a Moebooru/Danbooru based site.
username (str): Your username of the site (Required only for
functions that modify the content).
Raises:
PybooruError: When 'site_name' and 'site_url' are empty.
"""
# Attributes
self.__site_name = '' # for site_name property
self.__site_url = '' # for site_url property
self.username = username
self.last_call = {}
# Set HTTP Client
self.client = requests.Session()
headers = {'user-agent': 'Pybooru/{0}'.format(__version__),
'content-type': 'application/json; charset=utf-8'}
self.client.headers = headers
# Validate site_name or site_url
if site_name:
self.site_name = site_name
elif site_url:
self.site_url = site_url
else:
raise PybooruError("Unexpected empty arguments, specify parameter "
"'site_name' or 'site_url'.")
@property
def site_name(self):
"""Get or set site name.
:getter: Return site name.
:setter: Validate and set site name.
:type: string
"""
return self.__site_name
@site_name.setter
def site_name(self, site_name):
"""Function that sets and checks the site name and set url.
Parameters:
site_name (str): The site name in 'SITE_LIST', default sites.
Raises:
PybooruError: When 'site_name' isn't valid.
"""
if site_name in SITE_LIST:
self.__site_name = site_name
self.__site_url = SITE_LIST[site_name]['url']
else:
raise PybooruError(
"The 'site_name' is not valid, specify a valid 'site_name'.")
@property
def site_url(self):
"""Get or set site url.
:getter: Return site url.
:setter: Validate and set site url.
:type: string
"""
return self.__site_url
@site_url.setter
def site_url(self, url):
"""URL setter and validator for site_url property.
Parameters:
            url (str): URL of a Moebooru/Danbooru based site.
Raises:
PybooruError: When URL scheme or URL are invalid.
"""
# Regular expression to URL validate
regex = re.compile(
r'^(?:http|https)://' # Scheme only HTTP/HTTPS
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?| \
[A-Z0-9-]{2,}(?<!-)\.?)|' # Domain
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # or ipv6
r'(?::\d+)?' # Port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
# Validate URL
if re.match('^(?:http|https)://', url):
if re.search(regex, url):
self.__site_url = url
else:
raise PybooruError("Invalid URL: {0}".format(url))
else:
raise PybooruError(
"Invalid URL scheme, use HTTP or HTTPS: {0}".format(url))
@staticmethod
def _get_status(status_code):
"""Get status message for status code.
Parameters:
status_code (int): HTTP status code.
Returns:
status message (str).
"""
return "{0}, {1}".format(*HTTP_STATUS_CODE.get(
status_code, ('Undefined', 'undefined')))
def _request(self, url, api_call, request_args, method='GET'):
"""Function to request and returning JSON data.
Parameters:
url (str): Base url call.
api_call (str): API function to be called.
request_args (dict): All requests parameters.
            method (str): (Default: 'GET') HTTP method, 'GET' or 'POST'.
Raises:
PybooruHTTPError: HTTP Error.
requests.exceptions.Timeout: When HTTP Timeout.
ValueError: When can't decode JSON response.
"""
try:
if method != 'GET':
# Reset content-type for data encoded as a multipart form
self.client.headers.update({'content-type': None})
response = self.client.request(method, url, **request_args)
self.last_call.update({
'API': api_call,
'url': response.url,
'status_code': response.status_code,
'status': self._get_status(response.status_code),
'headers': response.headers
})
if response.status_code in (200, 201, 202):
return response.json()
elif response.status_code == 204:
return True
raise PybooruHTTPError("In _request", response.status_code,
response.url)
except requests.exceptions.Timeout:
raise PybooruError("Timeout! url: {0}".format(response.url))
except ValueError as e:
raise PybooruError("JSON Error: {0} in line {1} column {2}".format(
e.msg, e.lineno, e.colno))
```
|
{
"source": "Jerry-0591/Fusang",
"score": 3
}
|
#### File: simulation_experimental_phylogeny/code/extract_fasta_data.py
```python
import os
import shutil
from multiprocessing import Process, Pool
import multiprocessing
folder = '../simulate_data/'
folder_fasta = '../fasta_file/'
if not os.path.exists(folder_fasta):
os.mkdir(folder_fasta)
def extract(ele):
if('.fas' in ele and 'TRUE' in ele):
file = folder + ele
file_fasta = folder_fasta + ele
shutil.copy(file, file_fasta)
para_list = os.listdir('../simulate_data/')
pool = Pool(8)
pool.map(extract, para_list)
pool.close()
pool.join()
```
|
{
"source": "jerry0chu/Experiment",
"score": 2
}
|
#### File: lab/handle/handleAppliance.py
```python
from app.models import Appliance
from app import db
import json
def handleGetAppliances(page,per_page):
appliances = Appliance.query.paginate(page=page, per_page=per_page, error_out=False)
res = db.engine.execute("select count(*) from appliance")
count = [r[0] for r in res][0]
applianceInfo={
'appliances':[a.to_json() for a in appliances.items],
'count':count
}
return json.dumps(applianceInfo)
def handleSubmitApplianceEditForm(appliance):
appli = Appliance.query.filter_by(aid=appliance['aid']).first()
if appli:
appli.name=appliance['name']
appli.category=appliance['category']
appli.manufacturer=appliance['manufacturer']
appli.note=appliance['note']
db.session.commit()
return "success"
else:
return "failure"
def handleSubmitApplianceAddForm(appliance):
appli=Appliance(name=appliance["name"],category=appliance["category"],manufacturer=appliance["manufacturer"],note=appliance["note"])
db.session.add(appli)
db.session.commit()
return "success"
def handleRemoveAppliance(aid):
appliance = Appliance.query.filter_by(aid=aid).first()
if appliance:
db.session.delete(appliance)
db.session.commit()
return "success"
else:
return "failure"
def handleApplianceBatchDelete(aidList):
for aid in aidList:
appliance = Appliance.query.filter_by(aid=aid).first()
if appliance:
db.session.delete(appliance)
db.session.commit()
return "success"
def handleAppllianceQueryContent(selectType,content,page,per_page):
countQuery="db.session.query(Appliance).filter(Appliance."+selectType+".like('%"+content+"%')).count()"
count=eval(countQuery)
result="db.session.query(Appliance).filter(Appliance."+selectType+".like('%"+content+"%')).paginate(page="+page+", per_page="+per_page+", error_out=False)"
appliances=eval(result)
applianceInfo = {
'appliances': [a.to_json() for a in appliances.items],
'count': count
}
return json.dumps(applianceInfo)
```
#### File: lab/handle/handleExpData.py
```python
from sqlalchemy import text
from app.models import ExpData,Experiment
from app import db,APP_STATIC_DOWNLOAD
import json,time,os
import pandas as pd
import datetime
from app.lab.handle.handleLab import handleGetLid
def handleGetExpDatas(page, per_page):
res = db.engine.execute("select count(*) as count from expdata")
count = [r[0] for r in res][0]
sql = "select expdata.did, expdata.eid,experiment.name,encapsulation,discharge,charge,eficiency,loopretention,retention from expdata,experiment where expdata.eid=experiment.eid LIMIT " + str(
(page - 1) * per_page) + " ," + str(per_page)
expdataTu = db.engine.execute(text(sql))
expdataList = []
for expdata in expdataTu:
exp = {'did': expdata[0], 'eid': expdata[1], 'expname': expdata[2], 'encapsulation': expdata[3],
'discharge': expdata[4], 'charge': expdata[5], 'eficiency': expdata[6], 'loopretention': expdata[7],
'retention': expdata[8]}
expdataList.append(exp)
expdataSql='select experiment.eid ,experiment.name from experiment where experiment.eid not in (select expdata.eid from expdata)'
expTu=db.engine.execute(text(expdataSql))
availableExpList=[]
for exper in expTu:
ex={'eid':exper[0],'expname':exper[1]}
availableExpList.append(ex)
expdataInfo = {
'expdatas': expdataList,
'count': count,
'availableExp':availableExpList
}
return json.dumps(expdataInfo)
def handleSubmitExpDataEditForm(expdata):
exp = ExpData.query.filter_by(did=expdata['did']).first()
if exp:
exp.encapsulation = expdata['encapsulation']
exp.discharge = expdata['discharge']
exp.charge = expdata['charge']
exp.eficiency = expdata['eficiency']
exp.loopretention = expdata['loopretention']
exp.retention = expdata['retention']
db.session.commit()
return "success"
else:
return "failure"
return "success"
def handleSubmitExpDataAddForm(expdata):
sql = 'INSERT INTO expdata (eid,encapsulation,discharge,charge,eficiency,loopretention,retention) values (%d,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f)'%(
expdata["availExp"], expdata["encapsulation"],
expdata["discharge"], expdata["charge"], expdata['eficiency'],expdata['loopretention'],expdata['retention'])
db.engine.execute(text(sql))
return "success"
def handleRemoveExpData(did):
db.engine.execute('delete from expdata where did='+str(did))
return "success"
def handleExpdataBatchDelete(didList):
for did in didList:
db.engine.execute('delete from expdata where did=' + str(did))
return "success"
def handleExpdataQueryContent(selectType,content,page,per_page):
countexpnameSql="""
select count(did) as count from expdata,experiment where expdata.eid=experiment.eid and experiment.name like
"""
expnameSql = """
select expdata.did, expdata.eid,experiment.name as expname,
encapsulation,discharge,charge,eficiency,loopretention,retention
from expdata,experiment where expdata.eid=experiment.eid and expname like
"""
eidSql="""
select expdata.did, expdata.eid,experiment.name as expname,
encapsulation,discharge,charge,eficiency,loopretention,retention
from expdata,experiment where expdata.eid=experiment.eid and expdata.eid=
"""
count=0
expdataList=[]
def makeExpdataList(sql):
expdataTu = db.engine.execute(text(sql))
for expdata in expdataTu:
exp = {'did': expdata[0], 'eid': expdata[1], 'expname': expdata[2], 'encapsulation': expdata[3],
'discharge': expdata[4], 'charge': expdata[5], 'eficiency': expdata[6], 'loopretention': expdata[7],
'retention': expdata[8]}
expdataList.append(exp)
if selectType=='expname':
common = "'%" + content + "%'"
countexpnameSql += common
expnameSql += common
res = db.engine.execute(text(countexpnameSql))
count = [r[0] for r in res][0]
makeExpdataList(expnameSql)
if selectType=='eid' and content.isdigit():
count=1
eidSql += str(content)
makeExpdataList(eidSql)
expdatasInfo = {
'expdatas': expdataList,
'count': count
}
return json.dumps(expdatasInfo)
def handleDownExpData(userType):
commonSql = """
select experiment.name as expname,experiment.date,lab.name as labname,encapsulation,discharge,charge,eficiency,loopretention,retention
from experiment,lab,expdata where experiment.lid=lab.lid and expdata.eid=experiment.eid
"""
expSql = ''
if userType=='all':
expSql=commonSql
else:
expSql=commonSql+' and experiment.uid='+userType
expDataList = pd.read_sql_query(expSql, db.engine)
def timestamp_datetime(value):
format = '%Y-%m-%d'
value = time.localtime(value)
dt = time.strftime(format, value)
return dt
expDataList.columns = ['实验名称', '实验时间', '实验地点', '包覆含量(%)', '1C首圈放电比容量(mAh/g)', '1C首圈充电比容量(mAh/g)','首圈效率(%)','1C循环50圈后放电容量','容量保持率(%)']
expDataList['实验时间'] = expDataList['实验时间'] // 1000
expDataList['实验时间'] = expDataList['实验时间'].apply(timestamp_datetime)
path = os.path.join(APP_STATIC_DOWNLOAD, 'expdata.xlsx')
expDataList.to_excel(path,index=False)
return "success"
def handleUploadExpData(path,uid):
uploadExcel=pd.read_excel(path)
#print(uploadExcel.columns[0])
if '条件变量' in uploadExcel.columns[0]:
uploadExcel = pd.read_excel(path, header=None)
uploadExcel = uploadExcel[2:-1]
uploadExcel.columns = ['实验名称', '实验时间', '实验地点', '包覆含量(%)', '1C首圈放电比容量(mAh/g)', '1C首圈充电比容量(mAh/g)', '首圈效率(%)',
'1C循环50圈后放电容量', '容量保持率(%)']
uploadExcel.reset_index(inplace=True, drop=True)
uploadExcel['实验时间'] = pd.to_datetime(uploadExcel['实验时间'])
def datetime_timestamp(dt):
time.strptime(dt, '%Y-%m-%d')
s = time.mktime(time.strptime(dt, '%Y-%m-%d'))
return int(s)*1000
#uploadExcel['实验时间'] = uploadExcel['实验时间'].astype('str').apply(datetime_timestamp)
uploadExcel.columns = ['expname', 'date', 'labname', 'encapsulation', 'discharge', 'charge', 'eficiency',
'loopretention', 'retention']
#print(uploadExcel.head())
#print(uploadExcel['实验时间'].dtype)
length=uploadExcel.shape[0]
#print("length",length)
for row in range(length):
excelRowJson=json.loads(uploadExcel.iloc[row].to_json(force_ascii=False))
#print(excelRowJson)
if excelRowJson['expname'] == None:
break
#labid=handleGetLid(excelRowJson['labname'])
# experiment=Experiment(
# lid=labid,
# uid=uid,
# name=excelRowJson['expname'],
# date=datetime.datetime.fromtimestamp(excelRowJson['date']//1000),
# status=2
# )
# db.session.add(experiment)
# db.session.commit()
labid = handleGetLid(excelRowJson['labname'])
sql = 'INSERT INTO experiment (lid,uid,name,date,status) values (%d,%d,"%s",%d,%d)' % (
int(labid), int(uid), excelRowJson['expname'], excelRowJson['date'],2)
db.engine.execute(text(sql))
res=db.engine.execute('select eid from experiment order by eid DESC limit 1')
exp_eid = [r[0] for r in res][0]
expdata=ExpData(
eid=exp_eid,
encapsulation=excelRowJson['encapsulation'],
discharge=excelRowJson['discharge'],
charge=excelRowJson['charge'],
eficiency=excelRowJson['eficiency'],
loopretention=excelRowJson['loopretention'],
retention=excelRowJson['retention']
)
db.session.add(expdata)
db.session.commit()
```
#### File: lab/handle/handleExperiment.py
```python
from sqlalchemy import text
import datetime
from app.models import Experiment, Lab
from app import db
import json
import pandas as pd
import os
import time
from app import APP_STATIC_DOWNLOAD
status = {0: "未进行", 1: "正在进行", 2: "已完成"}
def makeExperimentInfo(expSql, countSql):
experiments = db.engine.execute(text(expSql))
expList = []
for exp in experiments:
ex = {'eid': exp[0], 'expname': exp[1], 'labname': exp[2], 'date': exp[3], 'status': status[exp[4]],
'account': exp[5]}
expList.append(ex)
res = db.engine.execute(text(countSql))
count = [r[0] for r in res][0]
return expList, count
def handleGetExperiments(page, per_page):
expSql = "select experiment.eid,experiment.name as expname,lab.name as labname,experiment.date,experiment.status,account from experiment,lab,user where experiment.lid=lab.lid and experiment.uid=user.uid LIMIT " + str(
(page - 1) * per_page) + " ," + str(per_page)
countSql='select count(*) as count from experiment,lab,user where experiment.lid=lab.lid and experiment.uid=user.uid'
experimentList,count=makeExperimentInfo(expSql,countSql)
labs = Lab.query.all()
labJson = [r.to_json() for r in labs]
users = db.engine.execute("select uid,account from user")
userList = []
for row in users:
user = {'uid': row[0], 'account': row[1]}
userList.append(user)
experimentInfo = {
'experiments': experimentList,
'count': count,
'labs': labJson,
'users': userList
}
return json.dumps(experimentInfo)
def handleSubmitExperimentEditForm(experiment):
sql = 'update experiment set name=' + "'" + experiment['expname'] + "'" + ', date=' + str(experiment['date'])
if type(experiment['labname']) is int:
sql = sql + ", lid=" + str(experiment['labname'])
if len(experiment['status']) == 1:
sql += ",status= " + str(experiment['status'])
if type(experiment['account']) is int:
sql += ",uid= " + str(experiment['account'])
sql = sql + ' where eid=' + str(experiment['eid'])
db.engine.execute(text(sql))
return "success"
def handleSubmitExperimentAddForm(experiment):
sql = 'INSERT INTO experiment (lid,uid,name,date,status) values (%d,%d,"%s",%d,%d)' % (
experiment["labname"], experiment["account"], experiment["expname"], experiment["date"], int(experiment["status"]))
db.engine.execute(text(sql))
return "success"
def handleRemoveExperiment(eid):
res=db.engine.execute("select experiment.eid from experiment where experiment.eid in (select expdata.eid from expdata) and experiment.eid="+str(eid))
exist = [r[0] for r in res]
if len(exist)==0:
db.engine.execute('delete from experiment where eid=' + str(eid))
return "success"
else:
return "实验已经产生数据,不能删除"
def handleExperimentBatchDelete(eidList):
for eid in eidList:
handleRemoveExperiment(eid)
return "success"
def handleExperimentQueryContent(selectType, statusType, content, page, per_page):
limitSql=" LIMIT " + str((page - 1) * per_page) + " ," + str(per_page)
commonExpSql="""
select experiment.eid,experiment.name as expname,lab.name as labname,experiment.date,
experiment.status,account from experiment,lab,user where experiment.lid=lab.lid
and experiment.uid=user.uid and
"""
commonCountSQl="""
select count() as count from experiment,lab,user where experiment.lid=lab.lid
and experiment.uid=user.uid and
"""
experimentList=[]
count=0
if content == '' and statusType != '-1':
expSql=commonExpSql+" status="+statusType+limitSql
countSql=commonCountSQl+" status="+statusType
experimentList,count=makeExperimentInfo(expSql,countSql)
elif content != '' and statusType == '-1':
if selectType =='eid':
expSql=commonExpSql+" experiment.eid="+content
countSql=commonCountSQl+" experiment.eid="+content
experimentList, count = makeExperimentInfo(expSql, countSql)
elif selectType=='expname':
expSql = commonExpSql + " expname like " +"'%"+ content+"%'"+limitSql
countSql = commonCountSQl + " experiment.name like " +"'%"+ content+"%'"
experimentList, count = makeExperimentInfo(expSql, countSql)
elif selectType=='account':
expSql = commonExpSql + " account= "+"'"+ content+ "'"+limitSql
countSql = commonCountSQl + " account= "+ "'"+content+"'"
experimentList, count = makeExperimentInfo(expSql, countSql)
elif content != '' and statusType != '-1':
if selectType =='eid':
expSql=commonExpSql+" experiment.eid="+content+" and status="+statusType
countSql=commonCountSQl+" experiment.eid="+content+" and status="+statusType
experimentList, count = makeExperimentInfo(expSql, countSql)
elif selectType=='expname':
expSql = commonExpSql + " expname like " +"'%"+ content+"%'"+" and status="+statusType + limitSql
countSql = commonCountSQl + " experiment.name like " +"'%"+ content+"%'"+" and status="+statusType
experimentList, count = makeExperimentInfo(expSql, countSql)
elif selectType=='account':
expSql = commonExpSql + " account= "+"'"+ content+ "'"+" and status="+statusType +limitSql
countSql = commonCountSQl + " account= "+ "'"+content+"'"+" and status="+statusType
experimentList, count = makeExperimentInfo(expSql, countSql)
experimentInfo = {
'experiments': experimentList,
'count': count
}
return json.dumps(experimentInfo)
def handleDownExperiment(downloadExpStatus):
commonSql="""
select experiment.eid,experiment.name as expname,lab.name as labname,experiment.date,
experiment.status,account from experiment,lab,user where experiment.lid=lab.lid
and experiment.uid=user.uid
"""
expSql=''
if downloadExpStatus =='-1':
expSql=commonSql
else:
expSql=commonSql+" and experiment.status="+downloadExpStatus
expList=pd.read_sql_query(expSql,db.engine)
def timestamp_datetime(value):
format = '%Y-%m-%d'
value = time.localtime(value)
dt = time.strftime(format, value)
return dt
expList.columns=['实验编号', '实验名称', '实验地点','实验时间','实验状态','用户账号']
expList['实验状态'].replace({0:"未进行",1:"正在进行",2:"已完成"},inplace=True)
expList['实验时间'] = expList['实验时间'] // 1000
expList['实验时间'] = expList['实验时间'].apply(timestamp_datetime)
path = os.path.join(APP_STATIC_DOWNLOAD, 'experiment.xlsx')
expList.to_excel(path,index=False)
return "success"
```
#### File: lab/view/expdataViews.py
```python
from werkzeug.utils import secure_filename
from app.lab import lab
from flask import request, send_file
from app import APP_STATIC_DOWNLOAD, APP_STATIC_UPLOAD
from app.lab.handle.handleExpData import handleGetExpDatas, handleSubmitExpDataEditForm, handleSubmitExpDataAddForm, \
handleRemoveExpData, handleExpdataBatchDelete, handleExpdataQueryContent, handleDownExpData, handleUploadExpData
import json, os
@lab.route('/getExpDatas', methods=['POST'])
def getExpDatas():
page = request.form.get('page', None)
per_page = request.form.get('per_page', None)
if page and per_page:
return handleGetExpDatas(int(page), int(per_page))
else:
return "error"
@lab.route('/submitExpDataEditForm', methods=['POST'])
def submitExpDataEditForm():
expdata = json.loads(request.form.get('expdata', None))
return handleSubmitExpDataEditForm(expdata)
@lab.route('/submitExpDataAddForm', methods=['POST'])
def submitExpDataAddForm():
expdata = json.loads(request.form.get('expdata', None))
return handleSubmitExpDataAddForm(expdata)
@lab.route('/removeExpData', methods=['POST'])
def removeExpData():
did = request.form.get('did', None)
return handleRemoveExpData(did)
@lab.route('/expdataBatchDelete', methods=['POST'])
def expdataBatchDelete():
didList = json.loads(request.form.get('didList', None))
return handleExpdataBatchDelete(didList)
@lab.route('/expdataQueryContent', methods=['POST'])
def expdataQueryContent():
selectType = request.form.get('selectType', None)
content = request.form.get('content', None)
page = request.form.get('page', None)
per_page = request.form.get('per_page', None)
return handleExpdataQueryContent(selectType, content, page, per_page)
@lab.route('/downExpData', methods=['POST'])
def downExpData():
userType = request.form.get('userType', None)
if handleDownExpData(userType) == 'success':
path = os.path.join(APP_STATIC_DOWNLOAD, 'expdata.xlsx')
rv = send_file(path, attachment_filename=path, as_attachment=True)
return rv
else:
return 'failure'
@lab.route('/uploadExpData', methods=['POST'])
def uploadExpData():
file = request.files['file']
uid=request.form.get('uid',None)
print('uploadExpData uid:',uid)
string = ''
string.endswith(('.xls', '.xlsx'))
if file.filename.endswith(('.xls', '.xlsx')):
print(file.filename)
path = os.path.join(APP_STATIC_UPLOAD, file.filename)
file.save(path)
handleUploadExpData(path,uid)
return "success"
else:
return "failure"
```
#### File: Experiment/app/models.py
```python
from datetime import datetime
from app import db
# from werkzeug.security import generate_password_hash
# from flask import Flask
# from flask_sqlalchemy import SQLAlchemy
#
# app = Flask(__name__)
# app.debug = True
#
# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db/labpro.db'
# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
# db = SQLAlchemy(app)
# Role
class Role(db.Model):
    __tablename__ = "role"
    rid = db.Column(db.Integer, primary_key=True) # id
    name = db.Column(db.String(100), unique=True) # name
    note = db.Column(db.String(100), default="") # note
    # Relationships
users = db.relationship("User", backref='role')
def to_json(self):
return {
"rid": self.rid,
"name": self.name,
}
def __repr__(self):
return '<Role %r>' % self.name
# User
class User(db.Model):
    __tablename__ = "user"
    uid = db.Column(db.Integer, primary_key=True, autoincrement=True) # user id
    rid = db.Column(db.Integer, db.ForeignKey("role.rid")) # role id
    account = db.Column(db.String(50), unique=True) # account
    username = db.Column(db.String(100), unique=True) # display name
    password = db.Column(db.String(20)) # password
    phone = db.Column(db.Integer) # phone number
    # Relationships
experiments = db.relationship("Experiment", backref="user")
def to_json(self):
return {
"uid": self.uid,
"rid": self.rid,
"account": self.account,
"username": self.username,
"phone": self.phone
}
def __repr__(self):
return '<User %r>' % self.username
# Appliance (instrument)
class Appliance(db.Model):
    __tablename__ = "appliance"
    aid = db.Column(db.Integer, primary_key=True, autoincrement=True) # appliance id
    name = db.Column(db.String(50)) # appliance name
    manufacturer = db.Column(db.String(50)) # manufacturer
    category = db.Column(db.String(50)) # model / type
    note = db.Column(db.String(100), default="") # note
def to_json(self):
return {
"aid": self.aid,
"name": self.name,
"manufacturer": self.manufacturer,
"category": self.category,
"note": self.note
}
def __repr__(self):
return '<appliance %r>' % self.name
# Material
class Material(db.Model):
    __tablename__ = "material"
    mid = db.Column(db.Integer, primary_key=True, autoincrement=True) # material id
    name = db.Column(db.String(50)) # material name
    manufacturer = db.Column(db.String(50)) # manufacturer
    purity = db.Column(db.String(50)) # purity
    note = db.Column(db.String(100), default="") # note
def to_json(self):
return {
"mid": self.mid,
"name": self.name,
"manufacturer": self.manufacturer,
"purity": self.purity,
"note": self.note
}
def __repr__(self):
return '<material %r>' % self.name
# Lab
class Lab(db.Model):
    __tablename__ = "lab"
    lid = db.Column(db.Integer, primary_key=True, autoincrement=True) # lab id
    name = db.Column(db.String(50), unique=True) # lab name
    note = db.Column(db.String(100), default="") # note
    # Relationships
experiments = db.relationship("Experiment", backref="lab")
def to_json(self):
return {
"lid": self.lid,
"name": self.name,
"note": self.note
}
def __repr__(self):
return '<lab %r>' % self.name
# Experiment
class Experiment(db.Model):
    __tablename__ = "experiment"
    eid = db.Column(db.Integer, primary_key=True, autoincrement=True) # experiment id
    lid = db.Column(db.Integer, db.ForeignKey("lab.lid")) # lab id
    uid = db.Column(db.Integer, db.ForeignKey("user.uid")) # user id
    name = db.Column(db.String(50)) # experiment name
    date = db.Column(db.DateTime, index=True, default=datetime.now)
    status = db.Column(db.Integer) # status: 0 not started, 1 in progress, 2 finished
    # Relationships
expdatas = db.relationship("ExpData", backref="experiment")
def __repr__(self):
return '<experiment %r>' % self.name
# Experiment data
class ExpData(db.Model):
    __tablename__ = "expdata"
    did = db.Column(db.Integer, primary_key=True, autoincrement=True) # data id
    eid = db.Column(db.Integer, db.ForeignKey("experiment.eid")) # experiment id
    encapsulation = db.Column(db.Float) # coating content (%)
    discharge = db.Column(db.Float) # discharge specific capacity
    charge = db.Column(db.Float) # charge specific capacity
    eficiency = db.Column(db.Float) # first-cycle efficiency
    loopretention = db.Column(db.Float) # discharge capacity after 50 cycles
    retention = db.Column(db.Float) # capacity retention
def __repr__(self):
return '<expdata %d>' % self.did
if __name__ == "__main__":
    # Create all database tables
"""
db.create_all()
"""
#print("testRole")
    # Test inserting data
"""
role=Role(
name="超级管理员",
)
db.session.add(role)
db.session.commit()
"""
    # Test inserting an admin user and generating a hashed password
"""
from werkzeug.security import generate_password_hash
user=User(
rid=1,
account="Jerry",
username="JerryChu",
password=generate_password_hash("<PASSWORD>"),
phone=12345678912
)
db.session.add(user)
db.session.commit()
"""
# role=Role.query.filter_by(name="教师").first()
# print(role)
    # Update
# user = User.query.filter_by(account="David12").first()
# user.username = "李伟"
# db.session.commit()
# print(user)
    # Pagination
# users=User.query.paginate(page=1, per_page=2)
# print(type(users))
# print(users.items)
# print(users.pages)
# print(users.prev_num)
# print(users.has_prev)
# print(users.next_num)
    # print(users.next()) # object
# print(users.has_next)
# print(users.next_num)
# print(type(users.next_num))
#
# roles = Role.query.all()
# for r in roles:
# print(r.to_json())
# print(roles)
# raw sql
# result = db.engine.execute("select uid,user.rid,account,username,phone,name from user,role where user.rid=role.rid")
# userList = []
# for row in result:
# user={}
# user['uid']=row[0]
# user['rid'] = row[1]
# user['account'] = row[2]
# user['username'] = row[3]
# user['phone'] = row[4]
# user['roleName'] = row[5]
# userList.append(user)
# print(userList)
#
# res=db.engine.execute("select count(*) from user")
# count=[r[0] for r in res][0]
# print(count)
#print(res[0])
# roles = Role.query.all()
# roleJson = [r.to_json() for r in roles]
# print(roleJson)
# role = Role.query.filter_by(name="教师").first()
# print(role)
# print(role.rid)
    # cd into the app directory and run: python models.py
```
|
{
"source": "Jerry12228/Source",
"score": 4
}
|
#### File: Source/Alien/ship.py
```python
import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
def __init__(self, ai_settings, screen):
"""Initialize Ship and Set initializing position"""
super(Ship, self).__init__()
self.screen = screen
self.ai_settings = ai_settings
# Loading Ship image and get circumscribed rectangle
self.image = pygame.image.load('images/ship_57×112.bmp')
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
# Make every new ship at mid bottom on screen
self.rect.centerx = self.screen_rect.centerx
self.rect.bottom = self.screen_rect.bottom
# Save float value in attribute Center of ship
self.center = float(self.rect.centerx)
# Move sign
self.moving_right = False
self.moving_left = False
def update(self):
"""According to the move sign change position of ship"""
if self.moving_right and self.rect.right < self.screen_rect.right:
self.center += self.ai_settings.ship_speed_factor
if self.moving_left and self.rect.left > 0:
self.center -= self.ai_settings.ship_speed_factor
# Update rect with self.center
self.rect.centerx = self.center
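        # pygame.Rect stores coordinates as integers, so the fractional position is tracked in
        # self.center (a float) and copied back into rect.centerx once per update.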
def blitme(self):
"""Drawing Ship at appoint position"""
self.screen.blit(self.image, self.rect)
def center_ship(self):
"""Make ship at mid of screen"""
self.center = self.screen_rect.centerx
```
|
{
"source": "Jerry1962325/Google-Grass-Machine",
"score": 3
}
|
#### File: Jerry1962325/Google-Grass-Machine/GoogleGM_qqbot.py
```python
from typing import Optional
import requests
import asyncio
from fastapi import FastAPI
from pydantic import BaseModel
import urllib
import urllib.parse
import json
import sys
import os
import platform
from googletrans import Translator
class Item(BaseModel):
name: str
description: Optional[str] = None
price: float
tax: Optional[float] = None
url="http://127.0.0.1:5700/send_group_msg"
version="1.0"
app = FastAPI()
class GroupItem(BaseModel):
message: str
group_id: int
class FriendMessage(BaseModel):
message: str
friend_id: int
translator = Translator(service_urls=['translate.google.cn']) # Use the Google Translate domain reachable from mainland China
lang = ['de','en','ja','da','fr','ny','uk','gl','ht','es','zh-cn'] # Languages for the translation chain, one pass per entry
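# Each group message prefixed with "生草" is translated through every language in `lang` in order,
# ending back at Simplified Chinese (zh-cn); the repeated lossy translations produce the
# deliberately mangled text that the handler below posts back to the group.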
@app.post("/")
async def create_item(item: dict):
msg1=item.get("message")
group=item.get("group_id")
if msg1 and (msg1.startswith("生草")):
print("收到了请求。")
juzi = msg1[2:]
count = 0
for mubiao in lang:
count = count + 1 #翻译计次
juzi = translator.translate(juzi,dest=mubiao).text
print(str(count)+"次翻译结果:"+juzi)
print("最终翻译结果:"+juzi)
requests.post(url,json={"group_id":group,"message":"翻译结果:"+juzi})
print("完成了请求。")
del juzi
if msg1=="ver":
requests.post(url,json={"group_id":group,"message":"GoogleGM_qqbot(https://github.com/Asankilp/Google-Grass-Machine) ver"+version+"\n本机器人基于uvicorn及go-cqhttp(github.com/Mrs4s/go-cqhttp)。Google Translate核心模块为googletrans(https://pypi.org/project/googletrans/)。\n运行环境:\nPython "+sys.version+"\n操作系统:\n"+platform.platform()+" "+platform.version()})
if msg1=="目力":
requests.post(url,json={"group_id":group,"message":"[CQ:record,file=https://asankilp.github.io/muli.mp3]"})
return {}
```
|
{
"source": "Jerry2001Qu/pennylane-qiskit",
"score": 2
}
|
#### File: pennylane-qiskit/pennylane_qiskit/converter.py
```python
r"""
This module contains functions for converting Qiskit QuantumCircuit objects
into PennyLane circuit templates.
"""
from typing import Dict, Any
import warnings
import numpy as np
from qiskit import QuantumCircuit
from qiskit.circuit import Parameter, ParameterExpression
from qiskit.exceptions import QiskitError
import pennylane as qml
import pennylane.ops.qubit as pennylane_ops
from pennylane_qiskit.qiskit_device import QISKIT_OPERATION_MAP
# pylint: disable=too-many-instance-attributes
inv_map = {v.__name__: k for k, v in QISKIT_OPERATION_MAP.items()}
def _check_parameter_bound(param: Parameter, var_ref_map: Dict[Parameter, qml.variable.Variable]):
"""Utility function determining if a certain parameter in a QuantumCircuit has
been bound.
Args:
param (qiskit.circuit.Parameter): the parameter to be checked
var_ref_map (dict[qiskit.circuit.Parameter, pennylane.variable.Variable]):
a dictionary mapping qiskit parameters to PennyLane variables
"""
if isinstance(param, Parameter) and param not in var_ref_map:
raise ValueError("The parameter {} was not bound correctly.".format(param))
def _extract_variable_refs(params: Dict[Parameter, Any]) -> Dict[Parameter, qml.variable.Variable]:
"""Iterate through the parameter mapping to be bound to the circuit,
and return a dictionary containing the differentiable parameters.
Args:
params (dict): dictionary of the parameters in the circuit to their corresponding values
Returns:
dict[qiskit.circuit.Parameter, pennylane.variable.Variable]: a dictionary mapping
qiskit parameters to PennyLane variables
"""
variable_refs = {}
# map qiskit parameters to PennyLane differentiable Variables.
if params is not None:
for k, v in params.items():
# Values can be arrays of size 1, need to extract the Python scalar
# (this can happen e.g. when indexing into a PennyLane numpy array)
if isinstance(v, np.ndarray):
v = v.item()
if isinstance(v, qml.variable.Variable):
variable_refs[k] = v
return variable_refs # map qiskit parameters to PennyLane differentiable Variables.
def _check_circuit_and_bind_parameters(
quantum_circuit: QuantumCircuit, params: dict, diff_params: dict
) -> QuantumCircuit:
"""Utility function for checking for a valid quantum circuit and then binding parameters.
Args:
quantum_circuit (QuantumCircuit): the quantum circuit to check and bind the parameters for
params (dict): dictionary of the parameters in the circuit to their corresponding values
diff_params (dict): dictionary mapping the differentiable parameters to PennyLane
Variable instances
Returns:
QuantumCircuit: quantum circuit with bound parameters
"""
if not isinstance(quantum_circuit, QuantumCircuit):
raise ValueError(
"The circuit {} is not a valid Qiskit QuantumCircuit.".format(quantum_circuit)
)
if params is None:
return quantum_circuit
for k in diff_params:
# Since we cannot bind Variables to Qiskit circuits,
# we must remove them from the binding dictionary before binding.
del params[k]
return quantum_circuit.bind_parameters(params)
def map_wires(wires: list, qc_wires: list) -> dict:
"""Utility function mapping the wires specified in a quantum circuit with the wires
specified by the user for the template.
Args:
wires (list): wires specified for the template
qc_wires (list): wires from the converted quantum circuit
Returns:
dict[int, int]: map from quantum circuit wires to the user defined wires
"""
if wires is None:
return dict(zip(qc_wires, range(len(qc_wires))))
if len(qc_wires) == len(wires):
return dict(zip(qc_wires, wires))
raise qml.QuantumFunctionError(
"The specified number of wires - {} - does not match "
"the number of wires the loaded quantum circuit acts on.".format(len(wires))
)
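# Illustrative mapping for map_wires: for a two-qubit register named "q", qc_wires is
# [("q", 0), ("q", 1)]; with wires=None this returns {("q", 0): 0, ("q", 1): 1}, and with
# wires=[2, 0] it returns {("q", 0): 2, ("q", 1): 0}.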
def execute_supported_operation(operation_name: str, parameters: list, wires: list):
"""Utility function that executes an operation that is natively supported by PennyLane.
Args:
operation_name (str): wires specified for the template
parameters (str): parameters of the operation that will be executed
wires (list): wires of the operation
"""
operation = getattr(pennylane_ops, operation_name)
if not parameters:
operation(wires=wires)
elif operation_name == "QubitStateVector":
operation(np.array(parameters), wires=wires)
else:
operation(*parameters, wires=wires)
def load(quantum_circuit: QuantumCircuit):
"""Loads a PennyLane template from a Qiskit QuantumCircuit.
Warnings are created for each of the QuantumCircuit instructions that were
not incorporated in the PennyLane template.
Args:
quantum_circuit (qiskit.QuantumCircuit): the QuantumCircuit to be converted
Returns:
function: the resulting PennyLane template
"""
def _function(params: dict = None, wires: list = None):
"""Returns a PennyLane template created based on the input QuantumCircuit.
Warnings are created for each of the QuantumCircuit instructions that were
not incorporated in the PennyLane template.
Args:
params (dict): specifies the parameters that need to be bound in the QuantumCircuit
wires (Sequence[int] or int): The wires the converted template acts on.
Note that if the original QuantumCircuit acted on :math:`N` qubits,
then this must be a list of length :math:`N`.
Returns:
function: the new PennyLane template
"""
var_ref_map = _extract_variable_refs(params)
qc = _check_circuit_and_bind_parameters(quantum_circuit, params, var_ref_map)
# Wires from a qiskit circuit are unique w.r.t. a register name and a qubit index
qc_wires = [(q.register.name, q.index) for q in quantum_circuit.qubits]
wire_map = map_wires(wires, qc_wires)
# Processing the dictionary of parameters passed
for op in qc.data:
instruction_name = op[0].__class__.__name__
operation_wires = [wire_map[(qubit.register.name, qubit.index)] for qubit in op[1]]
# New Qiskit gates that are not natively supported by PL (identical
# gates exist with a different name)
# TODO: remove the following when gates have been renamed in PennyLane
instruction_name = "U3Gate" if instruction_name == "UGate" else instruction_name
if instruction_name in inv_map and inv_map[instruction_name] in pennylane_ops.ops:
# Extract the bound parameters from the operation. If the bound parameters are a
# Qiskit ParameterExpression, then replace it with the corresponding PennyLane
# variable from the var_ref_map dictionary.
parameters = []
for p in op[0].params:
_check_parameter_bound(p, var_ref_map)
if isinstance(p, ParameterExpression):
if p.parameters:
# p.parameters must be a single parameter, as PennyLane
# does not support expressions of variables currently.
if len(p.parameters) > 1:
raise ValueError(
"Operation {} has invalid parameter {}. PennyLane does not support "
"expressions containing differentiable parameters as operation "
"arguments".format(instruction_name, p)
)
param = min(p.parameters)
parameters.append(var_ref_map.get(param))
else:
parameters.append(float(p))
else:
parameters.append(p)
execute_supported_operation(inv_map[instruction_name], parameters, operation_wires)
elif instruction_name == "SdgGate":
sgate = getattr(pennylane_ops, "S")
sgate(wires=operation_wires).inv()
elif instruction_name == "TdgGate":
tgate = getattr(pennylane_ops, "T")
tgate(wires=operation_wires).inv()
else:
try:
operation_matrix = op[0].to_matrix()
pennylane_ops.QubitUnitary(operation_matrix, wires=operation_wires)
except (AttributeError, QiskitError):
warnings.warn(
__name__ + ": The {} instruction is not supported by PennyLane,"
" and has not been added to the template.".format(instruction_name),
UserWarning,
)
return _function
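# Minimal usage sketch for load() (names are illustrative): given a parametrized QuantumCircuit `qc`
# with a qiskit Parameter `theta`, `template = load(qc)` returns a function that can be called inside
# a PennyLane qnode as `template(params={theta: 0.5}, wires=[0, 1])`.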
def load_qasm(qasm_string: str):
"""Loads a PennyLane template from a QASM string.
Args:
qasm_string (str): the name of the QASM string
Returns:
function: the new PennyLane template
"""
return load(QuantumCircuit.from_qasm_str(qasm_string))
def load_qasm_from_file(file: str):
"""Loads a PennyLane template from a QASM file.
Args:
file (str): the name of the QASM file
Returns:
function: the new PennyLane template
"""
return load(QuantumCircuit.from_qasm_file(file))
```
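For context, here is a minimal usage sketch (not part of the repository) showing how the converter above is typically wired into a PennyLane QNode. The Bell circuit, device, and measurement below are illustrative assumptions; only `load` comes from the code above, and `load_qasm` / `load_qasm_from_file` produce the same kind of template from QASM text or a file path.
```python
# Minimal sketch, assuming PennyLane and Qiskit are installed and that `load`
# from the converter above is importable; the circuit itself is illustrative.
import pennylane as qml
from qiskit import QuantumCircuit

bell = QuantumCircuit(2)
bell.h(0)
bell.cx(0, 1)

bell_template = load(bell)  # returns the `_function` template defined above

dev = qml.device("default.qubit", wires=2)

@qml.qnode(dev)
def circuit():
    bell_template(wires=[0, 1])
    return qml.expval(qml.PauliZ(0))

print(circuit())  # expectation of Z on qubit 0 of a Bell state, ~0.0
```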
|
{
"source": "jerry3links/tensorflow_input_image_by_tfrecord",
"score": 3
}
|
#### File: tensorflow_input_image_by_tfrecord/src/conv_cards_utils.py
```python
from tensorflow.contrib.learn.python.learn.datasets import mnist as mnist_module
# PATH: /home/jerry3chang/Workspace/tensorflow_py3/lib/python3.5/site-packages/tensorflow/contrib/learn/python/learn/datasets/base.py
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import gfile
import math
import h5py
# import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
import os
from array import *
from random import shuffle
from PIL import Image
import numpy as np
# The generated MNIST-style labels are still one-hot over 10 classes; collapse them to 3 classes (0, 1, other)
def transform_to_3(images, labels):
trans = np.zeros((images.shape[0], 3))
c = 0
for e in labels:
d = 0
for f in e:
if d < 2:
trans[c, d] = labels[c, d]
elif labels[c, 0] == 0 and labels[c, 1] == 0:
trans[c, 2] = 1
else:
trans[c, 2] = 0
d += 1
#if c <= 15:
#print (e)
#print (trans[c,:])
c += 1
return trans
# copy from mnist
def read_data_sets(train_dir,
one_hot=False,
dtype=dtypes.float32,
reshape=True,
validation_size=5000,
seed=None,
source_url=None): # omit url since we are using our own dataset
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 'test-images-idx3-ubyte.gz'
TEST_LABELS = 'test-labels-idx1-ubyte.gz'
local_file = base.maybe_download(TRAIN_IMAGES, train_dir, None) # omit url, local file will be a path
# type: DataSets
with gfile.Open(local_file, 'rb') as f:
train_images = mnist_module.extract_images(f)
print (train_images.shape)
local_file = base.maybe_download(TRAIN_LABELS, train_dir, None) # omit url
with gfile.Open(local_file, 'rb') as f:
train_labels = mnist_module.extract_labels(f, one_hot=one_hot)
local_file = base.maybe_download(TEST_IMAGES, train_dir, None) # omit url
with gfile.Open(local_file, 'rb') as f:
test_images = mnist_module.extract_images(f)
local_file = base.maybe_download(TEST_LABELS, train_dir, None) # omit url
with gfile.Open(local_file, 'rb') as f:
test_labels = mnist_module.extract_labels(f, one_hot=one_hot)
if not 0 <= validation_size <= len(train_images):
raise ValueError(
'Validation size should be between 0 and {}. Received: {}.'
.format(len(train_images), validation_size))
validation_images = train_images[:validation_size]
validation_labels = train_labels[:validation_size]
train_images = train_images[validation_size:]
train_labels = train_labels[validation_size:]
options = dict(dtype=dtype, reshape=reshape, seed=seed)
train = mnist_module.DataSet(train_images, train_labels, **options)
validation = mnist_module.DataSet(validation_images, validation_labels, **options)
test = mnist_module.DataSet(test_images, test_labels, **options)
return base.Datasets(train=train, validation=validation, test=test)
def load_dataset():
train_dataset = h5py.File('./datasets/train_signs.h5', "r")
train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
# (1080,64,64,3)
train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
test_dataset = h5py.File('./datasets/test_signs.h5', "r")
print ("test_dataset.type " + str(type(test_dataset)))
print ("test_dataset[\"test_set_x\"][:].type " + str(type(test_dataset["test_set_x"][:])))
test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
print ("test_set.type " + str(type(test_set_x_orig)))
print ("test_set.ndim " + str(test_set_x_orig.ndim))
print ("test_set.shape " + str(test_set_x_orig.shape))
print ("test_set[0].type " + str(type(test_set_x_orig[0])))
print ("test_set[0].ndim " + str(test_set_x_orig[0].ndim))
print ("test_set[0].shape " + str(test_set_x_orig[0].shape))
classes = np.array(test_dataset["list_classes"][:]) # the list of classes
train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
def load_dataset_deck():
# Load from and save to
Names = [['./jpg_to_mnist/training_images','train'], ['./jpg_to_mnist/testing_images','test']]
train_images = np.zeros(1620528) # 689 * 28 * 28 * 3
train_labels = np.zeros(689)
    test_images = np.zeros(181104) # 77 * 28 * 28 * 3
test_labels = np.zeros(77)
for name in Names:
data_image = array('B')
data_label = array('B')
FileList = []
        # NOTE: the original converter sliced os.listdir(...)[1:] to skip .DS_Store on macOS; the slice is not applied here
for dirname in os.listdir(name[0]):
path = os.path.join(name[0],dirname)
for filename in os.listdir(path):
if filename.endswith(".png"):
FileList.append(os.path.join(name[0],dirname,filename))
print (str(name) + " " + str(len(FileList)))
        shuffle(FileList) # Useful for later carving out a validation set
cnt = 0
for filename in FileList:
cnt += 1
# NOTE: The class labels have to be integer
label = int(filename.split('/')[3])
im = Image.open(filename) # .convert('L')
pixel = im.load()
width, height = im.size # 28*28
for channel in range(0,len(pixel[0,0])):
for x in range(0,width):
for y in range(0,height):
data_image.append(pixel[y,x][channel])
            data_label.append(label) # one unsigned byte per label
if name[1] == 'train':
train_images = np.array(data_image).reshape(len(FileList), 28, 28, 3)
train_labels = np.array(data_label).reshape(len(FileList))
elif name[1] == 'test':
test_images = np.array(data_image).reshape(len(FileList), 28, 28, 3)
test_labels = np.array(data_label).reshape(len(FileList))
return train_images, train_labels, test_images, test_labels
```
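As a quick, hypothetical illustration (not from the repository) of what `transform_to_3` computes: classes 0 and 1 are kept as-is and every other class is folded into a third "other" bucket.
```python
# Hand-made demo of transform_to_3; the inputs below are illustrative assumptions.
import numpy as np

labels = np.eye(10)[[0, 1, 7]]     # 10-way one-hot rows for classes 0, 1 and 7
images = np.zeros((3, 28, 28, 3))  # only images.shape[0] is used

print(transform_to_3(images, labels))
# [[1. 0. 0.]
#  [0. 1. 0.]
#  [0. 0. 1.]]
```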
|
{
"source": "jerry73204/cns-final-tor-store",
"score": 2
}
|
#### File: cns-final-tor-store/src/tor_helper.py
```python
import argparse
import sys
import os
import logzero
import tor_utils
def main():
parser = argparse.ArgumentParser()
parser.add_argument('COMMAND', choices=['store', 'load'])
parser.add_argument('KEY_SIZE', type=int)
parser.add_argument('DATA_SIZE', type=int)
parser.add_argument('NONCE_SIZE', type=int)
args = parser.parse_args()
# Configure logger
if 'LOGLEVEL' in os.environ:
logzero.loglevel(os.environ['LOGLEVEL'])
# Parse arguments
    # DATA_SIZE is given in bits and must be a multiple of 8
    data_length = args.DATA_SIZE // 8
    assert args.DATA_SIZE / 8 == data_length
if args.COMMAND == 'load':
addr = input()
block = tor_utils.load_block(
addr,
key_size=args.KEY_SIZE,
data_size=args.DATA_SIZE,
nonce_size=args.NONCE_SIZE,
)
if block is not None:
sys.stdout.buffer.write(block)
else:
exit(1)
elif args.COMMAND == 'store':
block = sys.stdin.buffer.read(data_length)
addr = tor_utils.store_block(
block,
key_size=args.KEY_SIZE,
data_size=args.DATA_SIZE,
nonce_size=args.NONCE_SIZE,
)
if addr is not None:
print(addr)
else:
exit(1)
else:
raise ValueError('Command %s is not understood' % args.COMMAND)
if __name__ == '__main__':
main()
```
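A hedged sketch of how this CLI could be driven end to end from Python; the key/data/nonce sizes are illustrative assumptions, and a working `tor_utils` backend with a reachable Tor network is assumed.
```python
# Round-trip one block through tor_helper.py via subprocess (sizes are
# illustrative; success depends on the tor_utils backend, which is not shown).
import subprocess

data_size_bits = 1024
payload = b"\x00" * (data_size_bits // 8)  # 'store' reads DATA_SIZE/8 bytes from stdin

store = subprocess.run(
    ["python", "tor_helper.py", "store", "2048", str(data_size_bits), "64"],
    input=payload, capture_output=True, check=True,
)
addr = store.stdout.decode().strip()       # 'store' prints the resulting address

load_proc = subprocess.run(
    ["python", "tor_helper.py", "load", "2048", str(data_size_bits), "64"],
    input=(addr + "\n").encode(), capture_output=True, check=True,
)
assert load_proc.stdout == payload         # 'load' writes the block back to stdout
```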
|
{
"source": "jerry73204/gqnrs",
"score": 2
}
|
#### File: jerry73204/gqnrs/plot.py
```python
import os
import argparse
import glob
import time
import torch
from visdom import Visdom
def main():
# Parse arguments
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('LOG_DIR')
arg_parser.add_argument(
'--visdom-address',
default='localhost'
)
arg_parser.add_argument(
'--visdom-port',
type=int,
default=8097
)
args = arg_parser.parse_args()
# Connect to Visdom server
visdom_addr = args.visdom_address
visdom_port = args.visdom_port
viz = Visdom(port=visdom_port, server=visdom_addr)
# Load log data
paths = list(glob.glob(os.path.join(args.LOG_DIR, '*.zip')))
paths.sort()
elbo_loss_x = list()
elbo_loss_y = list()
target_mse_x = list()
target_mse_y = list()
for path in paths:
        log_name = os.path.basename(path)[:-4]  # file names look like '<step>-<timestamp>.zip'
step, ts = log_name.split('-')
step = int(step)
ts = int(ts)
log = torch.jit.load(path, map_location=torch.device('cpu'))
elbo_loss_x.append(step)
elbo_loss_y.append(log.elbo_loss)
target_mse_x.append(step)
target_mse_y.append(log.target_mse)
# Uncomment this to plot histogram
# viz.histogram(X=log.stds_target.view([-1]))
# viz.images(log.means_target, opts=dict(title=log_name))
# Plot ELBO loss and target MSE curve
elbo_loss_y = torch.stack(elbo_loss_y).detach().numpy()
viz.line(X=elbo_loss_x, Y=elbo_loss_y)
target_mse_y = torch.stack(target_mse_y).detach().numpy()
viz.line(X=target_mse_x, Y=target_mse_y)
if __name__ == '__main__':
main()
```
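For reference, a hypothetical sketch (not from the repository) of how a log archive compatible with this reader could be written: `torch.jit.save` produces a zip whose registered buffers come back as attributes after `torch.jit.load`, which is what the loop above relies on. The attribute names match the script; the values and path are made up.
```python
# Write a '<step>-<timestamp>.zip' log that plot.py can read back.
import os
import time
import torch

class LogRecord(torch.nn.Module):
    def __init__(self, elbo_loss, target_mse):
        super().__init__()
        # Buffers survive torch.jit save/load and reappear as attributes,
        # matching the log.elbo_loss / log.target_mse accesses above.
        self.register_buffer("elbo_loss", elbo_loss)
        self.register_buffer("target_mse", target_mse)

    def forward(self):
        return self.elbo_loss

os.makedirs("logs", exist_ok=True)
record = torch.jit.script(LogRecord(torch.tensor(1.23), torch.tensor(0.045)))
record.save("logs/000100-%d.zip" % int(time.time()))
```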
|